Commit 3da5a729 authored by balazs.kilvady, committed by Commit bot

MIPS: [turbofan] Improve code generation for unordered comparisons.

Port c24220c0

TEST=cctest,unittests
BUG=

Review URL: https://codereview.chromium.org/850733004

Cr-Commit-Position: refs/heads/master@{#26045}
parent dc8f4c8c
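The hunks below make two related changes. In the MIPS code generator, the per-opcode switches that translated a FlagsCondition into a MIPS Condition inside AssembleArchBranch and AssembleArchBoolean are factored into the shared helpers FlagsConditionToConditionCmp, FlagsConditionToConditionTst and FlagsConditionToConditionOvf, so each call site collapses to a single call. A minimal sketch of the resulting call-site shape, copied from the kMipsCmp branch case further down rather than new code:

  cc = FlagsConditionToConditionCmp(branch->condition);
  __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));

In the instruction selectors, Float64 comparisons no longer use the kUnordered* flag conditions; they are tagged kEqual, kUnsignedLessThan and kUnsignedLessThanOrEqual, and the unordered (NaN) case is handled explicitly by the BranchF checks in the code generator.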
@@ -203,6 +203,67 @@ class OutOfLineCeil FINAL : public OutOfLineRound {
: OutOfLineRound(gen, result) {}
};
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
switch (condition) {
case kEqual:
return eq;
case kNotEqual:
return ne;
case kSignedLessThan:
return lt;
case kSignedGreaterThanOrEqual:
return ge;
case kSignedLessThanOrEqual:
return le;
case kSignedGreaterThan:
return gt;
case kUnsignedLessThan:
return lo;
case kUnsignedGreaterThanOrEqual:
return hs;
case kUnsignedLessThanOrEqual:
return ls;
case kUnsignedGreaterThan:
return hi;
case kUnorderedEqual:
case kUnorderedNotEqual:
break;
default:
break;
}
UNREACHABLE();
return kNoCondition;
}
Condition FlagsConditionToConditionTst(FlagsCondition condition) {
switch (condition) {
case kNotEqual:
return ne;
case kEqual:
return eq;
default:
break;
}
UNREACHABLE();
return kNoCondition;
}
Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
switch (condition) {
case kOverflow:
return lt;
case kNotOverflow:
return ge;
default:
break;
}
UNREACHABLE();
return kNoCondition;
}
} // namespace
@@ -646,72 +707,18 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// not separated by other instructions.
if (instr->arch_opcode() == kMipsTst) {
switch (branch->condition) {
case kNotEqual:
cc = ne;
break;
case kEqual:
cc = eq;
break;
default:
UNSUPPORTED_COND(kMipsTst, branch->condition);
break;
}
cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
// kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
switch (branch->condition) {
case kOverflow:
cc = lt;
break;
case kNotOverflow:
cc = ge;
break;
default:
UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
break;
}
cc = FlagsConditionToConditionOvf(branch->condition);
__ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
} else if (instr->arch_opcode() == kMipsCmp) {
switch (branch->condition) {
case kEqual:
cc = eq;
break;
case kNotEqual:
cc = ne;
break;
case kSignedLessThan:
cc = lt;
break;
case kSignedGreaterThanOrEqual:
cc = ge;
break;
case kSignedLessThanOrEqual:
cc = le;
break;
case kSignedGreaterThan:
cc = gt;
break;
case kUnsignedLessThan:
cc = lo;
break;
case kUnsignedGreaterThanOrEqual:
cc = hs;
break;
case kUnsignedLessThanOrEqual:
cc = ls;
break;
case kUnsignedGreaterThan:
cc = hi;
break;
default:
UNSUPPORTED_COND(kMipsCmp, branch->condition);
break;
}
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
@@ -721,24 +728,24 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// even if we have to unfold BranchF macro.
Label* nan = flabel;
switch (branch->condition) {
case kUnorderedEqual:
case kEqual:
cc = eq;
break;
case kUnorderedNotEqual:
case kNotEqual:
cc = ne;
nan = tlabel;
break;
case kUnorderedLessThan:
case kUnsignedLessThan:
cc = lt;
break;
case kUnorderedGreaterThanOrEqual:
case kUnsignedGreaterThanOrEqual:
cc = ge;
nan = tlabel;
break;
case kUnorderedLessThanOrEqual:
case kUnsignedLessThanOrEqual:
cc = le;
break;
case kUnorderedGreaterThan:
case kUnsignedGreaterThan:
cc = gt;
nan = tlabel;
break;
@@ -788,17 +795,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
// not separated by other instructions.
if (instr->arch_opcode() == kMipsTst) {
switch (condition) {
case kNotEqual:
cc = ne;
break;
case kEqual:
cc = eq;
break;
default:
UNSUPPORTED_COND(kMipsTst, condition);
break;
}
cc = FlagsConditionToConditionTst(condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
@@ -806,17 +803,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
// kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
switch (condition) {
case kOverflow:
cc = lt;
break;
case kNotOverflow:
cc = ge;
break;
default:
UNSUPPORTED_COND(kMipsAddOvf, condition);
break;
}
cc = FlagsConditionToConditionOvf(condition);
__ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
@@ -824,41 +811,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
} else if (instr->arch_opcode() == kMipsCmp) {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
switch (condition) {
case kEqual:
cc = eq;
break;
case kNotEqual:
cc = ne;
break;
case kSignedLessThan:
cc = lt;
break;
case kSignedGreaterThanOrEqual:
cc = ge;
break;
case kSignedLessThanOrEqual:
cc = le;
break;
case kSignedGreaterThan:
cc = gt;
break;
case kUnsignedLessThan:
cc = lo;
break;
case kUnsignedGreaterThanOrEqual:
cc = hs;
break;
case kUnsignedLessThanOrEqual:
cc = ls;
break;
case kUnsignedGreaterThan:
cc = hi;
break;
default:
UNSUPPORTED_COND(kMipsCmp, condition);
break;
}
cc = FlagsConditionToConditionCmp(condition);
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
@@ -870,30 +823,30 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FPURegister dummy1 = f0;
FPURegister dummy2 = f2;
switch (condition) {
case kUnorderedEqual:
case kEqual:
// TODO(plind): improve the NaN testing throughout this function.
__ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
cc = eq;
break;
case kUnorderedNotEqual:
case kNotEqual:
__ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
__ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
cc = ne;
break;
case kUnorderedLessThan:
case kUnsignedLessThan:
__ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
cc = lt;
break;
case kUnorderedGreaterThanOrEqual:
case kUnsignedGreaterThanOrEqual:
__ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
__ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
cc = ge;
break;
case kUnorderedLessThanOrEqual:
case kUnsignedLessThanOrEqual:
__ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
cc = le;
break;
case kUnorderedGreaterThan:
case kUnsignedGreaterThan:
__ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
__ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
cc = gt;
......
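In the float paths above, the branch condition now comes directly from the Equal/LessThan flavour of the comparison, and the unordered case is routed explicitly: for kEqual, kUnsignedLessThan and kUnsignedLessThanOrEqual a NaN operand ends up at the false label, while for kNotEqual, kUnsignedGreaterThanOrEqual and kUnsignedGreaterThan the NaN check targets the true label, because those conditions are the negations of the former and ordered predicates are false on NaN. A standalone sketch of the IEEE-754 behaviour being encoded (plain C++ for illustration, not V8 code):

  #include <cmath>
  #include <cstdio>

  int main() {
    const double nan = std::nan("");
    // Ordered comparisons against NaN are always false ...
    std::printf("nan == 1.0   : %d\n", nan == 1.0);    // 0 -> NaN goes to the false label
    std::printf("nan <  1.0   : %d\n", nan <  1.0);    // 0
    std::printf("nan <= 1.0   : %d\n", nan <= 1.0);    // 0
    // ... so their negations are true, which is why the NaN check is
    // redirected to the true label for the !=, >= and > cases.
    std::printf("nan != 1.0   : %d\n", nan != 1.0);    // 1 -> NaN goes to the true label
    std::printf("!(nan < 1.0) : %d\n", !(nan < 1.0));  // 1
    std::printf("!(nan <= 1.0): %d\n", !(nan <= 1.0)); // 1
    return 0;
  }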
@@ -680,13 +680,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
@@ -798,19 +798,19 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont(kUnorderedLessThan, node);
FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
......
@@ -909,13 +909,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
@@ -1049,19 +1049,19 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
FlagsContinuation cont(kUnorderedLessThan, node);
FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
......
@@ -42,19 +42,19 @@ struct FPCmp {
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD,
kMachFloat64},
kUnorderedEqual},
kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD,
kMachFloat64},
kUnorderedLessThan},
kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
kMipsCmpD, kMachFloat64},
kUnorderedLessThanOrEqual},
kUnsignedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD,
kMachFloat64},
kUnorderedLessThan},
kUnsignedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
"Float64GreaterThanOrEqual", kMipsCmpD, kMachFloat64},
kUnorderedLessThanOrEqual}};
kUnsignedLessThanOrEqual}};
struct Conversion {
// The machine_type field in MachInst1 represents the destination type.
......
@@ -42,19 +42,19 @@ struct FPCmp {
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMips64CmpD,
kMachFloat64},
kUnorderedEqual},
kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMips64CmpD,
kMachFloat64},
kUnorderedLessThan},
kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
kMips64CmpD, kMachFloat64},
kUnorderedLessThanOrEqual},
kUnsignedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
kMips64CmpD, kMachFloat64},
kUnorderedLessThan},
kUnsignedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
"Float64GreaterThanOrEqual", kMips64CmpD, kMachFloat64},
kUnorderedLessThanOrEqual}};
kUnsignedLessThanOrEqual}};
struct Conversion {
// The machine_type field in MachInst1 represents the destination type.
......
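In both unittest tables above, Float64GreaterThan and Float64GreaterThanOrEqual expect kUnsignedLessThan and kUnsignedLessThanOrEqual, which suggests the selector emits these comparisons with the operands swapped; the swap is harmless even for NaN inputs, where both orderings are simply false. A quick standalone check of that equivalence (plain C++ for illustration, not part of the tests):

  #include <cassert>
  #include <cmath>

  int main() {
    const double nan = std::nan("");
    // a > b is the same as b < a, including when one side is NaN
    // (both comparisons are false in that case).
    assert((2.0 > 1.0) == (1.0 < 2.0));
    assert((nan > 1.0) == (1.0 < nan));
    // Likewise a >= b is the same as b <= a.
    assert((nan >= 1.0) == (1.0 <= nan));
    return 0;
  }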