Commit eb991c66 authored by martyn.capewell's avatar martyn.capewell Committed by Commit bot

[turbofan] Use cmn on ARM64 for negated rhs cmp

Use compare-negate instruction if the right-hand input to a compare is a
negate operation.

BUG=

Review URL: https://codereview.chromium.org/1410123009

Cr-Commit-Position: refs/heads/master@{#31866}
parent 859b15ca
......@@ -787,10 +787,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Cmn:
__ Cmn(i.InputRegister(0), i.InputOperand(1));
__ Cmn(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
case kArm64Cmn32:
__ Cmn(i.InputRegister32(0), i.InputOperand32(1));
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Tst:
__ Tst(i.InputRegister(0), i.InputOperand(1));
......
......@@ -223,14 +223,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
bool is_cmp = opcode == kArm64Cmp32;
bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
// We can commute cmp by switching the inputs and commuting the flags
// continuation.
bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
// The cmp instruction is encoded as sub with zero output register, and
// therefore supports the same operand modes.
// The cmp and cmn instructions are encoded as sub or add with zero output
// register, and therefore support the same operand modes.
bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
m.IsInt64Sub() || is_cmp;
......@@ -1516,8 +1516,29 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Emits the flag-setting instruction for a 32-bit comparison |node|.
// Normally this is Cmp32, but when the right-hand input is a negation
// (0 - x) we emit Cmn32 against x instead: cmn is the compare-negative
// instruction, so cmp(a, 0 - x) and cmn(a, x) produce the flags the
// continuation |cont| needs without materializing the negation.
// NOTE: the scraped diff left the stale pre-commit VisitBinop call
// (unconditional kArm64Cmp32) concatenated before the new body, which
// would have emitted the comparison twice; that residue is removed here.
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  Int32BinopMatcher m(node);
  ArchOpcode opcode = kArm64Cmp32;
  // Select negated compare for comparisons with negated right input.
  if (m.right().IsInt32Sub()) {
    Node* sub = m.right().node();
    Int32BinopMatcher msub(sub);
    // Only a subtraction from zero is a pure negation.
    if (msub.left().Is(0)) {
      bool can_cover = selector->CanCover(node, sub);
      node->ReplaceInput(1, msub.right().node());
      // Even if the comparison node covers the subtraction, after the input
      // replacement above, the node still won't cover the input to the
      // subtraction; the subtraction still uses it.
      // In order to get shifted operations to work, we must remove the rhs
      // input to the subtraction, as TryMatchAnyShift requires this node to
      // cover the input shift. We do this by setting it to the lhs input,
      // as we know it's zero, and the result of the subtraction isn't used by
      // any other node.
      if (can_cover) sub->ReplaceInput(1, msub.left().node());
      opcode = kArm64Cmn32;
    }
  }
  VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
}
......
......@@ -2533,6 +2533,71 @@ TEST_F(InstructionSelectorTest, Word32EqualZeroWithWord32Equal) {
}
}
namespace {

// Pairs a 32-bit comparison operator (as a machine-instruction description)
// with the flags condition the instruction selector is expected to attach to
// the emitted compare.
struct IntegerCmp {
  MachInst2 mi;       // comparison constructor, name, opcode and machine type
  FlagsCondition cond;  // expected flags_condition() on the emitted instruction
};

// Print only the embedded instruction description; the condition is implied
// by the comparison and would be redundant in TRACED_FOREACH output.
std::ostream& operator<<(std::ostream& os, const IntegerCmp& cmp) {
  return os << cmp.mi;
}

// ARM64 32-bit integer comparison instructions.
// All five comparisons lower to kArm64Cmp32; they differ only in the
// condition read from the flags.
const IntegerCmp kIntegerCmpInstructions[] = {
    {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
      kMachInt32},
     kEqual},
    {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
      kMachInt32},
     kSignedLessThan},
    {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
      kArm64Cmp32, kMachInt32},
     kSignedLessThanOrEqual},
    {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
      kMachUint32},
     kUnsignedLessThan},
    {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
      kArm64Cmp32, kMachUint32},
     kUnsignedLessThanOrEqual}};

}  // namespace
// Checks that a comparison whose right-hand input is a negated, shifted value
// (cmp(p0, 0 - (p1 <shift> imm))) is selected as a single Cmn32 instruction
// with the shift folded into operand 2, for every comparison in
// kIntegerCmpInstructions.
TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
    TRACED_FOREACH(Shift, shift, kShiftInstructions) {
      // Test 32-bit operations. Ignore ROR shifts, as compare-negate does not
      // support them.
      if (shift.mi.machine_type != kMachInt32 ||
          shift.mi.arch_opcode == kArm64Ror32) {
        continue;
      }
      // NOTE(review): the immediate range extends beyond the 0-31 valid
      // 32-bit shift amounts — presumably to exercise shift-amount handling
      // outside the canonical range; confirm against the selector's
      // shift-immediate matching.
      TRACED_FORRANGE(int32_t, imm, -32, 63) {
        StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
        Node* const p0 = m.Parameter(0);
        Node* const p1 = m.Parameter(1);
        // Build cmp(p0, 0 - (p1 <shift> imm)); the negation should be
        // absorbed into a compare-negate.
        Node* r = (m.*shift.mi.constructor)(p1, m.Int32Constant(imm));
        m.Return(
            (m.*cmp.mi.constructor)(p0, m.Int32Sub(m.Int32Constant(0), r)));
        Stream s = m.Build();
        // Exactly one instruction: Cmn32 with the shift as its addressing
        // mode and the shift amount as the third input.
        ASSERT_EQ(1U, s.size());
        EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
        EXPECT_EQ(3U, s[0]->InputCount());
        EXPECT_EQ(shift.mode, s[0]->addressing_mode());
        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
        EXPECT_EQ(1U, s[0]->OutputCount());
        // The flags condition must be preserved from the original compare.
        EXPECT_EQ(kFlags_set, s[0]->flags_mode());
        EXPECT_EQ(cmp.cond, s[0]->flags_condition());
      }
    }
  }
}
// -----------------------------------------------------------------------------
// Miscellaneous
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment