Commit fdb0f078 authored by georgia.kouveli, committed by Commit bot

[arm64] Use CMN for cmp(a,sub(0,b)) only when checking equality/inequality.

We were previously incorrectly changing:
  sub r0, 0, r1
  cmp r2, r0
  b.cond <addr>
to:
  cmn r2, r1
  b.cond <addr>

for all conditions. This is incorrect for conditions involving the C (carry)
and V (overflow) flags, and in particular in the case where r1 = INT_MIN.
The optimization is still safe to perform for Equal and NotEqual since they
do not depend on the C and V flags.
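To make the flag behavior concrete, here is a minimal standalone C++ sketch
(illustrative only, not V8 code; the function names are made up) contrasting
how the two instruction forms decide a signed less-than:

  // Minimal sketch: why "cmp a, (0 - b)" and "cmn a, b" agree on the Z flag
  // but can disagree on C/V-dependent conditions when b == INT32_MIN.
  #include <cstdint>
  #include <cstdio>

  // Signed "a < -b" as the CMP sequence sees it: negate b with 32-bit
  // wrap-around first, then compare against the (possibly wrapped) result.
  bool LessThanViaCmp(int32_t a, int32_t b) {
    int32_t neg_b = static_cast<int32_t>(0u - static_cast<uint32_t>(b));
    return a < neg_b;
  }

  // Signed "a < -b" as CMN sees it: LT (N != V) after an addition holds
  // exactly when the mathematically exact sum a + b is negative.
  bool LessThanViaCmn(int32_t a, int32_t b) {
    return static_cast<int64_t>(a) + static_cast<int64_t>(b) < 0;
  }

  int main() {
    // With b == INT32_MIN, 0 - b wraps back to INT32_MIN: the CMP form
    // evaluates 1 < INT32_MIN (false) while the CMN form evaluates
    // 1 + INT32_MIN < 0 (true), so conditions like LT diverge.
    std::printf("cmp: %d, cmn: %d\n", LessThanViaCmp(1, INT32_MIN),
                LessThanViaCmn(1, INT32_MIN));
    return 0;
  }

Equality is unaffected because Z depends only on the 32-bit result bits, and
a - (0 - b) and a + b produce identical bit patterns modulo 2^32.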

BUG=

Review-Url: https://codereview.chromium.org/2318043002
Cr-Commit-Position: refs/heads/master@{#39246}
parent 860f6527
@@ -2138,8 +2138,11 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
       MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                               cond, cont, &immediate_mode);
     }
-  } else if (m.right().IsInt32Sub()) {
+  } else if (m.right().IsInt32Sub() && (cond == kEqual || cond == kNotEqual)) {
     // Select negated compare for comparisons with negated right input.
+    // Only do this for kEqual and kNotEqual, which do not depend on the
+    // C and V flags, as those flags will be different with CMN when the
+    // right-hand side of the original subtraction is INT_MIN.
     Node* sub = m.right().node();
     Int32BinopMatcher msub(sub);
     if (msub.left().Is(0)) {
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Flags: --allow-natives-syntax --turbo

function CompareNegate(a, b) {
  a = a|0;
  b = b|0;
  var sub = 0 - b;
  return a < (sub|0);
}

var x = CompareNegate(1, 0x80000000);
%OptimizeFunctionOnNextCall(CompareNegate);
CompareNegate(1, 0x80000000);
assertOptimized(CompareNegate);
assertEquals(x, CompareNegate(1, 0x80000000));
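A note on what this regression test pins down: after the |0 coercions,
0x80000000 becomes INT32_MIN, and (0 - b)|0 wraps back to INT32_MIN, so
a < (sub|0) must remain false. The first call records the unoptimized result
in x; if the optimized code selected CMN for the signed "<", the final
assertEquals would observe a different value and fail.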
@@ -3142,11 +3142,20 @@ const IntegerCmp kIntegerCmpInstructions[] = {
      kUnsignedLessThanOrEqual,
      kUnsignedGreaterThanOrEqual}};
 
+const IntegerCmp kIntegerCmpEqualityInstructions[] = {
+    {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+      MachineType::Int32()},
+     kEqual,
+     kEqual},
+    {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kArm64Cmp32,
+      MachineType::Int32()},
+     kNotEqual,
+     kNotEqual}};
+
 }  // namespace
 
 TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
-  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpEqualityInstructions) {
     TRACED_FOREACH(Shift, shift, kShiftInstructions) {
       // Test 32-bit operations. Ignore ROR shifts, as compare-negate does not
       // support them.
@@ -3203,7 +3212,7 @@ TEST_F(InstructionSelectorTest, CmpWithImmediateOnLeft) {
 }
 
 TEST_F(InstructionSelectorTest, CmnWithImmediateOnLeft) {
-  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpEqualityInstructions) {
     TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
       // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
       // is tested elsewhere.
@@ -3244,7 +3253,7 @@ TEST_F(InstructionSelectorTest, CmpSignedExtendByteOnLeft) {
 }
 
 TEST_F(InstructionSelectorTest, CmnSignedExtendByteOnLeft) {
-  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpEqualityInstructions) {
     StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                     MachineType::Int32());
     Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
@@ -3294,7 +3303,7 @@ TEST_F(InstructionSelectorTest, CmpShiftByImmediateOnLeft) {
 }
 
 TEST_F(InstructionSelectorTest, CmnShiftByImmediateOnLeft) {
-  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpEqualityInstructions) {
     TRACED_FOREACH(Shift, shift, kShiftInstructions) {
       // Only test relevant shifted operands.
       if (shift.mi.machine_type != MachineType::Int32()) continue;
......