Commit f5d90fc9 authored by georgia.kouveli, committed by Commit bot

[arm64] Fix handling of CMN and ADD/SUB with overflow in VisitBinop.

CMN is a flag-setting add operation, and therefore is commutative.
{Add,Sub}WithOverflow generate ADD/SUB instructions that cannot
support a ROR shift.
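
As a standalone illustration of the first point (a plain C++ sketch, not part of this CL): CMP sets the flags from a - b, so swapping its operands only works if the continuation condition is commuted as well, whereas CMN sets the flags from a + b, which is symmetric in its operands.

// Standalone sketch: the arithmetic behind the commutation rules above.
#include <cassert>

int main() {
  int a = 3, b = 7;
  // CMP compares via a - b: swapping the operands flips the relation, so the
  // flags continuation condition must be commuted (e.g. LT becomes GT).
  assert((a < b) == (b > a));
  // CMN compares a against -b via a + b: addition is commutative, so the
  // operands can be swapped without touching the condition.
  assert((a + b < 0) == (b + a < 0));
  return 0;
}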

BUG=

Review-Url: https://codereview.chromium.org/2087233005
Cr-Commit-Position: refs/heads/master@{#37212}
parent 2a5a8fde
@@ -256,36 +256,96 @@ bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
  }
}
// Bitfields describing binary operator properties:
// CanCommuteField is true if we can switch the two operands, potentially
// requiring commuting the flags continuation condition.
typedef BitField8<bool, 1, 1> CanCommuteField;
// MustCommuteCondField is true when we need to commute the flags continuation
// condition in order to switch the operands.
typedef BitField8<bool, 2, 1> MustCommuteCondField;
// IsComparisonField is true when the operation is a comparison and has no
// result other than the condition.
typedef BitField8<bool, 3, 1> IsComparisonField;
// IsAddSubField is true when an instruction is encoded as ADD or SUB.
typedef BitField8<bool, 4, 1> IsAddSubField;
// Get properties of a binary operator.
uint8_t GetBinopProperties(InstructionCode opcode) {
  uint8_t result = 0;
  switch (opcode) {
    case kArm64Cmp32:
    case kArm64Cmp:
      // We can commute CMP by switching the inputs and commuting
      // the flags continuation.
      result = CanCommuteField::update(result, true);
      result = MustCommuteCondField::update(result, true);
      result = IsComparisonField::update(result, true);
      // The CMP and CMN instructions are encoded as SUB or ADD
      // with zero output register, and therefore support the same
      // operand modes.
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Cmn32:
    case kArm64Cmn:
      result = CanCommuteField::update(result, true);
      result = IsComparisonField::update(result, true);
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Add32:
    case kArm64Add:
      result = CanCommuteField::update(result, true);
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Sub32:
    case kArm64Sub:
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Tst32:
    case kArm64Tst:
      result = CanCommuteField::update(result, true);
      result = IsComparisonField::update(result, true);
      break;
    case kArm64And32:
    case kArm64And:
    case kArm64Or32:
    case kArm64Or:
    case kArm64Eor32:
    case kArm64Eor:
      result = CanCommuteField::update(result, true);
      break;
    default:
      UNREACHABLE();
      return 0;
  }
  DCHECK_IMPLIES(MustCommuteCondField::decode(result),
                 CanCommuteField::decode(result));
  return result;
}
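
For reference, a standalone sketch of the property byte this function builds, with plain bit masks standing in for BitField8 (illustration only, not V8 code):

// Standalone sketch: plain bit masks mirroring the BitField8 layout above
// (bit 1: CanCommute, bit 2: MustCommuteCond, bit 3: IsComparison,
//  bit 4: IsAddSub). Not V8 code; for illustration only.
#include <cassert>
#include <cstdint>

constexpr uint8_t kCanCommute = 1 << 1;
constexpr uint8_t kMustCommuteCond = 1 << 2;
constexpr uint8_t kIsComparison = 1 << 3;
constexpr uint8_t kIsAddSub = 1 << 4;

int main() {
  // CMN: a commutative, flag-setting ADD; swapping operands needs no
  // condition commute.
  uint8_t cmn = kCanCommute | kIsComparison | kIsAddSub;
  // CMP: commutable only if the continuation condition is commuted too.
  uint8_t cmp = kCanCommute | kMustCommuteCond | kIsComparison | kIsAddSub;
  assert((cmn & kMustCommuteCond) == 0);
  assert((cmp & kMustCommuteCond) != 0);
  return 0;
}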
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode,
                FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
-  Matcher m(node);
  InstructionOperand inputs[5];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

-  bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
-  // We can commute cmp by switching the inputs and commuting the flags
-  // continuation.
-  bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
-  // The cmp and cmn instructions are encoded as sub or add with zero output
-  // register, and therefore support the same operand modes.
-  bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
-                    m.IsInt64Sub() || is_cmp;
-  Node* left_node = m.left().node();
-  Node* right_node = m.right().node();
+  Node* left_node = node->InputAt(0);
+  Node* right_node = node->InputAt(1);
+
+  uint8_t properties = GetBinopProperties(opcode);
+  bool can_commute = CanCommuteField::decode(properties);
+  bool must_commute_cond = MustCommuteCondField::decode(properties);
+  bool is_add_sub = IsAddSubField::decode(properties);
  if (g.CanBeImmediate(right_node, operand_mode)) {
    inputs[input_count++] = g.UseRegister(left_node);
    inputs[input_count++] = g.UseImmediate(right_node);
-  } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
-    cont->Commute();
+  } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
+    if (must_commute_cond) cont->Commute();
    inputs[input_count++] = g.UseRegister(right_node);
    inputs[input_count++] = g.UseImmediate(left_node);
  } else if (is_add_sub &&

@@ -295,7 +355,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
  } else if (is_add_sub && can_commute &&
             TryMatchAnyExtend(&g, selector, node, right_node, left_node,
                               &inputs[0], &inputs[1], &opcode)) {
-    if (is_cmp) cont->Commute();
+    if (must_commute_cond) cont->Commute();
    input_count += 2;
  } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                              !is_add_sub)) {

@@ -305,7 +365,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                             !is_add_sub)) {
-    if (is_cmp) cont->Commute();
+    if (must_commute_cond) cont->Commute();
    Matcher m_shift(left_node);
    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
    inputs[input_count++] = g.UseRegister(m_shift.left().node());

@@ -320,7 +380,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
    inputs[input_count++] = g.Label(cont->false_block());
  }

-  if (!is_cmp) {
+  if (!IsComparisonField::decode(properties)) {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

@@ -329,7 +389,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
  }

  DCHECK_NE(0u, input_count);
-  DCHECK((output_count != 0) || is_cmp);
+  DCHECK((output_count != 0) || IsComparisonField::decode(properties));
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
...
@@ -184,8 +184,11 @@ const MachInst2 kOvfAddSubInstructions[] = {
    {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
     kArm64Add32, MachineType::Int32()},
    {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
-    kArm64Sub32, MachineType::Int32()}};
+    kArm64Sub32, MachineType::Int32()},
+   {&RawMachineAssembler::Int64AddWithOverflow, "Int64AddWithOverflow",
+    kArm64Add, MachineType::Int64()},
+   {&RawMachineAssembler::Int64SubWithOverflow, "Int64SubWithOverflow",
+    kArm64Sub, MachineType::Int64()}};

// ARM64 shift instructions.
const Shift kShiftInstructions[] = {

@@ -1606,6 +1609,29 @@ TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
  }
}
TEST_P(InstructionSelectorOvfAddSubTest, RORShift) {
  // ADD and SUB do not support ROR shifts; make sure we do not try
  // to merge them into the ADD/SUB instruction.
  const MachInst2 dpi = GetParam();
  const MachineType type = dpi.machine_type;
  auto rotate = &RawMachineAssembler::Word64Ror;
  ArchOpcode rotate_opcode = kArm64Ror;
  if (type == MachineType::Int32()) {
    rotate = &RawMachineAssembler::Word32Ror;
    rotate_opcode = kArm64Ror32;
  }
  TRACED_FORRANGE(int32_t, imm, -32, 63) {
    StreamBuilder m(this, type, type, type);
    Node* const p0 = m.Parameter(0);
    Node* const p1 = m.Parameter(1);
    Node* r = (m.*rotate)(p1, m.Int32Constant(imm));
    m.Return((m.*dpi.constructor)(p0, r));
    Stream s = m.Build();
    ASSERT_EQ(2U, s.size());
    EXPECT_EQ(rotate_opcode, s[0]->arch_opcode());
    EXPECT_EQ(dpi.arch_opcode, s[1]->arch_opcode());
  }
}
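
The two-instruction expectation above reflects an architectural constraint: the ARM64 ADD/SUB (shifted register) forms encode only LSL, LSR and ASR, so a ROR must stay a separate instruction. A minimal sketch of that selection rule (hypothetical helper, not V8 code):

// Hypothetical helper, for illustration only: which shift kinds an ARM64
// ADD/SUB (shifted register) operand can encode.
enum ShiftKind { kLSL, kLSR, kASR, kROR };

bool CanMergeShiftIntoAddSub(ShiftKind shift) {
  // The ADD/SUB shifted-register encodings reserve the ROR shift type, so a
  // rotate remains a separate instruction (hence the stream size of 2 above).
  return shift != kROR;
}

int main() { return CanMergeShiftIntoAddSub(kROR) ? 1 : 0; }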
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
                        InstructionSelectorOvfAddSubTest,

@@ -2999,6 +3025,7 @@ namespace {
struct IntegerCmp {
  MachInst2 mi;
  FlagsCondition cond;
+  FlagsCondition commuted_cond;
};

@@ -3011,19 +3038,24 @@ std::ostream& operator<<(std::ostream& os, const IntegerCmp& cmp) {
const IntegerCmp kIntegerCmpInstructions[] = {
    {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
      MachineType::Int32()},
+     kEqual,
     kEqual},
    {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
      MachineType::Int32()},
-     kSignedLessThan},
+     kSignedLessThan,
+     kSignedGreaterThan},
    {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
      kArm64Cmp32, MachineType::Int32()},
-     kSignedLessThanOrEqual},
+     kSignedLessThanOrEqual,
+     kSignedGreaterThanOrEqual},
    {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
      MachineType::Uint32()},
-     kUnsignedLessThan},
+     kUnsignedLessThan,
+     kUnsignedGreaterThan},
    {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
      kArm64Cmp32, MachineType::Uint32()},
-     kUnsignedLessThanOrEqual}};
+     kUnsignedLessThanOrEqual,
+     kUnsignedGreaterThanOrEqual}};
}  // namespace

@@ -3060,6 +3092,156 @@ TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
  }
}
TEST_F(InstructionSelectorTest, CmpWithImmediateOnLeft) {
  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
    TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
      // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
      // is tested elsewhere.
      if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
      StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
      Node* const p0 = m.Parameter(0);
      RawMachineLabel a, b;
      m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), p0), &a, &b);
      m.Bind(&a);
      m.Return(m.Int32Constant(1));
      m.Bind(&b);
      m.Return(m.Int32Constant(0));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
      ASSERT_LE(2U, s[0]->InputCount());
      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
      EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    }
  }
}
TEST_F(InstructionSelectorTest, CmnWithImmediateOnLeft) {
  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
    TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
      // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
      // is tested elsewhere.
      if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
      StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
      Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
      RawMachineLabel a, b;
      m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), sub), &a, &b);
      m.Bind(&a);
      m.Return(m.Int32Constant(1));
      m.Bind(&b);
      m.Return(m.Int32Constant(0));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
      ASSERT_LE(2U, s[0]->InputCount());
      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
      EXPECT_EQ(cmp.cond, s[0]->flags_condition());
      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
    }
  }
}
TEST_F(InstructionSelectorTest, CmpSignedExtendByteOnLeft) {
  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
    StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                    MachineType::Int32());
    Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
                               m.Int32Constant(24));
    m.Return((m.*cmp.mi.constructor)(extend, m.Parameter(1)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
    EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
  }
}
TEST_F(InstructionSelectorTest, CmnSignedExtendByteOnLeft) {
  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
    StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                    MachineType::Int32());
    Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
    Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
                               m.Int32Constant(24));
    m.Return((m.*cmp.mi.constructor)(extend, sub));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
    EXPECT_EQ(cmp.cond, s[0]->flags_condition());
    EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
  }
}
TEST_F(InstructionSelectorTest, CmpShiftByImmediateOnLeft) {
  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
    TRACED_FOREACH(Shift, shift, kShiftInstructions) {
      // Only test relevant shifted operands.
      if (shift.mi.machine_type != MachineType::Int32()) continue;
      // The available shift operand range is `0 <= imm < 32`, but we also test
      // that immediates outside this range are handled properly (modulo-32).
      TRACED_FORRANGE(int, imm, -32, 63) {
        StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                        MachineType::Int32());
        m.Return((m.*cmp.mi.constructor)(
            (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
            m.Parameter(0)));
        Stream s = m.Build();
        // Cmp does not support ROR shifts.
        if (shift.mi.arch_opcode == kArm64Ror32) {
          ASSERT_EQ(2U, s.size());
          continue;
        }
        ASSERT_EQ(1U, s.size());
        EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
        EXPECT_EQ(shift.mode, s[0]->addressing_mode());
        EXPECT_EQ(3U, s[0]->InputCount());
        EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
        EXPECT_EQ(1U, s[0]->OutputCount());
        EXPECT_EQ(kFlags_set, s[0]->flags_mode());
        EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
      }
    }
  }
}
TEST_F(InstructionSelectorTest, CmnShiftByImmediateOnLeft) {
  TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
    TRACED_FOREACH(Shift, shift, kShiftInstructions) {
      // Only test relevant shifted operands.
      if (shift.mi.machine_type != MachineType::Int32()) continue;
      // The available shift operand range is `0 <= imm < 32`, but we also test
      // that immediates outside this range are handled properly (modulo-32).
      TRACED_FORRANGE(int, imm, -32, 63) {
        StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                        MachineType::Int32());
        Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
        m.Return((m.*cmp.mi.constructor)(
            (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
            sub));
        Stream s = m.Build();
        // Cmn does not support ROR shifts.
        if (shift.mi.arch_opcode == kArm64Ror32) {
          ASSERT_EQ(2U, s.size());
          continue;
        }
        ASSERT_EQ(1U, s.size());
        EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
        EXPECT_EQ(shift.mode, s[0]->addressing_mode());
        EXPECT_EQ(3U, s[0]->InputCount());
        EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
        EXPECT_EQ(1U, s[0]->OutputCount());
        EXPECT_EQ(kFlags_set, s[0]->flags_mode());
        EXPECT_EQ(cmp.cond, s[0]->flags_condition());
      }
    }
  }
}
// -----------------------------------------------------------------------------
// Miscellaneous
...