Commit a456134b authored by martyn.capewell, committed by Commit bot

[turbofan] Negate with shifted input for ARM64

Support negation with a shifted input on ARM64 by allowing the zero register as
the left-hand input to binary operations, and by removing the explicit Neg
instruction support.

Review URL: https://codereview.chromium.org/1404093003

Cr-Commit-Position: refs/heads/master@{#31263}
parent 2ad56552
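
For context, the change leans on an A64 identity: neg is an alias of sub with the zero register (wzr/xzr) as the first source operand, and the alias extends to shifted-register forms. Once the code generator can place the zero register on the left of a binary operation, 0 - (x << n) selects as a single "sub Rd, xzr, Rm, lsl #n" and the dedicated Neg opcode becomes redundant. Below is a minimal standalone sketch of the arithmetic identity the alias encodes (plain C++, not V8 code; shift amounts and test values are arbitrary):

    #include <cassert>
    #include <cstdint>

    // What kArm64Neg/kArm64Neg32 used to express: negate a shifted value.
    int64_t neg_shifted(int64_t x, unsigned shift) { return -(x << shift); }

    // What kArm64Sub with the zero register on the left expresses instead:
    // "sub xd, xzr, xm, lsl #shift" computes 0 - (x << shift).
    int64_t sub_zero_shifted(int64_t x, unsigned shift) {
      return INT64_C(0) - (x << shift);
    }

    int main() {
      // Nonnegative inputs only, so x << 3 is well defined in C++.
      for (int64_t x : {0, 7, 1 << 20}) {
        assert(neg_shifted(x, 3) == sub_zero_shifted(x, 3));
      }
      return 0;
    }
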
@@ -41,8 +41,26 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
     return ToRegister(instr_->InputAt(index)).W();
   }
 
+  Register InputOrZeroRegister32(size_t index) {
+    DCHECK(instr_->InputAt(index)->IsRegister() ||
+           (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0)));
+    if (instr_->InputAt(index)->IsImmediate()) {
+      return wzr;
+    }
+    return InputRegister32(index);
+  }
+
   Register InputRegister64(size_t index) { return InputRegister(index); }
 
+  Register InputOrZeroRegister64(size_t index) {
+    DCHECK(instr_->InputAt(index)->IsRegister() ||
+           (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0)));
+    if (instr_->InputAt(index)->IsImmediate()) {
+      return xzr;
+    }
+    return InputRegister64(index);
+  }
+
   Operand InputImmediate(size_t index) {
     return ToImmediate(instr_->InputAt(index));
   }
@@ -519,28 +537,33 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArm64Add:
-      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
+             i.InputOperand2_64(1));
       break;
     case kArm64Add32:
       if (FlagsModeField::decode(opcode) != kFlags_none) {
-        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+        __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                 i.InputOperand2_32(1));
       } else {
-        __ Add(i.OutputRegister32(), i.InputRegister32(0),
+        __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
       }
       break;
     case kArm64And:
-      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
+             i.InputOperand2_64(1));
       break;
     case kArm64And32:
-      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+             i.InputOperand2_32(1));
       break;
     case kArm64Bic:
-      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
+             i.InputOperand2_64(1));
       break;
     case kArm64Bic32:
-      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+             i.InputOperand2_32(1));
       break;
     case kArm64Mul:
       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -624,45 +647,48 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArm64Not32:
       __ Mvn(i.OutputRegister32(), i.InputOperand32(0));
       break;
-    case kArm64Neg:
-      __ Neg(i.OutputRegister(), i.InputOperand(0));
-      break;
-    case kArm64Neg32:
-      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
-      break;
     case kArm64Or:
-      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
+             i.InputOperand2_64(1));
       break;
     case kArm64Or32:
-      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+             i.InputOperand2_32(1));
       break;
     case kArm64Orn:
-      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
+             i.InputOperand2_64(1));
       break;
     case kArm64Orn32:
-      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+             i.InputOperand2_32(1));
       break;
     case kArm64Eor:
-      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
+             i.InputOperand2_64(1));
       break;
     case kArm64Eor32:
-      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+             i.InputOperand2_32(1));
       break;
     case kArm64Eon:
-      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
+             i.InputOperand2_64(1));
       break;
     case kArm64Eon32:
-      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+             i.InputOperand2_32(1));
       break;
     case kArm64Sub:
-      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
+             i.InputOperand2_64(1));
       break;
     case kArm64Sub32:
       if (FlagsModeField::decode(opcode) != kFlags_none) {
-        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
+        __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                 i.InputOperand2_32(1));
       } else {
-        __ Sub(i.OutputRegister32(), i.InputRegister32(0),
+        __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
       }
       break;
@@ -747,10 +773,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ Clz(i.OutputRegister32(), i.InputRegister32(0));
       break;
     case kArm64Cmp:
-      __ Cmp(i.InputRegister(0), i.InputOperand(1));
+      __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
       break;
     case kArm64Cmp32:
-      __ Cmp(i.InputRegister32(0), i.InputOperand2_32(1));
+      __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Cmn:
       __ Cmn(i.InputRegister(0), i.InputOperand(1));
......
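
A note on the contract behind InputOrZeroRegister32/64 above: the code generator never materializes a nonzero immediate in these slots. The DCHECKs document that the instruction selector (via UseRegisterOrImmediateZero, below) only ever routes the constant zero here, and the generator reads it back as wzr/xzr. The following is a simplified standalone model of that two-sided agreement, with hypothetical stand-in types rather than V8's classes:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <utility>

    // Stand-in for an instruction input: either a register name or an immediate.
    struct Input {
      bool is_immediate;
      int64_t value;
      std::string reg;
    };

    // Selector side (models UseRegisterOrImmediateZero): only a constant zero
    // is encoded as an immediate; everything else gets a register.
    Input SelectLeftOperand(bool is_constant, int64_t value, std::string reg) {
      if (is_constant && value == 0) return {true, 0, ""};
      return {false, 0, std::move(reg)};
    }

    // Generator side (models InputOrZeroRegister64): an immediate input must
    // be zero, and is materialized as the hardwired zero register xzr.
    std::string InputOrZeroRegister64(const Input& in) {
      assert(!in.is_immediate || in.value == 0);  // mirrors the DCHECK
      return in.is_immediate ? "xzr" : in.reg;
    }

    int main() {
      Input zero = SelectLeftOperand(/*is_constant=*/true, /*value=*/0, "x1");
      assert(InputOrZeroRegister64(zero) == "xzr");
      Input reg = SelectLeftOperand(/*is_constant=*/false, /*value=*/0, "x1");
      assert(InputOrZeroRegister64(reg) == "x1");
      return 0;
    }
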
@@ -55,8 +55,6 @@ namespace compiler {
   V(Arm64Umod32) \
   V(Arm64Not) \
   V(Arm64Not32) \
-  V(Arm64Neg) \
-  V(Arm64Neg32) \
   V(Arm64Lsl) \
   V(Arm64Lsl32) \
   V(Arm64Lsr) \
......
@@ -37,6 +37,15 @@ class Arm64OperandGenerator final : public OperandGenerator {
     return UseRegister(node);
   }
 
+  // Use the zero register if the node has the immediate value zero, otherwise
+  // assign a register.
+  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+    if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
   // Use the provided node if it has the required value, or create a
   // TempImmediate otherwise.
   InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
@@ -247,18 +256,18 @@ void VisitBinop(InstructionSelector* selector, Node* node,
   } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                               !is_add_sub)) {
     Matcher m_shift(right_node);
-    inputs[input_count++] = g.UseRegister(left_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                              !is_add_sub)) {
     if (is_cmp) cont->Commute();
     Matcher m_shift(left_node);
-    inputs[input_count++] = g.UseRegister(right_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else {
-    inputs[input_count++] = g.UseRegister(left_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(right_node);
   }
@@ -997,12 +1006,7 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
     }
   }
 
-  if (m.left().Is(0)) {
-    Emit(kArm64Neg32, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
-  } else {
-    VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
-  }
+  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
 }
@@ -1023,11 +1027,7 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
     }
   }
 
-  if (m.left().Is(0)) {
-    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
-  } else {
-    VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
-  }
+  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
 }
......
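
Why the m.left().Is(0) special cases in VisitInt32Sub/VisitInt64Sub could simply be deleted: VisitAddSub now falls through to VisitBinop, where a constant-zero left operand is legal, so 0 - x still selects as a single Sub; and unlike the old kArm64Neg path, which took the shift input through UseRegister and therefore forced the shift to be selected separately, TryMatchAnyShift can fold a right-hand shift into the same instruction. A hypothetical before/after trace for 0 - (w1 << 3) (illustrative register names, not actual selector output):

    #include <iostream>

    int main() {
      // Old: the shift was selected on its own, then kArm64Neg32 consumed
      // the already-computed result, costing two instructions:
      std::cout << "old: lsl w2, w1, #3 ; neg w0, w2\n";
      // New: VisitBinop folds the shift and uses wzr for the zero operand:
      std::cout << "new: sub w0, wzr, w1, lsl #3\n";
      return 0;
    }
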
@@ -43,6 +43,10 @@ class InstructionOperandConverter {
     return ToConstant(instr_->InputAt(index)).ToInt32();
   }
 
+  int64_t InputInt64(size_t index) {
+    return ToConstant(instr_->InputAt(index)).ToInt64();
+  }
+
   int8_t InputInt8(size_t index) {
     return static_cast<int8_t>(InputInt32(index));
   }
......
@@ -583,7 +583,6 @@ TEST_F(InstructionSelectorTest, AddImmediateOnLeft) {
 
 TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
-  // Subtraction with zero on the left maps to Neg.
   {
     // 32-bit subtract.
     StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
@@ -591,8 +590,10 @@ TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Neg32, s[0]->arch_opcode());
-    EXPECT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(kArm64Sub32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(0)));
     EXPECT_EQ(1U, s[0]->OutputCount());
   }
   {
@@ -602,13 +603,71 @@ TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Neg, s[0]->arch_opcode());
-    EXPECT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(kArm64Sub, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
+    EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
     EXPECT_EQ(1U, s[0]->OutputCount());
   }
 }
 
+
+TEST_F(InstructionSelectorTest, SubZeroOnLeftWithShift) {
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    {
+      // Test 32-bit operations. Ignore ROR shifts, as subtract does not
+      // support them.
+      if ((shift.mi.machine_type != kMachInt32) ||
+          (shift.mi.arch_opcode == kArm64Ror32) ||
+          (shift.mi.arch_opcode == kArm64Ror))
+        continue;
+      TRACED_FORRANGE(int, imm, -32, 63) {
+        StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+        m.Return(m.Int32Sub(
+            m.Int32Constant(0),
+            (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+        Stream s = m.Build();
+        ASSERT_EQ(1U, s.size());
+        EXPECT_EQ(kArm64Sub32, s[0]->arch_opcode());
+        ASSERT_EQ(3U, s[0]->InputCount());
+        EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
+        EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(0)));
+        EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+        EXPECT_EQ(1U, s[0]->OutputCount());
+      }
+    }
+    {
+      // Test 64-bit operations. Ignore ROR shifts, as subtract does not
+      // support them.
+      if ((shift.mi.machine_type != kMachInt64) ||
+          (shift.mi.arch_opcode == kArm64Ror32) ||
+          (shift.mi.arch_opcode == kArm64Ror))
+        continue;
+      TRACED_FORRANGE(int, imm, -32, 127) {
+        StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+        m.Return(m.Int64Sub(
+            m.Int64Constant(0),
+            (m.*shift.mi.constructor)(m.Parameter(1), m.Int64Constant(imm))));
+        Stream s = m.Build();
+        ASSERT_EQ(1U, s.size());
+        EXPECT_EQ(kArm64Sub, s[0]->arch_opcode());
+        ASSERT_EQ(3U, s[0]->InputCount());
+        EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
+        EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(0)));
+        EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+        EXPECT_EQ(1U, s[0]->OutputCount());
+      }
+    }
+  }
+}
+
+
 TEST_F(InstructionSelectorTest, AddNegImmediateOnLeft) {
   {
     // 32-bit add.
......