Commit 49baecc4 authored by baptiste.afsa, committed by Commit bot

[turbofan] Mark arm64 cbz/cbnz and tbz/tbnz instructions as branch instructions.

The instruction selector now selects pseudo instructions, CompareAndBranch or
TestAndBranch, which are associated with their continuations so that the
generic code in the code generator treats them as branch instructions and can
apply optimizations such as avoiding the branch when the code can fall through.
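
The mechanism, in a nutshell: the continuation's Encode() packs the branch
condition into the pseudo instruction's opcode word, and the generic
branch-assembly step recovers both pieces, emits the real cbz/cbnz or tbz/tbnz,
and skips the unconditional jump to the false block when that block is next in
assembly order. Below is a simplified, self-contained sketch of that scheme,
not V8's actual FlagsContinuation/InstructionCode machinery; Encode,
AssembleBranch and the opcode/condition names are illustrative stand-ins.

// Minimal sketch of the encode/decode idea behind this change
// (illustrative names, not V8's real classes).
#include <cstdint>
#include <iostream>

enum ArchOpcode : uint16_t { kTestAndBranch32 = 1, kCompareAndBranch32 = 2 };
enum FlagsCondition : uint16_t { kEqual = 0, kNotEqual = 1 };

// The selector attaches the condition to the pseudo instruction by packing it
// into the high half of the opcode word (stand-in for cont.Encode(opcode)).
constexpr uint32_t Encode(ArchOpcode op, FlagsCondition cond) {
  return static_cast<uint32_t>(op) | (static_cast<uint32_t>(cond) << 16);
}
constexpr ArchOpcode OpcodeOf(uint32_t instr) {
  return static_cast<ArchOpcode>(instr & 0xFFFFu);
}
constexpr FlagsCondition ConditionOf(uint32_t instr) {
  return static_cast<FlagsCondition>(instr >> 16);
}

// Generic branch assembly: emit the conditional branch to the true block and
// only emit an unconditional branch to the false block when we cannot simply
// fall through to it -- the optimization the commit message refers to.
void AssembleBranch(uint32_t instr, int true_block, int false_block,
                    int next_block) {
  const bool fallthru = (false_block == next_block);
  if (OpcodeOf(instr) == kTestAndBranch32) {
    // Bit index fixed at #0 for brevity; in the real code it is an operand.
    std::cout << "  " << (ConditionOf(instr) == kEqual ? "tbz" : "tbnz")
              << " w0, #0, L" << true_block << "\n";
  } else {
    std::cout << "  " << (ConditionOf(instr) == kEqual ? "cbz" : "cbnz")
              << " w0, L" << true_block << "\n";
  }
  if (!fallthru) std::cout << "  b L" << false_block << "\n";
}

int main() {
  // False block follows immediately: no extra "b" is emitted.
  AssembleBranch(Encode(kTestAndBranch32, kNotEqual), 3, 2, 2);
  // False block is elsewhere: the unconditional "b" is kept.
  AssembleBranch(Encode(kCompareAndBranch32, kEqual), 5, 7, 6);
  return 0;
}

In the diff below, the decode side lives in AssembleArchBranch, which switches
on the pseudo opcode and the continuation's condition to pick cbz/cbnz or
tbz/tbnz.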

R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/798553002

Cr-Commit-Position: refs/heads/master@{#25773}
parent 8897ab89
......@@ -280,13 +280,6 @@ class OutOfLineLoadInteger FINAL : public OutOfLineCode {
} while (0)
#define ASSEMBLE_BRANCH_TO(target) \
do { \
bool fallthrough = IsNextInAssemblyOrder(target); \
if (!fallthrough) __ B(GetLabel(target)); \
} while (0)
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
......@@ -541,29 +534,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
i.InputInt8(2));
break;
case kArm64Tbz:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), GetLabel(i.InputRpo(2)));
ASSEMBLE_BRANCH_TO(i.InputRpo(3));
break;
case kArm64Tbz32:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), GetLabel(i.InputRpo(2)));
ASSEMBLE_BRANCH_TO(i.InputRpo(3));
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
// Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
break;
case kArm64Tbnz:
__ Tbnz(i.InputRegister64(0), i.InputInt6(1), GetLabel(i.InputRpo(2)));
ASSEMBLE_BRANCH_TO(i.InputRpo(3));
break;
case kArm64Tbnz32:
__ Tbnz(i.InputRegister32(0), i.InputInt5(1), GetLabel(i.InputRpo(2)));
ASSEMBLE_BRANCH_TO(i.InputRpo(3));
break;
case kArm64Cbz32:
__ Cbz(i.InputRegister32(0), GetLabel(i.InputRpo(1)));
ASSEMBLE_BRANCH_TO(i.InputRpo(2));
break;
case kArm64Cbnz32:
__ Cbnz(i.InputRegister32(0), GetLabel(i.InputRpo(1)));
ASSEMBLE_BRANCH_TO(i.InputRpo(2));
case kArm64CompareAndBranch32:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64Claim: {
int words = MiscField::decode(instr->opcode());
......@@ -766,7 +742,44 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Arm64OperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
switch (branch->condition) {
FlagsCondition condition = branch->condition;
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
break;
case kNotEqual:
__ Cbnz(i.InputRegister32(0), tlabel);
break;
default:
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
break;
case kNotEqual:
__ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
break;
default:
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
break;
case kNotEqual:
__ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
break;
default:
UNREACHABLE();
}
} else {
switch (condition) {
case kUnorderedEqual:
__ B(vs, flabel);
// Fall through.
......@@ -822,6 +835,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
__ B(vc, tlabel);
break;
}
}
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
......
......@@ -70,12 +70,9 @@ namespace compiler {
V(Arm64Sxtw) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
V(Arm64Tbz) \
V(Arm64Tbz32) \
V(Arm64Tbnz) \
V(Arm64Tbnz32) \
V(Arm64Cbz32) \
V(Arm64Cbnz32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePairZero) \
......
......@@ -1347,9 +1347,8 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
ArchOpcode opcode =
(cont.condition() == kEqual) ? kArm64Tbz32 : kArm64Tbnz32;
Emit(opcode, NULL, g.UseRegister(m.left().node()),
Emit(cont.Encode(kArm64TestAndBranch32), NULL,
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros32(m.right().Value())),
g.Label(cont.true_block()),
......@@ -1366,9 +1365,8 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
ArchOpcode opcode =
(cont.condition() == kEqual) ? kArm64Tbz : kArm64Tbnz;
Emit(opcode, NULL, g.UseRegister(m.left().node()),
Emit(cont.Encode(kArm64TestAndBranch), NULL,
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros64(m.right().Value())),
g.Label(cont.true_block()),
......@@ -1384,9 +1382,8 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
// Branch could not be combined with a compare, compare against 0 and branch.
DCHECK((cont.condition() == kEqual) || (cont.condition() == kNotEqual));
ArchOpcode opcode = (cont.condition() == kEqual) ? kArm64Cbz32 : kArm64Cbnz32;
Emit(opcode, NULL, g.UseRegister(value), g.Label(cont.true_block()),
Emit(cont.Encode(kArm64CompareAndBranch32), NULL, g.UseRegister(value),
g.Label(cont.true_block()),
g.Label(cont.false_block()))->MarkAsControl();
}
......
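
A note on the selector change above: the "mask has only one bit set" test that
gates kArm64TestAndBranch32/kArm64TestAndBranch works because such a mask is a
power of two, and the immediate handed to tbz/tbnz is the index of that bit,
which the selector obtains with base::bits::CountTrailingZeros32/64. A small
standalone illustration follows; the helper names are hypothetical, not V8's
base::bits API.

#include <cassert>
#include <cstdint>

// True when exactly one bit of |mask| is set, i.e. the AND can be lowered to
// a single test-bit-and-branch instruction.
bool IsSingleBitMask(uint32_t mask) {
  return mask != 0 && (mask & (mask - 1)) == 0;
}

// Index of the set bit (the tbz/tbnz bit-number immediate); only meaningful
// when IsSingleBitMask(mask) is true.
int BitIndex(uint32_t mask) {
  int index = 0;
  while ((mask & 1u) == 0) {
    mask >>= 1;
    ++index;
  }
  return index;
}

int main() {
  assert(IsSingleBitMask(0x40));   // 1 << 6: eligible for tbz/tbnz.
  assert(!IsSingleBitMask(0x41));  // two bits set: not eligible.
  assert(BitIndex(0x40) == 6);     // the bit-number immediate for tbz/tbnz.
  return 0;
}
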
......@@ -808,7 +808,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tbnz32, s[0]->arch_opcode());
EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
......@@ -827,7 +828,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tbz32, s[0]->arch_opcode());
EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
......@@ -847,7 +849,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tbnz32, s[0]->arch_opcode());
EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
......@@ -866,7 +869,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tbz32, s[0]->arch_opcode());
EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
......@@ -886,7 +890,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tbnz, s[0]->arch_opcode());
EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
......@@ -905,7 +910,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tbz, s[0]->arch_opcode());
EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
......@@ -925,7 +931,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tbnz, s[0]->arch_opcode());
EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
......@@ -944,7 +951,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Tbz, s[0]->arch_opcode());
EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(4U, s[0]->InputCount());
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
......@@ -964,7 +972,8 @@ TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Cbnz32, s[0]->arch_opcode());
EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
}
......@@ -980,7 +989,8 @@ TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Cbz32, s[0]->arch_opcode());
EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
EXPECT_EQ(kEqual, s[0]->flags_condition());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
}
......