Commit 2cacdf9e authored by Georgia Kouveli, committed by Commit Bot

[arm] [arm64] Match LoadStackPointer with comparison.

When encountering a LoadStackPointer input to a comparison, generate a register
LocationOperand that points to the stack pointer. This can avoid unnecessary
spilling of the stack pointer.

Since sp is a special register for arm64, we need to add a mechanism to print
its name in RegisterConfiguration.

This is a port of https://chromium-review.googlesource.com/1055568 that made
the same change for arm.

It also ports the tests added in
https://chromium-review.googlesource.com/1099068 to arm and arm64.

Bug: v8:7844
Change-Id: I5adc672ff877b9888ef755e8e60e4eabbc61061b
Reviewed-on: https://chromium-review.googlesource.com/1107810
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Georgia Kouveli <georgia.kouveli@arm.com>
Cr-Commit-Position: refs/heads/master@{#53889}
parent e6799023
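For reference, the arm change being ported (https://chromium-review.googlesource.com/1055568) adds an analogous helper to the arm operand generator. A minimal sketch of that shape, assuming it mirrors the arm64 helper shown in the diff below but with a 32-bit representation (this is an illustration, not a quote of that CL):

// Sketch only: assumed arm counterpart of the arm64 UseRegisterOrStackPointer
// below. On arm, cmp operates on 32-bit registers, so no sp_allowed guard is
// shown here.
InstructionOperand UseRegisterOrStackPointer(Node* node) {
  if (node->opcode() == IrOpcode::kLoadStackPointer) {
    // Name sp directly via an explicit operand instead of forcing the value
    // into an allocatable register (which would mean an extra move/spill).
    return LocationOperand(LocationOperand::EXPLICIT,
                           LocationOperand::REGISTER,
                           MachineRepresentation::kWord32, sp.code());
  }
  return UseRegister(node);
}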
......@@ -2887,6 +2887,10 @@ class Assembler : public AssemblerBase {
return reinterpret_cast<byte*>(instr) - buffer_;
}
static const char* GetSpecialRegisterName(int code) {
return (code == kSPRegInternalCode) ? "sp" : "UNKNOWN";
}
// Register encoding.
static Instr Rd(CPURegister rd) {
DCHECK_NE(rd.code(), kSPRegInternalCode);
......
......@@ -227,6 +227,9 @@ class AssemblerBase : public Malloced {
return FlushICache(reinterpret_cast<void*>(start), size);
}
// Used to print the name of some special registers.
static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }
protected:
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
......
......@@ -49,6 +49,16 @@ class Arm64OperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
// Use the stack pointer if the node is LoadStackPointer, otherwise assign a
// register.
InstructionOperand UseRegisterOrStackPointer(Node* node, bool sp_allowed) {
if (sp_allowed && node->opcode() == IrOpcode::kLoadStackPointer)
return LocationOperand(LocationOperand::EXPLICIT,
LocationOperand::REGISTER,
MachineRepresentation::kWord64, sp.code());
return UseRegister(node);
}
// Use the provided node if it has the required value, or create a
// TempImmediate otherwise.
InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
......@@ -1747,17 +1757,21 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
+ if (right->opcode() == IrOpcode::kLoadStackPointer ||
+     g.CanBeImmediate(left, immediate_mode)) {
+   if (!commutative) cont->Commute();
+   std::swap(left, right);
+ }
  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
-   VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
-                cont);
- } else if (g.CanBeImmediate(left, immediate_mode)) {
-   if (!commutative) cont->Commute();
-   VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
-                cont);
+   VisitCompare(selector, opcode,
+                g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp),
+                g.UseImmediate(right), cont);
  } else {
-   VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
-                cont);
+   VisitCompare(selector, opcode,
+                g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp),
+                g.UseRegister(right), cont);
  }
}
......
......@@ -184,7 +184,8 @@ std::ostream& operator<<(std::ostream& os,
os << "[fp_stack:" << allocated.index();
} else if (op.IsRegister()) {
os << "["
<< GetRegConfig()->GetGeneralRegisterName(allocated.register_code())
<< GetRegConfig()->GetGeneralOrSpecialRegisterName(
allocated.register_code())
<< "|R";
} else if (op.IsDoubleRegister()) {
os << "["
......
......@@ -316,6 +316,12 @@ RegisterConfiguration::RegisterConfiguration(
}
}
const char* RegisterConfiguration::GetGeneralOrSpecialRegisterName(
int code) const {
if (code < num_general_registers_) return GetGeneralRegisterName(code);
return Assembler::GetSpecialRegisterName(code);
}
// Assert that kFloat32, kFloat64, and kSimd128 are consecutive values.
STATIC_ASSERT(static_cast<int>(MachineRepresentation::kSimd128) ==
static_cast<int>(MachineRepresentation::kFloat64) + 1);
......
......@@ -102,7 +102,9 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
bool IsAllocatableSimd128Code(int index) const {
return ((1 << index) & allocatable_simd128_codes_mask_) != 0;
}
const char* GetGeneralOrSpecialRegisterName(int code) const;
const char* GetGeneralRegisterName(int code) const {
DCHECK_LT(code, num_general_registers_);
return general_register_names_[code];
}
const char* GetFloatRegisterName(int code) const {
......
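Taken together, the instruction.cc and RegisterConfiguration hunks above let the operand printer handle the explicit sp operand. A hypothetical illustration of the new lookup path, using names from those hunks (GetRegConfig() is the helper already used by the operand printer; kSPRegInternalCode is arm64's internal code for sp):

// Hypothetical usage sketch, not part of the commit.
// A code inside the general-register range behaves exactly as before:
const char* general = GetRegConfig()->GetGeneralOrSpecialRegisterName(0);
// e.g. "x0" on arm64.
// kSPRegInternalCode lies outside num_general_registers_, so instead of
// tripping the DCHECK in GetGeneralRegisterName() the lookup falls through to
// Assembler::GetSpecialRegisterName() and yields "sp" (or "UNKNOWN" on
// targets whose Assembler does not name that code).
const char* special =
    GetRegConfig()->GetGeneralOrSpecialRegisterName(kSPRegInternalCode);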
......@@ -3266,6 +3266,54 @@ TEST_F(InstructionSelectorTest, SpeculationFence) {
EXPECT_EQ(kArmDsbIsb, s[0]->arch_opcode());
}
TEST_F(InstructionSelectorTest, StackCheck0) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
Node* const sp = m.LoadStackPointer();
Node* const stack_limit = m.Load(MachineType::Int32(), m.Parameter(0));
Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
RawMachineLabel if_true, if_false;
m.Branch(interrupt, &if_true, &if_false);
m.Bind(&if_true);
m.Return(m.Int32Constant(1));
m.Bind(&if_false);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
EXPECT_EQ(kArmCmp, s[1]->arch_opcode());
EXPECT_EQ(4U, s[1]->InputCount());
EXPECT_EQ(0U, s[1]->OutputCount());
}
TEST_F(InstructionSelectorTest, StackCheck1) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
Node* const sp = m.LoadStackPointer();
Node* const stack_limit = m.Load(MachineType::Int32(), m.Parameter(0));
Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
RawMachineLabel if_true, if_false;
m.Branch(sp_within_limit, &if_true, &if_false);
m.Bind(&if_true);
m.Return(m.Int32Constant(1));
m.Bind(&if_false);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
EXPECT_EQ(kArmCmp, s[1]->arch_opcode());
EXPECT_EQ(4U, s[1]->InputCount());
EXPECT_EQ(0U, s[1]->OutputCount());
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -4426,6 +4426,54 @@ TEST_F(InstructionSelectorTest, SpeculationFence) {
EXPECT_EQ(kArm64DsbIsb, s[0]->arch_opcode());
}
TEST_F(InstructionSelectorTest, StackCheck0) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
Node* const sp = m.LoadStackPointer();
Node* const stack_limit = m.Load(MachineType::Int64(), m.Parameter(0));
Node* const interrupt = m.UintPtrLessThan(sp, stack_limit);
RawMachineLabel if_true, if_false;
m.Branch(interrupt, &if_true, &if_false);
m.Bind(&if_true);
m.Return(m.Int32Constant(1));
m.Bind(&if_false);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Ldr, s[0]->arch_opcode());
EXPECT_EQ(kArm64Cmp, s[1]->arch_opcode());
EXPECT_EQ(4U, s[1]->InputCount());
EXPECT_EQ(0U, s[1]->OutputCount());
}
TEST_F(InstructionSelectorTest, StackCheck1) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
Node* const sp = m.LoadStackPointer();
Node* const stack_limit = m.Load(MachineType::Int64(), m.Parameter(0));
Node* const sp_within_limit = m.UintPtrLessThan(stack_limit, sp);
RawMachineLabel if_true, if_false;
m.Branch(sp_within_limit, &if_true, &if_false);
m.Bind(&if_true);
m.Return(m.Int32Constant(1));
m.Bind(&if_false);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Ldr, s[0]->arch_opcode());
EXPECT_EQ(kArm64Cmp, s[1]->arch_opcode());
EXPECT_EQ(4U, s[1]->InputCount());
EXPECT_EQ(0U, s[1]->OutputCount());
}
} // namespace compiler
} // namespace internal
} // namespace v8