Commit 60fb6ea1 authored by pierre.langlois, committed by Commit bot

[turbofan] ARM64: Support shifted indexes in loads and stores

This patch adds support for the `Operand2_R_LSL_I` addressing mode to
loads and stores. This allows merging a shift instruction into a
MemoryOperand. Since the shift immediate is restricted to the log2 of
the operation width, the opportunities to hit this are slim. However,
Ignition's bytecode handlers hit this case all the time:

kind = BYTECODE_HANDLER
name = Star
compiler = turbofan
Instructions (size = 44)
0x23e67280     0  add x1, x19, #0x1 (1)
0x23e67284     4  ldrsb x1, [x20, x1]
0x23e67288     8  sxtw x1, w1
0x23e6728c    12  mov x2, fp
0x23e67290    16  str x0, [x2, x1, lsl #3]
                  ^^^^^^^^^^^^^^^^^^^^^
0x23e67294    20  add x19, x19, #0x2 (2)
0x23e67298    24  ldrb w1, [x20, x19]
0x23e6729c    28  ldr x1, [x21, x1, lsl #3]
                  ^^^^^^^^^^^^^^^^^^^^^
0x23e672a0    32  br x1
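
The underlined instructions are where the shift has been folded into the
memory operand: the `lsl #3` scales the register index by the 8-byte slot
size as part of the store/load itself. As a rough standalone illustration of
the arithmetic being folded (plain C++, not V8 code; the function name is
made up):

    #include <cstdint>

    // str x0, [x2, x1, lsl #3] writes to x2 + (x1 << 3) in one instruction.
    // Without the merged addressing mode, the index needs its own lsl first.
    void StoreToRegisterFile(int64_t* frame, int64_t reg_index, int64_t value) {
      frame[reg_index] = value;  // address = frame + (reg_index << 3)
    }

A compiler targeting ARM64 typically emits exactly the merged form for this
indexing pattern.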

Additionally, I noticed the optimisation occurs once in both the
`StringPrototypeCharAt` and `StringPrototypeCharCodeAt` TurboFan stubs.

BUG=

Review-Url: https://codereview.chromium.org/1972103002
Cr-Commit-Position: refs/heads/master@{#36227}
parent 2e86946f
@@ -159,7 +159,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
     const size_t index = *first_index;
     switch (AddressingModeField::decode(instr_->opcode())) {
       case kMode_None:
-      case kMode_Operand2_R_LSL_I:
       case kMode_Operand2_R_LSR_I:
       case kMode_Operand2_R_ASR_I:
       case kMode_Operand2_R_ROR_I:
@@ -168,6 +167,10 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
       case kMode_Operand2_R_SXTB:
       case kMode_Operand2_R_SXTH:
         break;
+      case kMode_Operand2_R_LSL_I:
+        *first_index += 3;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+                          LSL, InputInt32(index + 2));
       case kMode_MRI:
         *first_index += 2;
         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
@@ -1364,7 +1367,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
       break;
     case kArm64Strb:
-      __ Strb(i.InputOrZeroRegister64(2), i.MemoryOperand());
+      __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
     case kArm64Ldrh:
       __ Ldrh(i.OutputRegister(), i.MemoryOperand());
@@ -1373,31 +1376,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
       break;
     case kArm64Strh:
-      __ Strh(i.InputOrZeroRegister64(2), i.MemoryOperand());
+      __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
     case kArm64LdrW:
       __ Ldr(i.OutputRegister32(), i.MemoryOperand());
       break;
     case kArm64StrW:
-      __ Str(i.InputOrZeroRegister32(2), i.MemoryOperand());
+      __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
       break;
     case kArm64Ldr:
       __ Ldr(i.OutputRegister(), i.MemoryOperand());
       break;
     case kArm64Str:
-      __ Str(i.InputOrZeroRegister64(2), i.MemoryOperand());
+      __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
     case kArm64LdrS:
       __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
       break;
     case kArm64StrS:
-      __ Str(i.InputFloat32OrZeroRegister(2), i.MemoryOperand());
+      __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
       break;
     case kArm64LdrD:
       __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
       break;
     case kArm64StrD:
-      __ Str(i.InputFloat64OrZeroRegister(2), i.MemoryOperand());
+      __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
       break;
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
......
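
In the operand-converter change above, the new `kMode_Operand2_R_LSL_I` case
consumes three input slots — base register, index register, and the LSL
immediate — and advances `first_index` by 3 so that any following operands
are still found at the right positions. A minimal sketch of that bookkeeping
(hypothetical types, not V8's `Arm64OperandConverter`):

    #include <cassert>
    #include <cstddef>

    // Hypothetical stand-ins for the real operand-conversion machinery.
    struct FakeMemOperand {
      int base, index, shift;
    };

    FakeMemOperand DecodeShiftedMode(const int* inputs, size_t* first_index) {
      const size_t i = *first_index;
      *first_index += 3;  // this addressing mode consumes three input slots
      return {inputs[i + 0], inputs[i + 1], inputs[i + 2]};
    }

    int main() {
      const int inputs[] = {/*base*/ 2, /*index*/ 1, /*lsl amount*/ 3};
      size_t first_index = 0;
      const FakeMemOperand op = DecodeShiftedMode(inputs, &first_index);
      assert(first_index == 3 && op.base == 2 && op.index == 1 && op.shift == 3);
      return 0;
    }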
@@ -121,6 +121,13 @@ class Arm64OperandGenerator final : public OperandGenerator {
     return false;
   }

+  bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
+    // TODO(arm64): Load and Store on 128 bit Q registers is not supported yet.
+    DCHECK_NE(MachineRepresentation::kSimd128, rep);
+    return IsIntegerConstant(node) &&
+           (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
+  }
+
  private:
   bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
     return Assembler::IsImmLSScaled(value, size) ||
@@ -226,6 +233,28 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
   return false;
 }

+bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
+                            InstructionSelector* selector,
+                            MachineRepresentation rep, Node* node, Node* index,
+                            InstructionOperand* index_op,
+                            InstructionOperand* shift_immediate_op) {
+  if (!selector->CanCover(node, index)) return false;
+  if (index->InputCount() != 2) return false;
+  Node* left = index->InputAt(0);
+  Node* right = index->InputAt(1);
+  switch (index->opcode()) {
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord64Shl:
+      if (!g->CanBeLoadStoreShiftImmediate(right, rep)) {
+        return false;
+      }
+      *index_op = g->UseRegister(left);
+      *shift_immediate_op = g->UseImmediate(right);
+      return true;
+    default:
+      return false;
+  }
+}
+
 // Shared routine for multiple binary operations.
 template <typename Matcher>
@@ -359,12 +388,16 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
 void InstructionSelector::VisitLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  MachineRepresentation rep = load_rep.representation();
   Arm64OperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  ArchOpcode opcode = kArchNop;
+  InstructionCode opcode = kArchNop;
   ImmediateMode immediate_mode = kNoImmediate;
-  switch (load_rep.representation()) {
+  InstructionOperand inputs[3];
+  size_t input_count = 0;
+  InstructionOperand outputs[1];
+  switch (rep) {
     case MachineRepresentation::kFloat32:
       opcode = kArm64LdrS;
       immediate_mode = kLoadStoreImm32;
@@ -396,13 +429,25 @@ void InstructionSelector::VisitLoad(Node* node) {
       UNREACHABLE();
       return;
   }

+  outputs[0] = g.DefineAsRegister(node);
+  inputs[0] = g.UseRegister(base);
+
   if (g.CanBeImmediate(index, immediate_mode)) {
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+    input_count = 2;
+    inputs[1] = g.UseImmediate(index);
+    opcode |= AddressingModeField::encode(kMode_MRI);
+  } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[1],
+                                    &inputs[2])) {
+    input_count = 3;
+    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
   } else {
-    Emit(opcode | AddressingModeField::encode(kMode_MRR),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+    input_count = 2;
+    inputs[1] = g.UseRegister(index);
+    opcode |= AddressingModeField::encode(kMode_MRR);
   }
+
+  Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
 }
@@ -456,7 +501,9 @@ void InstructionSelector::VisitStore(Node* node) {
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
-    ArchOpcode opcode = kArchNop;
+    InstructionOperand inputs[4];
+    size_t input_count = 0;
+    InstructionCode opcode = kArchNop;
     ImmediateMode immediate_mode = kNoImmediate;
     switch (rep) {
       case MachineRepresentation::kFloat32:
@@ -490,15 +537,25 @@ void InstructionSelector::VisitStore(Node* node) {
         UNREACHABLE();
         return;
     }

+    inputs[0] = g.UseRegisterOrImmediateZero(value);
+    inputs[1] = g.UseRegister(base);
+
     if (g.CanBeImmediate(index, immediate_mode)) {
-      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           g.UseRegister(base), g.UseImmediate(index),
-           g.UseRegisterOrImmediateZero(value));
+      input_count = 3;
+      inputs[2] = g.UseImmediate(index);
+      opcode |= AddressingModeField::encode(kMode_MRI);
+    } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[2],
+                                      &inputs[3])) {
+      input_count = 4;
+      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
     } else {
-      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
-           g.UseRegister(base), g.UseRegister(index),
-           g.UseRegisterOrImmediateZero(value));
+      input_count = 3;
+      inputs[2] = g.UseRegister(index);
+      opcode |= AddressingModeField::encode(kMode_MRR);
     }
+
+    Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
......
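
Both `VisitLoad` and `VisitStore` above now try `TryMatchLoadStoreShift`
before falling back to the plain register-register mode, and a match only
succeeds when the shift immediate equals log2 of the access width — the one
scale ARM64's register-offset addressing supports. A minimal standalone
sketch of that predicate (hypothetical `Rep` enum standing in for
`MachineRepresentation`):

    #include <cstdint>

    // Hypothetical representation enum standing in for MachineRepresentation.
    enum class Rep { kWord8, kWord16, kWord32, kWord64 };

    constexpr int ElementSizeLog2(Rep rep) {
      return rep == Rep::kWord8    ? 0
             : rep == Rep::kWord16 ? 1
             : rep == Rep::kWord32 ? 2
                                   : 3;
    }

    // Mirrors the folding condition: the shift immediate must equal the log2
    // of the element size, or the pattern cannot use [base, idx, lsl #n].
    constexpr bool CanFoldShift(int64_t shift_immediate, Rep rep) {
      return shift_immediate == ElementSizeLog2(rep);
    }

    static_assert(CanFoldShift(3, Rep::kWord64), "ldr x0, [base, idx, lsl #3]");
    static_assert(!CanFoldShift(2, Rep::kWord64), "shift must match access size");

The tests below exercise exactly this boundary by sweeping the shift from 0
to 4 and asserting the merge happens only at the matching value.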
@@ -2355,8 +2355,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
     EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
     EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
     ASSERT_EQ(3U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
     EXPECT_EQ(0U, s[0]->OutputCount());
   }
 }
@@ -2373,14 +2373,114 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreZero) {
     EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
     EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
     ASSERT_EQ(3U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
-    EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+    EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
     EXPECT_EQ(0U, s[0]->OutputCount());
   }
 }
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithShiftedIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FORRANGE(int, immediate_shift, 0, 4) {
+    // 32 bit shift
+    {
+      StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+                      MachineType::Int32());
+      Node* const index =
+          m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+      m.Return(m.Load(memacc.type, m.Parameter(0), index));
+      Stream s = m.Build();
+      if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+        ASSERT_EQ(1U, s.size());
+        EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+        EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+        EXPECT_EQ(3U, s[0]->InputCount());
+        EXPECT_EQ(1U, s[0]->OutputCount());
+      } else {
+        // Make sure we haven't merged the shift into the load instruction.
+        ASSERT_NE(1U, s.size());
+        EXPECT_NE(memacc.ldr_opcode, s[0]->arch_opcode());
+        EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+      }
+    }
+    // 64 bit shift
+    {
+      StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+                      MachineType::Int64());
+      Node* const index =
+          m.Word64Shl(m.Parameter(1), m.Int64Constant(immediate_shift));
+      m.Return(m.Load(memacc.type, m.Parameter(0), index));
+      Stream s = m.Build();
+      if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+        ASSERT_EQ(1U, s.size());
+        EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+        EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+        EXPECT_EQ(3U, s[0]->InputCount());
+        EXPECT_EQ(1U, s[0]->OutputCount());
+      } else {
+        // Make sure we haven't merged the shift into the load instruction.
+        ASSERT_NE(1U, s.size());
+        EXPECT_NE(memacc.ldr_opcode, s[0]->arch_opcode());
+        EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+      }
+    }
+  }
+}
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithShiftedIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FORRANGE(int, immediate_shift, 0, 4) {
+    // 32 bit shift
+    {
+      StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+                      MachineType::Int32(), memacc.type);
+      Node* const index =
+          m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+      m.Store(memacc.type.representation(), m.Parameter(0), index,
+              m.Parameter(2), kNoWriteBarrier);
+      m.Return(m.Int32Constant(0));
+      Stream s = m.Build();
+      if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+        ASSERT_EQ(1U, s.size());
+        EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+        EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+        EXPECT_EQ(4U, s[0]->InputCount());
+        EXPECT_EQ(0U, s[0]->OutputCount());
+      } else {
+        // Make sure we haven't merged the shift into the store instruction.
+        ASSERT_NE(1U, s.size());
+        EXPECT_NE(memacc.str_opcode, s[0]->arch_opcode());
+        EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+      }
+    }
+    // 64 bit shift
+    {
+      StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+                      MachineType::Int64(), memacc.type);
+      Node* const index =
+          m.Word64Shl(m.Parameter(1), m.Int64Constant(immediate_shift));
+      m.Store(memacc.type.representation(), m.Parameter(0), index,
+              m.Parameter(2), kNoWriteBarrier);
+      m.Return(m.Int64Constant(0));
+      Stream s = m.Build();
+      if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+        ASSERT_EQ(1U, s.size());
+        EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+        EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+        EXPECT_EQ(4U, s[0]->InputCount());
+        EXPECT_EQ(0U, s[0]->OutputCount());
+      } else {
+        // Make sure we haven't merged the shift into the store instruction.
+        ASSERT_NE(1U, s.size());
+        EXPECT_NE(memacc.str_opcode, s[0]->arch_opcode());
+        EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+      }
+    }
+  }
+}
+
 INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
                         InstructionSelectorMemoryAccessTest,
                         ::testing::ValuesIn(kMemoryAccesses));
......