Commit 2f7a5af0 authored by danno, committed by Commit bot

[turbofan]: Port lea changes to ia32

Review URL: https://codereview.chromium.org/747283005

Cr-Commit-Position: refs/heads/master@{#25771}
parent 4c3e4f8d
......@@ -605,9 +605,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movss(operand, i.InputDoubleRegister(index));
}
break;
case kIA32Lea:
__ lea(i.OutputRegister(), i.MemoryOperand());
case kIA32Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
// and addressing mode just happens to work out. The "addl"/"subl" forms
// in these cases are faster based on measurements.
if (mode == kMode_MI) {
// Pure-immediate operand: just materialize the constant.
__ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
} else if (i.InputRegister(0).is(i.OutputRegister())) {
// Destination aliases the base register, so an in-place ALU op can
// replace the lea.
if (mode == kMode_MRI) {
int32_t constant_summand = i.InputInt32(1);
// NOTE(review): negating constant_summand below is UB when it is
// INT32_MIN -- confirm the selector never emits that immediate.
if (constant_summand > 0) {
__ add(i.OutputRegister(), Immediate(constant_summand));
} else if (constant_summand < 0) {
__ sub(i.OutputRegister(), Immediate(-constant_summand));
}
// A zero summand needs no instruction: output already holds input 0.
} else if (mode == kMode_MR1) {
if (i.InputRegister(1).is(i.OutputRegister())) {
// reg + reg with both inputs == output is a doubling: shift by 1.
__ shl(i.OutputRegister(), 1);
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_M2) {
__ shl(i.OutputRegister(), 1);
} else if (mode == kMode_M4) {
__ shl(i.OutputRegister(), 2);
} else if (mode == kMode_M8) {
__ shl(i.OutputRegister(), 3);
} else {
// Scaled/displaced forms that do not reduce to a shift keep the lea.
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else {
// Output differs from the base input: lea computes into it directly.
__ lea(i.OutputRegister(), i.MemoryOperand());
}
break;
}
case kIA32Push:
if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
......
......@@ -133,6 +133,10 @@ struct BinopMatcher : public NodeMatcher {
: NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
}
// Like the one-argument constructor, but the caller decides explicitly
// whether a constant may be canonicalized onto the right input, instead of
// consulting the operator's kCommutative property.
BinopMatcher(Node* node, bool allow_input_swap)
: NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
if (allow_input_swap) PutConstantOnRight();
}
typedef Left LeftMatcher;
typedef Right RightMatcher;
......@@ -235,8 +239,32 @@ struct AddMatcher : public BinopMatcher {
static const IrOpcode::Value kOpcode = kAddOpcode;
typedef ScaleMatcher<BinopMatcher, kMulOpcode, kShiftOpcode> Matcher;
AddMatcher(Node* node, bool allow_input_swap)
: BinopMatcher(node, allow_input_swap),
scale_(-1),
power_of_two_plus_one_(false) {
Initialize(node, allow_input_swap);
}
explicit AddMatcher(Node* node)
: BinopMatcher(node), scale_(-1), power_of_two_plus_one_(false) {
: BinopMatcher(node, node->op()->HasProperty(Operator::kCommutative)),
scale_(-1),
power_of_two_plus_one_(false) {
Initialize(node, node->op()->HasProperty(Operator::kCommutative));
}
// True when Initialize() matched a scaled index (scale_ was set).
bool HasIndexInput() const { return scale_ != -1; }
// The index node itself, i.e. the first input of the matched left operand.
Node* IndexInput() const {
DCHECK(HasIndexInput());
return this->left().node()->InputAt(0);
}
// Scale exponent of the matched index; only valid when HasIndexInput().
int scale() const {
DCHECK(HasIndexInput());
return scale_;
}
// Presumably true when the add matched the "x * 2^k + x" shape -- verify
// against the Initialize() logic, which is not fully visible here.
bool power_of_two_plus_one() const { return power_of_two_plus_one_; }
private:
void Initialize(Node* node, bool allow_input_swap) {
Matcher left_matcher(this->left().node(), true);
if (left_matcher.matches()) {
scale_ = left_matcher.scale();
......@@ -244,7 +272,7 @@ struct AddMatcher : public BinopMatcher {
return;
}
if (!this->HasProperty(Operator::kCommutative)) {
if (!allow_input_swap) {
return;
}
......@@ -262,18 +290,6 @@ struct AddMatcher : public BinopMatcher {
}
}
bool HasIndexInput() const { return scale_ != -1; }
Node* IndexInput() const {
DCHECK(HasIndexInput());
return this->left().node()->InputAt(0);
}
int scale() const {
DCHECK(HasIndexInput());
return scale_;
}
bool power_of_two_plus_one() const { return power_of_two_plus_one_; }
private:
int scale_;
bool power_of_two_plus_one_;
};
......@@ -286,12 +302,38 @@ typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Mul,
template <class AddMatcher>
struct BaseWithIndexAndDisplacementMatcher {
// Attempt to decompose "node" into base + index*scale + displacement.
// "allow_input_swap" permits commuting add inputs during matching,
// regardless of the operator's declared commutativity.
BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
: matches_(false),
index_(NULL),
scale_(0),
base_(NULL),
displacement_(NULL) {
Initialize(node, allow_input_swap);
}
// Convenience form: inputs may be swapped only when the node's operator
// carries the kCommutative property.
explicit BaseWithIndexAndDisplacementMatcher(Node* node)
: matches_(false),
index_(NULL),
scale_(0),
base_(NULL),
displacement_(NULL) {
Initialize(node, node->op()->HasProperty(Operator::kCommutative));
}
// Accessors for the decomposed operand. index/base/displacement are set by
// Initialize() and stay NULL for components that were not matched.
bool matches() const { return matches_; }
Node* index() const { return index_; }
int scale() const { return scale_; }
Node* base() const { return base_; }
Node* displacement() const { return displacement_; }
private:
bool matches_;
Node* index_;
int scale_;
Node* base_;
Node* displacement_;
void Initialize(Node* node, bool allow_input_swap) {
// The BaseWithIndexAndDisplacementMatcher canonicalizes the order of
// displacements and scale factors that are used as inputs, so instead of
// enumerating all possible patterns by brute force, checking for node
......@@ -309,7 +351,7 @@ struct BaseWithIndexAndDisplacementMatcher {
// (B + D)
// (B + B)
if (node->InputCount() < 2) return;
AddMatcher m(node);
AddMatcher m(node, allow_input_swap);
Node* left = m.left().node();
Node* right = m.right().node();
Node* displacement = NULL;
......@@ -433,21 +475,6 @@ struct BaseWithIndexAndDisplacementMatcher {
scale_ = scale;
matches_ = true;
}
bool matches() const { return matches_; }
Node* index() const { return index_; }
int scale() const { return scale_; }
Node* base() const { return base_; }
Node* displacement() const { return displacement_; }
private:
bool matches_;
protected:
Node* index_;
int scale_;
Node* base_;
Node* displacement_;
};
typedef BaseWithIndexAndDisplacementMatcher<Int32AddMatcher>
......
......@@ -67,11 +67,11 @@ class X64OperandGenerator FINAL : public OperandGenerator {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != NULL) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_M1I, kMode_M2I,
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
mode = kMnI_modes[scale_exponent];
} else {
static const AddressingMode kMn_modes[] = {kMode_M1, kMode_MR1,
static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
kMode_M4, kMode_M8};
mode = kMn_modes[scale_exponent];
if (mode == kMode_MR1) {
......@@ -83,6 +83,21 @@ class X64OperandGenerator FINAL : public OperandGenerator {
return mode;
}
// Decompose "operand" into base/index/scale/displacement, append the
// required instruction inputs, and return the matching addressing mode.
// Falls back to a plain [reg + reg] (kMode_MR1) form when the matched
// displacement cannot be encoded as an immediate.
AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
InstructionOperand* inputs[],
size_t* input_count) {
BaseWithIndexAndDisplacement64Matcher m(operand, true);
DCHECK(m.matches());
if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
return kMode_MR1;
}
}
// A node is preferred as the left operand when the selector reports it is
// no longer live after this use.
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
......@@ -93,8 +108,6 @@ void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
X64OperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const index = node->InputAt(1);
ArchOpcode opcode;
switch (rep) {
......@@ -122,19 +135,15 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index)) {
// load [%base + #index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else if (g.CanBeImmediate(base)) {
// load [#base + %index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
} else {
// load [%base + %index*1]
Emit(opcode | AddressingModeField::encode(kMode_MR1),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
InstructionOperand* outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand* inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
Emit(code, 1, outputs, input_count, inputs);
}
......@@ -184,21 +193,15 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
return;
}
InstructionOperand* inputs[4];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
InstructionOperand* value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
if (g.CanBeImmediate(index)) {
// store [%base + #index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(base), g.UseImmediate(index), value_operand);
} else if (g.CanBeImmediate(base)) {
// store [#base + %index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(index), g.UseImmediate(base), value_operand);
} else {
// store [%base + %index*1], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
g.UseRegister(base), g.UseRegister(index), value_operand);
}
inputs[input_count++] = value_operand;
Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}
......
......@@ -23,7 +23,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
}
......@@ -34,18 +34,26 @@ TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
if (imm == 0) {
ASSERT_EQ(1U, s[0]->InputCount());
} else {
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
}
}
{
StreamBuilder m(this, kMachInt32, kMachInt32);
m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
if (imm == 0) {
ASSERT_EQ(1U, s[0]->InputCount());
} else {
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
}
}
}
}
......@@ -112,10 +120,13 @@ TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
m.Return(m.Int32Add(add, param1));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(1)));
ASSERT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
}
......@@ -131,6 +142,7 @@ TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(1)));
}
......@@ -304,9 +316,10 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
AddressingModeUnitTest() : m(NULL) { Reset(); }
~AddressingModeUnitTest() { delete m; }
void Run(Node* base, Node* index, AddressingMode mode) {
Node* load = m->Load(kMachInt32, base, index);
m->Store(kMachInt32, base, index, load);
void Run(Node* base, Node* load_index, Node* store_index,
AddressingMode mode) {
Node* load = m->Load(kMachInt32, base, load_index);
m->Store(kMachInt32, base, store_index, load);
m->Return(m->Int32Constant(0));
Stream s = m->Build();
ASSERT_EQ(2U, s.size());
......@@ -342,21 +355,21 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
Node* base = base_reg;
Node* index = zero;
Run(base, index, kMode_MR);
Run(base, index, index, kMode_MR);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
Node* base = base_reg;
Node* index = non_zero;
Run(base, index, kMode_MRI);
Run(base, index, index, kMode_MRI);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
Node* base = base_reg;
Node* index = index_reg;
Run(base, index, kMode_MR1);
Run(base, index, index, kMode_MR1);
}
......@@ -365,16 +378,18 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = base_reg;
Node* index = m->Int32Mul(index_reg, scales[i]);
Run(base, index, expected[i]);
Node* load_index = m->Int32Mul(index_reg, scales[i]);
Node* store_index = m->Int32Mul(index_reg, scales[i]);
Run(base, load_index, store_index, expected[i]);
}
}
TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
Node* base = base_reg;
Node* index = m->Int32Add(index_reg, non_zero);
Run(base, index, kMode_MR1I);
Node* load_index = m->Int32Add(index_reg, non_zero);
Node* store_index = m->Int32Add(index_reg, non_zero);
Run(base, load_index, store_index, kMode_MR1I);
}
......@@ -383,44 +398,52 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = base_reg;
Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, index, expected[i]);
Node* load_index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Node* store_index =
m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, load_index, store_index, expected[i]);
}
}
TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
TEST_F(AddressingModeUnitTest, AddressingMode_M1ToMR) {
Node* base = null_ptr;
Node* index = index_reg;
Run(base, index, kMode_M1);
// M1 maps to MR
Run(base, index, index, kMode_MR);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
AddressingMode expected[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
AddressingMode expected[] = {kMode_MR, kMode_M2, kMode_M4, kMode_M8};
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = null_ptr;
Node* index = m->Int32Mul(index_reg, scales[i]);
Run(base, index, expected[i]);
Node* load_index = m->Int32Mul(index_reg, scales[i]);
Node* store_index = m->Int32Mul(index_reg, scales[i]);
Run(base, load_index, store_index, expected[i]);
}
}
TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
TEST_F(AddressingModeUnitTest, AddressingMode_M1IToMRI) {
Node* base = null_ptr;
Node* index = m->Int32Add(index_reg, non_zero);
Run(base, index, kMode_M1I);
Node* load_index = m->Int32Add(index_reg, non_zero);
Node* store_index = m->Int32Add(index_reg, non_zero);
// M1I maps to MRI
Run(base, load_index, store_index, kMode_MRI);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
AddressingMode expected[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
AddressingMode expected[] = {kMode_MRI, kMode_M2I, kMode_M4I, kMode_M8I};
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = null_ptr;
Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, index, expected[i]);
Node* load_index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Node* store_index =
m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, load_index, store_index, expected[i]);
}
}
......@@ -433,7 +456,7 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MI) {
Reset();
Node* base = bases[i];
Node* index = indices[j];
Run(base, index, kMode_MI);
Run(base, index, index, kMode_MI);
}
}
}
......@@ -459,7 +482,7 @@ std::ostream& operator<<(std::ostream& os, const MultParam& m) {
const MultParam kMultParams[] = {{-1, false, kMode_None},
{0, false, kMode_None},
{1, true, kMode_M1},
{1, true, kMode_MR},
{2, true, kMode_M2},
{3, true, kMode_MR2},
{4, true, kMode_M4},
......@@ -493,11 +516,14 @@ static unsigned InputCountForLea(AddressingMode mode) {
case kMode_MR2:
case kMode_MR4:
case kMode_MR8:
case kMode_MRI:
return 2U;
case kMode_M1:
case kMode_M2:
case kMode_M4:
case kMode_M8:
case kMode_MI:
case kMode_MR:
return 1U;
default:
UNREACHABLE();
......@@ -506,7 +532,9 @@ static unsigned InputCountForLea(AddressingMode mode) {
}
static AddressingMode AddressingModeForAddMult(const MultParam& m) {
static AddressingMode AddressingModeForAddMult(int32_t imm,
const MultParam& m) {
if (imm == 0) return m.addressing_mode;
switch (m.addressing_mode) {
case kMode_MR1:
return kMode_MR1I;
......@@ -524,6 +552,8 @@ static AddressingMode AddressingModeForAddMult(const MultParam& m) {
return kMode_M4I;
case kMode_M8:
return kMode_M8I;
case kMode_MR:
return kMode_MRI;
default:
UNREACHABLE();
return kMode_None;
......@@ -563,16 +593,19 @@ TEST_P(InstructionSelectorMultTest, MultAdd32) {
if (m_param.lea_expected) {
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
EXPECT_EQ(AddressingModeForAddMult(imm, m_param),
s[0]->addressing_mode());
unsigned input_count = InputCountForLea(s[0]->addressing_mode());
ASSERT_EQ(input_count, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE,
s[0]->InputAt(input_count - 1)->kind());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
if (imm != 0) {
ASSERT_EQ(InstructionOperand::IMMEDIATE,
s[0]->InputAt(input_count - 1)->kind());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
}
} else {
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
EXPECT_EQ(kIA32Add, s[1]->arch_opcode());
EXPECT_EQ(kIA32Lea, s[1]->arch_opcode());
}
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment