Commit 2f7a5af0 authored by danno's avatar danno Committed by Commit bot

[turbofan]: Port lea changes to ia32

Review URL: https://codereview.chromium.org/747283005

Cr-Commit-Position: refs/heads/master@{#25771}
parent 4c3e4f8d
......@@ -605,9 +605,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movss(operand, i.InputDoubleRegister(index));
}
break;
case kIA32Lea:
__ lea(i.OutputRegister(), i.MemoryOperand());
case kIA32Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
// and addressing mode just happens to work out. The "addl"/"subl" forms
// in these cases are faster based on measurements.
if (mode == kMode_MI) {
__ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
} else if (i.InputRegister(0).is(i.OutputRegister())) {
if (mode == kMode_MRI) {
int32_t constant_summand = i.InputInt32(1);
if (constant_summand > 0) {
__ add(i.OutputRegister(), Immediate(constant_summand));
} else if (constant_summand < 0) {
__ sub(i.OutputRegister(), Immediate(-constant_summand));
}
} else if (mode == kMode_MR1) {
if (i.InputRegister(1).is(i.OutputRegister())) {
__ shl(i.OutputRegister(), 1);
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_M2) {
__ shl(i.OutputRegister(), 1);
} else if (mode == kMode_M4) {
__ shl(i.OutputRegister(), 2);
} else if (mode == kMode_M8) {
__ shl(i.OutputRegister(), 3);
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
break;
}
case kIA32Push:
if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
......
......@@ -38,279 +38,83 @@ class IA32OperandGenerator FINAL : public OperandGenerator {
}
}
// A node that is no longer live makes a better left operand — presumably
// because the two-operand instruction can clobber it in place without a
// register copy (NOTE(review): confirm against register allocator).
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
};
// Get the AddressingMode of scale factor N from the AddressingMode of scale
// factor 1.
// Maps the scale-1 addressing mode to the mode for scale factor 2^power.
// Relies on the AddressingMode enum listing the scaled variants
// consecutively, so the adjustment is a simple integer offset.
static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
                                           int power) {
  DCHECK(0 <= power && power < 4);
  const int adjusted = static_cast<int>(base_mode) + power;
  return static_cast<AddressingMode>(adjusted);
}
// Fairly Intel-specific node matcher used for matching scale factors in
// addressing modes.
// Matches nodes of form [x * N] for N in {1,2,4,8}
class ScaleFactorMatcher : public NodeMatcher {
public:
// The scale factors this matcher recognizes: {1, 2, 4, 8}.
static const int kMatchedFactors[4];
explicit ScaleFactorMatcher(Node* node);
// True when the node had the form [x * N] for a supported N.
bool Matches() const { return left_ != NULL; }
// log2 of the matched scale factor; only valid after a successful match.
int Power() const {
DCHECK(Matches());
return power_;
}
// The non-constant operand x of the matched multiply.
Node* Left() const {
DCHECK(Matches());
return left_;
}
private:
Node* left_;
int power_;
};
// Fairly Intel-specific node matcher used for matching index and displacement
// operands in addressing modes.
// Matches nodes of form:
// [x * N]
// [x * N + K]
// [x + K]
// [x] -- fallback case
// for N in {1,2,4,8} and K int32_t
class IndexAndDisplacementMatcher : public NodeMatcher {
public:
explicit IndexAndDisplacementMatcher(Node* node);
// The index node x; falls back to the matched node itself when no
// add/scale pattern was recognized.
Node* index_node() const { return index_node_; }
// The constant displacement K, or 0 when none was present.
int displacement() const { return displacement_; }
// log2 of the scale factor N; 0 when no scale was matched.
int power() const { return power_; }
private:
Node* index_node_;
int displacement_;
int power_;
};
// Fairly Intel-specific node matcher used for matching multiplies that can be
// transformed to lea instructions.
// Matches nodes of form:
// [x * N]
// for N in {1,2,3,4,5,8,9}
class LeaMultiplyMatcher : public NodeMatcher {
public:
// The factors this matcher recognizes: {1, 2, 3, 4, 5, 8, 9} — every
// value expressible as 2^k or 2^k + 1 for a lea scale k in 0..3.
static const int kMatchedFactors[7];
explicit LeaMultiplyMatcher(Node* node);
bool Matches() const { return left_ != NULL; }
// log2 of the power-of-two part of the factor; valid only after a match.
int Power() const {
DCHECK(Matches());
return power_;
}
// The non-constant operand x of the matched multiply.
Node* Left() const {
DCHECK(Matches());
return left_;
}
// Displacement will be either 0 or 1.
// 1 means the factor was 2^Power() + 1 (i.e. 3, 5 or 9), so one extra
// copy of x must be added on top of the scaled index.
int32_t Displacement() const {
DCHECK(Matches());
return displacement_;
}
private:
Node* left_;
int power_;
int displacement_;
};
const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
// Matches an Int32Mul by a constant in kMatchedFactors; on success records
// the non-constant operand in left_ and log2(factor) in power_.
ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
: NodeMatcher(node), left_(NULL), power_(0) {
if (opcode() != IrOpcode::kInt32Mul) return;
// TODO(dcarney): should test 64 bit ints as well.
Int32BinopMatcher m(this->node());
if (!m.right().HasValue()) return;
int32_t value = m.right().Value();
// Deliberate fall-throughs: each case increments power_ once, so power_
// ends up as log2(value). Unsupported factors return early, leaving
// left_ == NULL so Matches() reports failure.
switch (value) {
case 8:
power_++; // Fall through.
case 4:
power_++; // Fall through.
case 2:
power_++; // Fall through.
case 1:
break;
default:
return;
}
// Setting left_ last marks the match as successful (see Matches()).
left_ = m.left().node();
}
// Decomposes node into [x * N + K]: first peels off a constant summand K
// when the node is an Int32Add with a constant right input, then tries to
// match a scale factor N on the remaining index expression.
IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
: NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
if (opcode() == IrOpcode::kInt32Add) {
Int32BinopMatcher m(this->node());
if (m.right().HasValue()) {
displacement_ = m.right().Value();
index_node_ = m.left().node();
}
}
// Test scale factor.
ScaleFactorMatcher scale_matcher(index_node_);
if (scale_matcher.Matches()) {
index_node_ = scale_matcher.Left();
power_ = scale_matcher.Power();
}
}
const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
: NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
return;
}
int64_t value;
Node* left = NULL;
{
Int32BinopMatcher m(this->node());
if (m.right().HasValue()) {
value = m.right().Value();
left = m.left().node();
} else {
Int64BinopMatcher m(this->node());
if (m.right().HasValue()) {
value = m.right().Value();
left = m.left().node();
} else {
return;
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
Node* displacement_node,
InstructionOperand* inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == NULL)
? 0
: OpParameter<int32_t>(displacement_node);
if (base != NULL) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
base = NULL;
}
}
}
switch (value) {
case 9:
case 8:
power_++; // Fall through.
case 5:
case 4:
power_++; // Fall through.
case 3:
case 2:
power_++; // Fall through.
case 1:
break;
default:
return;
}
if (!base::bits::IsPowerOfTwo64(value)) {
displacement_ = 1;
}
left_ = left;
}
class AddressingModeMatcher {
public:
AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index)
: base_operand_(NULL),
index_operand_(NULL),
displacement_operand_(NULL),
mode_(kMode_None) {
Int32Matcher index_imm(index);
if (index_imm.HasValue()) {
int32_t displacement = index_imm.Value();
// Compute base operand and fold base immediate into displacement.
Int32Matcher base_imm(base);
if (!base_imm.HasValue()) {
base_operand_ = g->UseRegister(base);
} else {
displacement += base_imm.Value();
}
if (displacement != 0 || base_operand_ == NULL) {
displacement_operand_ = g->TempImmediate(displacement);
}
if (base_operand_ == NULL) {
mode_ = kMode_MI;
if (base != NULL) {
inputs[(*input_count)++] = UseRegister(base);
if (index != NULL) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
kMode_MR4I, kMode_MR8I};
mode = kMRnI_modes[scale];
} else {
static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
kMode_MR4, kMode_MR8};
mode = kMRn_modes[scale];
}
} else {
if (displacement == 0) {
mode_ = kMode_MR;
mode = kMode_MR;
} else {
mode_ = kMode_MRI;
inputs[(*input_count)++] = TempImmediate(displacement);
mode = kMode_MRI;
}
}
} else {
// Compute index and displacement.
IndexAndDisplacementMatcher matcher(index);
index_operand_ = g->UseRegister(matcher.index_node());
int32_t displacement = matcher.displacement();
// Compute base operand and fold base immediate into displacement.
Int32Matcher base_imm(base);
if (!base_imm.HasValue()) {
base_operand_ = g->UseRegister(base);
} else {
displacement += base_imm.Value();
}
// Compute displacement operand.
if (displacement != 0) {
displacement_operand_ = g->TempImmediate(displacement);
}
// Compute mode with scale factor one.
if (base_operand_ == NULL) {
if (displacement_operand_ == NULL) {
mode_ = kMode_M1;
DCHECK(scale >= 0 && scale <= 3);
if (index != NULL) {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
mode = kMnI_modes[scale];
} else {
mode_ = kMode_M1I;
static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
kMode_M4, kMode_M8};
mode = kMn_modes[scale];
}
} else {
if (displacement_operand_ == NULL) {
mode_ = kMode_MR1;
} else {
mode_ = kMode_MR1I;
}
inputs[(*input_count)++] = TempImmediate(displacement);
return kMode_MI;
}
// Adjust mode to actual scale factor.
mode_ = AdjustAddressingMode(mode_, matcher.power());
}
DCHECK_NE(kMode_None, mode_);
return mode;
}
size_t SetInputs(InstructionOperand** inputs) {
size_t input_count = 0;
// Compute inputs_ and input_count.
if (base_operand_ != NULL) {
inputs[input_count++] = base_operand_;
}
if (index_operand_ != NULL) {
inputs[input_count++] = index_operand_;
}
if (displacement_operand_ != NULL) {
inputs[input_count++] = displacement_operand_;
AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
InstructionOperand* inputs[],
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
inputs[(*input_count)++] = UseRegister(node->InputAt(0));
inputs[(*input_count)++] = UseRegister(node->InputAt(1));
return kMode_MR1;
}
DCHECK_NE(input_count, 0);
return input_count;
}
static const int kMaxInputCount = 3;
InstructionOperand* base_operand_;
InstructionOperand* index_operand_;
InstructionOperand* displacement_operand_;
AddressingMode mode_;
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
};
......@@ -325,8 +129,6 @@ static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
// TODO(titzer): signed/unsigned small loads
......@@ -354,11 +156,13 @@ void InstructionSelector::VisitLoad(Node* node) {
}
IA32OperandGenerator g(this);
AddressingModeMatcher matcher(&g, base, index);
InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
size_t input_count = matcher.SetInputs(inputs);
InstructionOperand* outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand* inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
Emit(code, 1, outputs, input_count, inputs);
}
......@@ -417,10 +221,11 @@ void InstructionSelector::VisitStore(Node* node) {
val = g.UseRegister(value);
}
AddressingModeMatcher matcher(&g, base, index);
InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
size_t input_count = matcher.SetInputs(inputs);
InstructionOperand* inputs[4];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
inputs[input_count++] = val;
Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}
......@@ -621,7 +426,63 @@ static inline void VisitShift(InstructionSelector* selector, Node* node,
}
namespace {
// Emits a high-word multiply: the result is fixed to edx and the first
// input to eax (the register pair ia32 mul/imul operates on), while the
// second input gets a unique register so it cannot alias the fixed ones.
void VisitMulHigh(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsFixed(node, edx),
g.UseFixed(node->InputAt(0), eax),
g.UseUniqueRegister(node->InputAt(1)));
}
// Emits a division: dividend fixed in eax, quotient defined in eax.
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
// edx is clobbered by ia32 division; reserving it as a temp keeps the
// allocator from placing a live value there across this instruction.
InstructionOperand* temps[] = {g.TempRegister(edx)};
selector->Emit(opcode, g.DefineAsFixed(node, eax),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
}
// Emits a modulus: like VisitDiv the dividend is fixed in eax, but the
// result (the remainder) is defined in edx.
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsFixed(node, edx),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)));
}
// Emits a kIA32Lea computing index * 2^scale + base + displacement into
// |result|. GenerateMemoryOperandInputs picks the addressing mode and
// fills the operand array; base and displacement may be NULL.
void EmitLea(InstructionSelector* selector, Node* result, Node* index,
int scale, Node* base, Node* displacement) {
IA32OperandGenerator g(selector);
InstructionOperand* inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
index, scale, base, displacement, inputs, &input_count);
// The generator must produce at least one operand and no more than the
// four slots reserved above.
DCHECK_NE(0, static_cast<int>(input_count));
DCHECK_GE(arraysize(inputs), input_count);
InstructionOperand* outputs[1];
outputs[0] = g.DefineAsRegister(result);
InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
selector->Emit(opcode, 1, outputs, input_count, inputs);
}
} // namespace
// Selects a 32-bit shift-left. Shifts by a small constant that the scale
// matcher recognizes are emitted as a lea; anything else becomes shl.
void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
// power_of_two_plus_one() indicates index * 2^scale + index, so the
// index node doubles as the base operand of the lea.
Node* base = m.power_of_two_plus_one() ? index : NULL;
EmitLea(this, node, index, m.scale(), base, NULL);
return;
}
VisitShift(this, node, kIA32Shl);
}
......@@ -641,37 +502,30 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
}
// Tries to lower (x * F) + K, for F in LeaMultiplyMatcher::kMatchedFactors
// and constant K, to a single kIA32Lea. Returns false when the pattern
// does not match so the caller can emit a regular add instead.
static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node) {
Int32BinopMatcher m(node);
if (!m.right().HasValue()) return false;
int32_t displacement_value = m.right().Value();
Node* left = m.left().node();
LeaMultiplyMatcher lmm(left);
if (!lmm.Matches()) return false;
AddressingMode mode;
size_t input_count;
IA32OperandGenerator g(selector);
InstructionOperand* index = g.UseRegister(lmm.Left());
InstructionOperand* displacement = g.TempImmediate(displacement_value);
// Three slots are reserved; inputs[1] is overwritten below when the
// three-operand (MR1I) form is needed.
InstructionOperand* inputs[] = {index, displacement, displacement};
if (lmm.Displacement() != 0) {
// Factor was 2^power + 1 (3, 5 or 9): express the extra +x by using
// the index register as the base register too.
input_count = 3;
inputs[1] = index;
mode = kMode_MR1I;
} else {
input_count = 2;
mode = kMode_M1I;
}
// Scale the mode up by the matched power of two.
mode = AdjustAddressingMode(mode, lmm.Power());
InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
selector->Emit(kIA32Lea | AddressingModeField::encode(mode), 1, outputs,
input_count, inputs);
return true;
}
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
// Try to match the Add to a lea pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
(m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
InstructionOperand* inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
void InstructionSelector::VisitInt32Add(Node* node) {
if (TryEmitLeaMultAdd(this, node)) return;
DCHECK_NE(0, static_cast<int>(input_count));
DCHECK_GE(arraysize(inputs), input_count);
InstructionOperand* outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
Emit(opcode, 1, outputs, input_count, inputs);
return;
}
// No lea pattern match, use add
VisitBinop(this, node, kIA32Add);
}
......@@ -687,36 +541,17 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
// Tries to lower x * F, for F in LeaMultiplyMatcher::kMatchedFactors, to a
// single kIA32Lea. Returns false when no lea-expressible factor matches.
static bool TryEmitLeaMult(InstructionSelector* selector, Node* node) {
LeaMultiplyMatcher lea(node);
// Try to match lea.
if (!lea.Matches()) return false;
AddressingMode mode;
size_t input_count;
IA32OperandGenerator g(selector);
InstructionOperand* left = g.UseRegister(lea.Left());
// Two slots are reserved; the second is only consumed for the
// 2^power + 1 factors (3, 5, 9), which repeat the operand as base.
InstructionOperand* inputs[] = {left, left};
if (lea.Displacement() != 0) {
input_count = 2;
mode = kMode_MR1;
} else {
input_count = 1;
mode = kMode_M1;
}
// Scale the mode up by the matched power of two.
mode = AdjustAddressingMode(mode, lea.Power());
InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
selector->Emit(kIA32Lea | AddressingModeField::encode(mode), 1, outputs,
input_count, inputs);
return true;
}
void InstructionSelector::VisitInt32Mul(Node* node) {
if (TryEmitLeaMult(this, node)) return;
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : NULL;
EmitLea(this, node, index, m.scale(), base, NULL);
return;
}
IA32OperandGenerator g(this);
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (g.CanBeImmediate(right)) {
Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
g.UseImmediate(right));
......@@ -730,36 +565,6 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
}
namespace {
void VisitMulHigh(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsFixed(node, edx),
g.UseFixed(node->InputAt(0), eax),
g.UseUniqueRegister(node->InputAt(1)));
}
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand* temps[] = {g.TempRegister(edx)};
selector->Emit(opcode, g.DefineAsFixed(node, eax),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
}
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsFixed(node, edx),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)));
}
} // namespace
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitMulHigh(this, node, kIA32ImulHigh);
}
......
......@@ -133,6 +133,10 @@ struct BinopMatcher : public NodeMatcher {
: NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
}
// Like the single-argument constructor, but the caller decides explicitly
// whether constants may be canonicalized onto the right input, instead of
// deriving that from the operator's commutativity.
BinopMatcher(Node* node, bool allow_input_swap)
: NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
if (allow_input_swap) PutConstantOnRight();
}
typedef Left LeftMatcher;
typedef Right RightMatcher;
......@@ -235,8 +239,32 @@ struct AddMatcher : public BinopMatcher {
static const IrOpcode::Value kOpcode = kAddOpcode;
typedef ScaleMatcher<BinopMatcher, kMulOpcode, kShiftOpcode> Matcher;
AddMatcher(Node* node, bool allow_input_swap)
: BinopMatcher(node, allow_input_swap),
scale_(-1),
power_of_two_plus_one_(false) {
Initialize(node, allow_input_swap);
}
explicit AddMatcher(Node* node)
: BinopMatcher(node), scale_(-1), power_of_two_plus_one_(false) {
: BinopMatcher(node, node->op()->HasProperty(Operator::kCommutative)),
scale_(-1),
power_of_two_plus_one_(false) {
Initialize(node, node->op()->HasProperty(Operator::kCommutative));
}
bool HasIndexInput() const { return scale_ != -1; }
Node* IndexInput() const {
DCHECK(HasIndexInput());
return this->left().node()->InputAt(0);
}
int scale() const {
DCHECK(HasIndexInput());
return scale_;
}
bool power_of_two_plus_one() const { return power_of_two_plus_one_; }
private:
void Initialize(Node* node, bool allow_input_swap) {
Matcher left_matcher(this->left().node(), true);
if (left_matcher.matches()) {
scale_ = left_matcher.scale();
......@@ -244,7 +272,7 @@ struct AddMatcher : public BinopMatcher {
return;
}
if (!this->HasProperty(Operator::kCommutative)) {
if (!allow_input_swap) {
return;
}
......@@ -262,18 +290,6 @@ struct AddMatcher : public BinopMatcher {
}
}
bool HasIndexInput() const { return scale_ != -1; }
Node* IndexInput() const {
DCHECK(HasIndexInput());
return this->left().node()->InputAt(0);
}
int scale() const {
DCHECK(HasIndexInput());
return scale_;
}
bool power_of_two_plus_one() const { return power_of_two_plus_one_; }
private:
int scale_;
bool power_of_two_plus_one_;
};
......@@ -286,12 +302,38 @@ typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Mul,
template <class AddMatcher>
struct BaseWithIndexAndDisplacementMatcher {
BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
: matches_(false),
index_(NULL),
scale_(0),
base_(NULL),
displacement_(NULL) {
Initialize(node, allow_input_swap);
}
explicit BaseWithIndexAndDisplacementMatcher(Node* node)
: matches_(false),
index_(NULL),
scale_(0),
base_(NULL),
displacement_(NULL) {
Initialize(node, node->op()->HasProperty(Operator::kCommutative));
}
bool matches() const { return matches_; }
Node* index() const { return index_; }
int scale() const { return scale_; }
Node* base() const { return base_; }
Node* displacement() const { return displacement_; }
private:
bool matches_;
Node* index_;
int scale_;
Node* base_;
Node* displacement_;
void Initialize(Node* node, bool allow_input_swap) {
// The BaseWithIndexAndDisplacementMatcher canonicalizes the order of
// displacements and scale factors that are used as inputs, so instead of
// enumerating all possible patterns by brute force, checking for node
......@@ -309,7 +351,7 @@ struct BaseWithIndexAndDisplacementMatcher {
// (B + D)
// (B + B)
if (node->InputCount() < 2) return;
AddMatcher m(node);
AddMatcher m(node, allow_input_swap);
Node* left = m.left().node();
Node* right = m.right().node();
Node* displacement = NULL;
......@@ -433,21 +475,6 @@ struct BaseWithIndexAndDisplacementMatcher {
scale_ = scale;
matches_ = true;
}
bool matches() const { return matches_; }
Node* index() const { return index_; }
int scale() const { return scale_; }
Node* base() const { return base_; }
Node* displacement() const { return displacement_; }
private:
bool matches_;
protected:
Node* index_;
int scale_;
Node* base_;
Node* displacement_;
};
typedef BaseWithIndexAndDisplacementMatcher<Int32AddMatcher>
......
......@@ -67,11 +67,11 @@ class X64OperandGenerator FINAL : public OperandGenerator {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != NULL) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_M1I, kMode_M2I,
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
mode = kMnI_modes[scale_exponent];
} else {
static const AddressingMode kMn_modes[] = {kMode_M1, kMode_MR1,
static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
kMode_M4, kMode_M8};
mode = kMn_modes[scale_exponent];
if (mode == kMode_MR1) {
......@@ -83,6 +83,21 @@ class X64OperandGenerator FINAL : public OperandGenerator {
return mode;
}
// Fills |inputs| with the operands of |operand|'s effective address and
// returns the matching AddressingMode. When the matched displacement does
// not fit an immediate, falls back to a plain base + index (MR1) form
// built from the node's two raw inputs.
AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
InstructionOperand* inputs[],
size_t* input_count) {
BaseWithIndexAndDisplacement64Matcher m(operand, true);
DCHECK(m.matches());
if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
return kMode_MR1;
}
}
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
......@@ -93,8 +108,6 @@ void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
X64OperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const index = node->InputAt(1);
ArchOpcode opcode;
switch (rep) {
......@@ -122,19 +135,15 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index)) {
// load [%base + #index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else if (g.CanBeImmediate(base)) {
// load [#base + %index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
} else {
// load [%base + %index*1]
Emit(opcode | AddressingModeField::encode(kMode_MR1),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
InstructionOperand* outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand* inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
Emit(code, 1, outputs, input_count, inputs);
}
......@@ -184,21 +193,15 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
return;
}
InstructionOperand* inputs[4];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
InstructionOperand* value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
if (g.CanBeImmediate(index)) {
// store [%base + #index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(base), g.UseImmediate(index), value_operand);
} else if (g.CanBeImmediate(base)) {
// store [#base + %index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(index), g.UseImmediate(base), value_operand);
} else {
// store [%base + %index*1], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
g.UseRegister(base), g.UseRegister(index), value_operand);
}
inputs[input_count++] = value_operand;
Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}
......
......@@ -23,7 +23,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
}
......@@ -34,18 +34,26 @@ TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
if (imm == 0) {
ASSERT_EQ(1U, s[0]->InputCount());
} else {
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
}
}
{
StreamBuilder m(this, kMachInt32, kMachInt32);
m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
if (imm == 0) {
ASSERT_EQ(1U, s[0]->InputCount());
} else {
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
}
}
}
}
......@@ -112,10 +120,13 @@ TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
m.Return(m.Int32Add(add, param1));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(1)));
ASSERT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
}
......@@ -131,6 +142,7 @@ TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(1)));
}
......@@ -304,9 +316,10 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
AddressingModeUnitTest() : m(NULL) { Reset(); }
~AddressingModeUnitTest() { delete m; }
void Run(Node* base, Node* index, AddressingMode mode) {
Node* load = m->Load(kMachInt32, base, index);
m->Store(kMachInt32, base, index, load);
void Run(Node* base, Node* load_index, Node* store_index,
AddressingMode mode) {
Node* load = m->Load(kMachInt32, base, load_index);
m->Store(kMachInt32, base, store_index, load);
m->Return(m->Int32Constant(0));
Stream s = m->Build();
ASSERT_EQ(2U, s.size());
......@@ -342,21 +355,21 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
Node* base = base_reg;
Node* index = zero;
Run(base, index, kMode_MR);
Run(base, index, index, kMode_MR);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
Node* base = base_reg;
Node* index = non_zero;
Run(base, index, kMode_MRI);
Run(base, index, index, kMode_MRI);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
Node* base = base_reg;
Node* index = index_reg;
Run(base, index, kMode_MR1);
Run(base, index, index, kMode_MR1);
}
......@@ -365,16 +378,18 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = base_reg;
Node* index = m->Int32Mul(index_reg, scales[i]);
Run(base, index, expected[i]);
Node* load_index = m->Int32Mul(index_reg, scales[i]);
Node* store_index = m->Int32Mul(index_reg, scales[i]);
Run(base, load_index, store_index, expected[i]);
}
}
TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
Node* base = base_reg;
Node* index = m->Int32Add(index_reg, non_zero);
Run(base, index, kMode_MR1I);
Node* load_index = m->Int32Add(index_reg, non_zero);
Node* store_index = m->Int32Add(index_reg, non_zero);
Run(base, load_index, store_index, kMode_MR1I);
}
......@@ -383,44 +398,52 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = base_reg;
Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, index, expected[i]);
Node* load_index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Node* store_index =
m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, load_index, store_index, expected[i]);
}
}
TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
TEST_F(AddressingModeUnitTest, AddressingMode_M1ToMR) {
Node* base = null_ptr;
Node* index = index_reg;
Run(base, index, kMode_M1);
// M1 maps to MR
Run(base, index, index, kMode_MR);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
AddressingMode expected[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
AddressingMode expected[] = {kMode_MR, kMode_M2, kMode_M4, kMode_M8};
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = null_ptr;
Node* index = m->Int32Mul(index_reg, scales[i]);
Run(base, index, expected[i]);
Node* load_index = m->Int32Mul(index_reg, scales[i]);
Node* store_index = m->Int32Mul(index_reg, scales[i]);
Run(base, load_index, store_index, expected[i]);
}
}
TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
TEST_F(AddressingModeUnitTest, AddressingMode_M1IToMRI) {
Node* base = null_ptr;
Node* index = m->Int32Add(index_reg, non_zero);
Run(base, index, kMode_M1I);
Node* load_index = m->Int32Add(index_reg, non_zero);
Node* store_index = m->Int32Add(index_reg, non_zero);
// M1I maps to MRI
Run(base, load_index, store_index, kMode_MRI);
}
TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
AddressingMode expected[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
AddressingMode expected[] = {kMode_MRI, kMode_M2I, kMode_M4I, kMode_M8I};
for (size_t i = 0; i < arraysize(scales); ++i) {
Reset();
Node* base = null_ptr;
Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, index, expected[i]);
Node* load_index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Node* store_index =
m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
Run(base, load_index, store_index, expected[i]);
}
}
......@@ -433,7 +456,7 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MI) {
Reset();
Node* base = bases[i];
Node* index = indices[j];
Run(base, index, kMode_MI);
Run(base, index, index, kMode_MI);
}
}
}
......@@ -459,7 +482,7 @@ std::ostream& operator<<(std::ostream& os, const MultParam& m) {
const MultParam kMultParams[] = {{-1, false, kMode_None},
{0, false, kMode_None},
{1, true, kMode_M1},
{1, true, kMode_MR},
{2, true, kMode_M2},
{3, true, kMode_MR2},
{4, true, kMode_M4},
......@@ -493,11 +516,14 @@ static unsigned InputCountForLea(AddressingMode mode) {
case kMode_MR2:
case kMode_MR4:
case kMode_MR8:
case kMode_MRI:
return 2U;
case kMode_M1:
case kMode_M2:
case kMode_M4:
case kMode_M8:
case kMode_MI:
case kMode_MR:
return 1U;
default:
UNREACHABLE();
......@@ -506,7 +532,9 @@ static unsigned InputCountForLea(AddressingMode mode) {
}
static AddressingMode AddressingModeForAddMult(const MultParam& m) {
static AddressingMode AddressingModeForAddMult(int32_t imm,
const MultParam& m) {
if (imm == 0) return m.addressing_mode;
switch (m.addressing_mode) {
case kMode_MR1:
return kMode_MR1I;
......@@ -524,6 +552,8 @@ static AddressingMode AddressingModeForAddMult(const MultParam& m) {
return kMode_M4I;
case kMode_M8:
return kMode_M8I;
case kMode_MR:
return kMode_MRI;
default:
UNREACHABLE();
return kMode_None;
......@@ -563,16 +593,19 @@ TEST_P(InstructionSelectorMultTest, MultAdd32) {
if (m_param.lea_expected) {
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
EXPECT_EQ(AddressingModeForAddMult(imm, m_param),
s[0]->addressing_mode());
unsigned input_count = InputCountForLea(s[0]->addressing_mode());
ASSERT_EQ(input_count, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE,
s[0]->InputAt(input_count - 1)->kind());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
if (imm != 0) {
ASSERT_EQ(InstructionOperand::IMMEDIATE,
s[0]->InputAt(input_count - 1)->kind());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
}
} else {
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
EXPECT_EQ(kIA32Add, s[1]->arch_opcode());
EXPECT_EQ(kIA32Lea, s[1]->arch_opcode());
}
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment