Commit cba03ba8 authored by jyan, committed by Commit bot

[compiler] Allow matcher to work on arch without scaling capability

Add an extra parameter to disable scaling on BaseWithIndexAndDisplacementMatcher.

R=bmeurer@chromium.org, epertoso@chromium.org, jarin@chromium.org, mstarzinger@chromium.org, mtrofin@chromium.org, titzer@chromium.org
BUG=

Review-Url: https://codereview.chromium.org/2239813002
Cr-Commit-Position: refs/heads/master@{#38635}
parent 935340a4
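
What the new parameter enables, sketched below: a backend whose addressing modes cannot encode a scale factor (the case this CL targets) can now ask for base+index+displacement matching with scaling turned off. The snippet is illustrative only; "node" stands for whatever memory-access node such a backend's instruction selector is visiting.

    BaseWithIndexAndDisplacement64Matcher m(node, AddressOption::kAllowInputSwap);
    if (m.matches()) {
      // With kAllowScale omitted, m.scale() is always 0; a scaled
      // subexpression, if any, is folded back into m.index()
      // (see the Initialize() change below).
    }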
@@ -146,7 +146,7 @@ class IA32OperandGenerator final : public OperandGenerator {
   AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
-    BaseWithIndexAndDisplacement32Matcher m(node, true);
+    BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
     DCHECK(m.matches());
     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
       return GenerateMemoryOperandInputs(
@@ -394,16 +394,26 @@ typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Sub,
 enum DisplacementMode { kPositiveDisplacement, kNegativeDisplacement };
 
+enum class AddressOption : uint8_t {
+  kAllowNone = 0u,
+  kAllowInputSwap = 1u << 0,
+  kAllowScale = 1u << 1,
+  kAllowAll = kAllowInputSwap | kAllowScale
+};
+
+typedef base::Flags<AddressOption, uint8_t> AddressOptions;
+DEFINE_OPERATORS_FOR_FLAGS(AddressOptions);
+
 template <class AddMatcher>
 struct BaseWithIndexAndDisplacementMatcher {
-  BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
+  BaseWithIndexAndDisplacementMatcher(Node* node, AddressOptions options)
       : matches_(false),
         index_(nullptr),
         scale_(0),
         base_(nullptr),
         displacement_(nullptr),
         displacement_mode_(kPositiveDisplacement) {
-    Initialize(node, allow_input_swap);
+    Initialize(node, options);
   }
 
   explicit BaseWithIndexAndDisplacementMatcher(Node* node)
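
A note on the flags type, with a small sketch assuming the usual base::Flags semantics (operator| combines options, operator& tests them and yields a zero/non-zero AddressOptions value):

    AddressOptions options =
        AddressOption::kAllowScale | AddressOption::kAllowInputSwap;
    if (options & AddressOption::kAllowScale) {
      // Non-zero result: scaling permitted. This particular combination
      // is exactly AddressOption::kAllowAll.
    }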
@@ -413,7 +423,10 @@ struct BaseWithIndexAndDisplacementMatcher {
         base_(nullptr),
         displacement_(nullptr),
         displacement_mode_(kPositiveDisplacement) {
-    Initialize(node, node->op()->HasProperty(Operator::kCommutative));
+    Initialize(node, AddressOption::kAllowScale |
+                         (node->op()->HasProperty(Operator::kCommutative)
+                              ? AddressOption::kAllowInputSwap
+                              : AddressOption::kAllowNone));
   }
 
   bool matches() const { return matches_; }
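
In other words, the one-argument constructor keeps the old defaults: scaling is always allowed, and input swap is allowed exactly when the operator is commutative. A sketch of the equivalence (the node names here are hypothetical):

    BaseWithIndexAndDisplacement64Matcher m1(int64_add_node);
    // commutative: behaves like (int64_add_node, AddressOption::kAllowAll)
    BaseWithIndexAndDisplacement64Matcher m2(int64_sub_node);
    // non-commutative: behaves like (int64_sub_node, AddressOption::kAllowScale)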
@@ -431,7 +444,7 @@ struct BaseWithIndexAndDisplacementMatcher {
   Node* displacement_;
   DisplacementMode displacement_mode_;
 
-  void Initialize(Node* node, bool allow_input_swap) {
+  void Initialize(Node* node, AddressOptions options) {
     // The BaseWithIndexAndDisplacementMatcher canonicalizes the order of
     // displacements and scale factors that are used as inputs, so instead of
     // enumerating all possible patterns by brute force, checking for node
@@ -449,7 +462,7 @@ struct BaseWithIndexAndDisplacementMatcher {
     // (B + D)
     // (B + B)
     if (node->InputCount() < 2) return;
-    AddMatcher m(node, allow_input_swap);
+    AddMatcher m(node, options & AddressOption::kAllowInputSwap);
     Node* left = m.left().node();
     Node* right = m.right().node();
     Node* displacement = nullptr;
@@ -608,6 +621,10 @@ struct BaseWithIndexAndDisplacementMatcher {
         base = index;
       }
     }
+    if (!(options & AddressOption::kAllowScale) && scale != 0) {
+      index = scale_expression;
+      scale = 0;
+    }
     base_ = base;
     displacement_ = displacement;
     displacement_mode_ = displacement_mode;
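
A worked example of this fallback, assuming a hypothetical graph node p for the expression B + (I << 2):

    BaseWithIndexAndDisplacement64Matcher with_scale(p, AddressOption::kAllowAll);
    // with_scale: base() == B, index() == I, scale() == 2

    BaseWithIndexAndDisplacement64Matcher no_scale(p, AddressOption::kAllowInputSwap);
    // no_scale: base() == B, index() == the whole (I << 2) node, scale() == 0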
@@ -877,7 +877,8 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
       m.right().Is(32)) {
     // Just load and sign-extend the interesting 4 bytes instead. This happens,
     // for example, when we're loading and untagging SMIs.
-    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
+                                                AddressOption::kAllowAll);
     if (mleft.matches() && mleft.index() == nullptr) {
       int64_t offset = 0;
       Node* displacement = mleft.displacement();
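
Why the narrow load is sound, shown with a standalone snippet rather than V8 internals (my assumptions: a little-endian target and the upper-half Smi encoding the comment refers to):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // A 64-bit word with the value 21 "tagged" into its upper 4 bytes.
      int64_t x = (int64_t{21} << 32) | 0x1234;
      // Untagging via the shift the selector starts from...
      int64_t shifted = x >> 32;
      // ...equals a sign-extending 4-byte load at offset +4 (little-endian).
      int32_t upper;
      std::memcpy(&upper, reinterpret_cast<const char*>(&x) + 4, sizeof(upper));
      assert(int64_t{upper} == shifted);
      return 0;
    }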
@@ -128,7 +128,7 @@ class X64OperandGenerator final : public OperandGenerator {
   AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
-    BaseWithIndexAndDisplacement64Matcher m(operand, true);
+    BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
     DCHECK(m.matches());
     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
       return GenerateMemoryOperandInputs(
@@ -653,7 +653,8 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
       m.right().Is(32)) {
     // Just load and sign-extend the interesting 4 bytes instead. This happens,
     // for example, when we're loading and untagging SMIs.
-    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
+                                                AddressOption::kAllowAll);
     if (mleft.matches() && (mleft.displacement() == nullptr ||
                             g.CanBeImmediate(mleft.displacement()))) {
       size_t input_count = 0;
@@ -150,7 +150,7 @@ class X87OperandGenerator final : public OperandGenerator {
   AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
-    BaseWithIndexAndDisplacement32Matcher m(node, true);
+    BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
     DCHECK(m.matches());
     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
       return GenerateMemoryOperandInputs(