Commit 9a588070 authored by danno@chromium.org

[turbofan] Optimize add operations to use 'leal' instruction on x64

Add a MemoryOperandMatcher that recognizes node clusters of the form
[%r1 + %r2*SCALE + OFFSET], and add explicit support in the x64 Int32Add
selector to translate such complex adds to 'leal' instructions.
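
For example (an illustrative case with hypothetical register names): a
cluster such as

  Int32Add(%r1, Word32Shl(%r2, Int32Constant(2)))

previously selected to a shift followed by an add, and can now select to
a single 'leal' with the operand [%r1 + %r2*4].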

R=titzer@chromium.org

Review URL: https://codereview.chromium.org/704713003

Cr-Commit-Position: refs/heads/master@{#25223}
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@25223 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b86c30a2
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_NODE_MATCHERS_H_
 #define V8_COMPILER_NODE_MATCHERS_H_
 
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 #include "src/unique.h"
@@ -116,7 +118,7 @@ struct HeapObjectMatcher FINAL
 // right hand sides of a binary operation and can put constants on the right
 // if they appear on the left hand side of a commutative operation.
 template <typename Left, typename Right>
-struct BinopMatcher FINAL : public NodeMatcher {
+struct BinopMatcher : public NodeMatcher {
   explicit BinopMatcher(Node* node)
       : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
     if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
@@ -128,12 +130,17 @@ struct BinopMatcher FINAL : public NodeMatcher {
   bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
   bool LeftEqualsRight() const { return left().node() == right().node(); }
 
+ protected:
+  void SwapInputs() {
+    std::swap(left_, right_);
+    node()->ReplaceInput(0, left().node());
+    node()->ReplaceInput(1, right().node());
+  }
+
  private:
   void PutConstantOnRight() {
     if (left().HasValue() && !right().HasValue()) {
-      std::swap(left_, right_);
-      node()->ReplaceInput(0, left().node());
-      node()->ReplaceInput(1, right().node());
+      SwapInputs();
     }
   }
@@ -150,6 +157,189 @@ typedef BinopMatcher<UintPtrMatcher, UintPtrMatcher> UintPtrBinopMatcher;
 typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
 typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
 
+struct Int32AddMatcher : public Int32BinopMatcher {
+  explicit Int32AddMatcher(Node* node)
+      : Int32BinopMatcher(node), scale_exponent_(-1) {
+    PutScaledInputOnLeft();
+  }
+
+  bool HasScaledInput() const { return scale_exponent_ != -1; }
+  Node* ScaledInput() const {
+    DCHECK(HasScaledInput());
+    return left().node()->InputAt(0);
+  }
+  int ScaleExponent() const {
+    DCHECK(HasScaledInput());
+    return scale_exponent_;
+  }
+
+ private:
+  int GetInputScaleExponent(Node* node) const {
+    if (node->opcode() == IrOpcode::kWord32Shl) {
+      Int32BinopMatcher m(node);
+      if (m.right().HasValue()) {
+        int32_t value = m.right().Value();
+        if (value >= 0 && value <= 3) {
+          return value;
+        }
+      }
+    } else if (node->opcode() == IrOpcode::kInt32Mul) {
+      Int32BinopMatcher m(node);
+      if (m.right().HasValue()) {
+        int32_t value = m.right().Value();
+        if (value == 1) {
+          return 0;
+        } else if (value == 2) {
+          return 1;
+        } else if (value == 4) {
+          return 2;
+        } else if (value == 8) {
+          return 3;
+        }
+      }
+    }
+    return -1;
+  }
+
+  void PutScaledInputOnLeft() {
+    scale_exponent_ = GetInputScaleExponent(right().node());
+    if (scale_exponent_ >= 0) {
+      int left_scale_exponent = GetInputScaleExponent(left().node());
+      if (left_scale_exponent == -1) {
+        SwapInputs();
+      } else {
+        scale_exponent_ = left_scale_exponent;
+      }
+    } else {
+      scale_exponent_ = GetInputScaleExponent(left().node());
+      if (scale_exponent_ == -1) {
+        if (right().opcode() == IrOpcode::kInt32Add &&
+            left().opcode() != IrOpcode::kInt32Add) {
+          SwapInputs();
+        }
+      }
+    }
+  }
+
+  int scale_exponent_;
+};
+
+struct ScaledWithOffsetMatcher {
+  explicit ScaledWithOffsetMatcher(Node* node)
+      : matches_(false),
+        scaled_(NULL),
+        scale_exponent_(0),
+        offset_(NULL),
+        constant_(NULL) {
+    if (node->opcode() != IrOpcode::kInt32Add) return;
+
+    // The Int32AddMatcher canonicalizes the order of constants and scale
+    // factors that are used as inputs, so instead of enumerating all possible
+    // patterns by brute force, checking for node clusters using the following
+    // templates in the following order suffices to find all of the interesting
+    // cases (S = scaled input, O = offset input, C = constant input):
+    // (S + (O + C))
+    // (S + (O + O))
+    // (S + C)
+    // (S + O)
+    // ((S + C) + O)
+    // ((S + O) + C)
+    // ((O + C) + O)
+    // ((O + O) + C)
+    // (O + C)
+    // (O + O)
+    Int32AddMatcher base_matcher(node);
+    Node* left = base_matcher.left().node();
+    Node* right = base_matcher.right().node();
+    if (base_matcher.HasScaledInput() && left->OwnedBy(node)) {
+      scaled_ = base_matcher.ScaledInput();
+      scale_exponent_ = base_matcher.ScaleExponent();
+      if (right->opcode() == IrOpcode::kInt32Add && right->OwnedBy(node)) {
+        Int32AddMatcher right_matcher(right);
+        if (right_matcher.right().HasValue()) {
+          // (S + (O + C))
+          offset_ = right_matcher.left().node();
+          constant_ = right_matcher.right().node();
+        } else {
+          // (S + (O + O))
+          offset_ = right;
+        }
+      } else if (base_matcher.right().HasValue()) {
+        // (S + C)
+        constant_ = right;
+      } else {
+        // (S + O)
+        offset_ = right;
+      }
+    } else {
+      if (left->opcode() == IrOpcode::kInt32Add && left->OwnedBy(node)) {
+        Int32AddMatcher left_matcher(left);
+        Node* left_left = left_matcher.left().node();
+        Node* left_right = left_matcher.right().node();
+        if (left_matcher.HasScaledInput() && left_left->OwnedBy(left)) {
+          scaled_ = left_matcher.ScaledInput();
+          scale_exponent_ = left_matcher.ScaleExponent();
+          if (left_matcher.right().HasValue()) {
+            // ((S + C) + O)
+            constant_ = left_right;
+            offset_ = right;
+          } else if (base_matcher.right().HasValue()) {
+            // ((S + O) + C)
+            offset_ = left_right;
+            constant_ = right;
+          } else {
+            // (O + O)
+            scaled_ = left;
+            offset_ = right;
+          }
+        } else {
+          if (left_matcher.right().HasValue()) {
+            // ((O + C) + O)
+            scaled_ = left_left;
+            constant_ = left_right;
+            offset_ = right;
+          } else if (base_matcher.right().HasValue()) {
+            // ((O + O) + C)
+            scaled_ = left_left;
+            offset_ = left_right;
+            constant_ = right;
+          } else {
+            // (O + O)
+            scaled_ = left;
+            offset_ = right;
+          }
+        }
+      } else {
+        if (base_matcher.right().HasValue()) {
+          // (O + C)
+          offset_ = left;
+          constant_ = right;
+        } else {
+          // (O + O)
+          offset_ = left;
+          scaled_ = right;
+        }
+      }
+    }
+    matches_ = true;
+  }
+
+  bool matches() const { return matches_; }
+  Node* scaled() const { return scaled_; }
+  int scale_exponent() const { return scale_exponent_; }
+  Node* offset() const { return offset_; }
+  Node* constant() const { return constant_; }
+
+ private:
+  bool matches_;
+
+ protected:
+  Node* scaled_;
+  int scale_exponent_;
+  Node* offset_;
+  Node* constant_;
+};
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
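
A minimal usage sketch of the two matchers above (the node names n, p0
and p1 are hypothetical, and each interior node is assumed to have a
single use so the OwnedBy() checks pass; this is an illustration, not
part of the patch):

  // n represents p0 + (p1 << 3) + 4, built as (p1 << 3) + (p0 + 4).
  ScaledWithOffsetMatcher m(n);
  if (m.matches()) {
    // The (S + (O + C)) template applies:
    //   m.scaled()         == p1   (S: Int32AddMatcher saw the Word32Shl)
    //   m.scale_exponent() == 3
    //   m.offset()         == p0   (O)
    //   m.constant()       == the Int32Constant(4) node (C)
  }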
@@ -364,8 +364,79 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
   VisitWord64Shift(this, node, kX64Ror);
 }
 
 
+namespace {
+
+AddressingMode GenerateMemoryOperandInputs(X64OperandGenerator* g, Node* scaled,
+                                           int scale_exponent, Node* offset,
+                                           Node* constant,
+                                           InstructionOperand* inputs[],
+                                           size_t* input_count) {
+  AddressingMode mode = kMode_MRI;
+  if (offset != NULL) {
+    inputs[(*input_count)++] = g->UseRegister(offset);
+    if (scaled != NULL) {
+      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+      inputs[(*input_count)++] = g->UseRegister(scaled);
+      if (constant != NULL) {
+        inputs[(*input_count)++] = g->UseImmediate(constant);
+        static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+                                                     kMode_MR4I, kMode_MR8I};
+        mode = kMRnI_modes[scale_exponent];
+      } else {
+        static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+                                                    kMode_MR4, kMode_MR8};
+        mode = kMRn_modes[scale_exponent];
+      }
+    } else {
+      DCHECK(constant != NULL);
+      inputs[(*input_count)++] = g->UseImmediate(constant);
+      mode = kMode_MRI;
+    }
+  } else {
+    DCHECK(scaled != NULL);
+    DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+    inputs[(*input_count)++] = g->UseRegister(scaled);
+    if (constant != NULL) {
+      inputs[(*input_count)++] = g->UseImmediate(constant);
+      static const AddressingMode kMnI_modes[] = {kMode_M1I, kMode_M2I,
+                                                  kMode_M4I, kMode_M8I};
+      mode = kMnI_modes[scale_exponent];
+    } else {
+      static const AddressingMode kMn_modes[] = {kMode_M1, kMode_M2, kMode_M4,
+                                                 kMode_M8};
+      mode = kMn_modes[scale_exponent];
+    }
+  }
+  return mode;
+}
+
+}  // namespace
+
+
 void InstructionSelector::VisitInt32Add(Node* node) {
+  // Try to match the Add to a leal pattern
+  ScaledWithOffsetMatcher m(node);
+  X64OperandGenerator g(this);
+  if (m.matches() && (m.constant() == NULL || g.CanBeImmediate(m.constant()))) {
+    InstructionOperand* inputs[4];
+    size_t input_count = 0;
+
+    AddressingMode mode = GenerateMemoryOperandInputs(
+        &g, m.scaled(), m.scale_exponent(), m.offset(), m.constant(), inputs,
+        &input_count);
+
+    DCHECK_NE(0, static_cast<int>(input_count));
+    DCHECK_GE(arraysize(inputs), input_count);
+
+    InstructionOperand* outputs[1];
+    outputs[0] = g.DefineAsRegister(node);
+
+    InstructionCode opcode = AddressingModeField::encode(mode) | kX64Lea32;
+
+    Emit(opcode, 1, outputs, input_count, inputs);
+    return;
+  }
+
   VisitBinop(this, node, kX64Add32);
 }
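
To make the mode tables concrete, a hedged sketch of one selection (node
names are hypothetical and the registers shown are only one possible
assignment):

  // For n = (p1 << 3) + (p0 + 4), as in the matcher sketch above,
  // GenerateMemoryOperandInputs is called with offset=p0, scaled=p1,
  // scale_exponent=3 and constant=Int32Constant(4); it pushes
  // {UseRegister(p0), UseRegister(p1), UseImmediate(4)} into inputs and
  // returns kMode_MR8I, so VisitInt32Add emits a single kX64Lea32, e.g.
  //   leal eax, [rdi + rsi*8 + 4]
  // Without the constant the mode would be kMode_MR8; without the offset,
  // kMode_M8I.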
@@ -101,6 +101,7 @@
 // -----------------------------------------------------------------------------
 
 function TestDivisionLike(ref, construct, values, divisor) {
   // Define the function to test.
   var OptFun = new Function("dividend", construct(divisor));
@@ -111,12 +112,14 @@
   %OptimizeFunctionOnNextCall(OptFun);
   OptFun(13);
 
-  // Check results.
-  values.forEach(function(dividend) {
+  function dude(dividend) {
     // Avoid deopt caused by overflow, we do not want to test this here.
     if (dividend === -2147483648 && divisor === -1) return;
     assertEquals(ref(dividend, divisor), OptFun(dividend));
-  });
+  }
+
+  // Check results.
+  values.forEach(dude);
 }
 
 function Test(ref, construct) {
@@ -49,7 +49,7 @@
       'compiler/js-operator-unittest.cc',
       'compiler/js-typed-lowering-unittest.cc',
       'compiler/machine-operator-reducer-unittest.cc',
       'compiler/machine-operator-unittest.cc',
+      'compiler/node-matchers-unittest.cc',
       'compiler/node-test-utils.cc',
       'compiler/node-test-utils.h',
       'compiler/register-allocator-unittest.cc',