Commit db33f07f authored by bmeurer@chromium.org

[turbofan] Reenable value numbering.

Value numbering is now limited to eliminatable operators (i.e. operators
that don't throw and don't write), and uses linear probing instead of
separate chaining.

TEST=unittests
R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/630423002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24452 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent d78fab45
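Before the per-file diffs, here is the idea of the new scheme in isolation: nodes whose operator is not eliminatable are skipped, every other node is hashed over its operator and input ids, and an equivalent node is looked up (or the node itself is recorded) in a single open-addressed array probed linearly, which grows once it gets too full. The sketch below only illustrates that scheme and is not V8 code: FakeNode, SketchValueNumbering, and every other name in it are hypothetical, it uses std::hash and std::vector instead of the zone allocator, and it omits the dead-node bookkeeping the real reducer performs.

// Standalone sketch only: hypothetical names, not the V8 API.
#include <cstddef>
#include <functional>
#include <vector>

struct FakeNode {
  int opcode;                  // stands in for the node's Operator
  bool eliminatable;           // stands in for Operator::kEliminatable
  std::vector<int> input_ids;  // stands in for the ids of the input nodes
};

size_t Hash(const FakeNode* node) {
  size_t h = std::hash<int>()(node->opcode);
  for (int id : node->input_ids) {
    h = h * 31 + std::hash<int>()(id);  // simple stand-in for hash_combine
  }
  return h;
}

bool Equals(const FakeNode* a, const FakeNode* b) {
  return a->opcode == b->opcode && a->input_ids == b->input_ids;
}

class SketchValueNumbering {
 public:
  // Returns a previously seen equivalent node, or records `node` itself.
  const FakeNode* Reduce(const FakeNode* node) {
    if (!node->eliminatable) return node;      // only eliminatable nodes are numbered
    if (size_ * 2 >= entries_.size()) Grow();  // keep load factor below 1/2
    const size_t mask = entries_.size() - 1;
    for (size_t i = Hash(node) & mask;; i = (i + 1) & mask) {
      const FakeNode* entry = entries_[i];
      if (entry == nullptr) {  // free slot: remember the node as its own class
        entries_[i] = node;
        ++size_;
        return node;
      }
      if (entry == node || Equals(entry, node)) return entry;
    }
  }

 private:
  void Grow() {
    std::vector<const FakeNode*> old = std::move(entries_);
    entries_.assign(old.empty() ? 16 : old.size() * 2, nullptr);  // power of two
    size_ = 0;
    const size_t mask = entries_.size() - 1;
    for (const FakeNode* entry : old) {  // rehash the surviving entries
      if (entry == nullptr) continue;
      for (size_t i = Hash(entry) & mask;; i = (i + 1) & mask) {
        if (entries_[i] == nullptr) {
          entries_[i] = entry;
          ++size_;
          break;
        }
      }
    }
  }

  std::vector<const FakeNode*> entries_;
  size_t size_ = 0;
};

One flat, power-of-two sized array probed linearly avoids the per-entry zone allocations and pointer chasing of the separate-chaining buckets it replaces.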
src/compiler/pipeline.cc
@@ -290,14 +290,13 @@ Handle<Code> Pipeline::GenerateCode() {
     SourcePositionTable::Scope pos(&source_positions,
                                    SourcePosition::Unknown());
     Linkage linkage(info());
-    // TODO(turbofan): Value numbering disabled for now.
-    // ValueNumberingReducer vn_reducer(zone());
+    ValueNumberingReducer vn_reducer(zone());
     SimplifiedOperatorReducer simple_reducer(&jsgraph);
     ChangeLowering lowering(&jsgraph, &linkage);
     MachineOperatorReducer mach_reducer(&jsgraph);
     GraphReducer graph_reducer(&graph);
     // TODO(titzer): Figure out if we should run all reducers at once here.
-    // graph_reducer.AddReducer(&vn_reducer);
+    graph_reducer.AddReducer(&vn_reducer);
     graph_reducer.AddReducer(&simple_reducer);
     graph_reducer.AddReducer(&lowering);
     graph_reducer.AddReducer(&mach_reducer);
src/compiler/value-numbering-reducer.cc
@@ -4,6 +4,9 @@
 
 #include "src/compiler/value-numbering-reducer.h"
 
+#include <cstring>
+
+#include "src/base/functional.h"
 #include "src/compiler/node.h"
 
 namespace v8 {
@@ -12,7 +15,13 @@ namespace compiler {
 
 namespace {
 
-size_t HashCode(Node* node) { return node->op()->HashCode(); }
+size_t HashCode(Node* node) {
+  size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
+  for (int j = 0; j < node->InputCount(); ++j) {
+    h = base::hash_combine(h, node->InputAt(j)->id());
+  }
+  return h;
+}
 
 
 bool Equals(Node* a, Node* b) {
@@ -33,40 +42,96 @@ bool Equals(Node* a, Node* b) {
 }  // namespace
 
 
-class ValueNumberingReducer::Entry FINAL : public ZoneObject {
- public:
-  Entry(Node* node, Entry* next) : node_(node), next_(next) {}
-
-  Node* node() const { return node_; }
-  Entry* next() const { return next_; }
-
- private:
-  Node* node_;
-  Entry* next_;
-};
-
-
-ValueNumberingReducer::ValueNumberingReducer(Zone* zone) : zone_(zone) {
-  for (size_t i = 0; i < arraysize(buckets_); ++i) {
-    buckets_[i] = NULL;
-  }
-}
+ValueNumberingReducer::ValueNumberingReducer(Zone* zone)
+    : entries_(nullptr), capacity_(0), size_(0), zone_(zone) {}
 
 
 ValueNumberingReducer::~ValueNumberingReducer() {}
 
 
 Reduction ValueNumberingReducer::Reduce(Node* node) {
-  Entry** head = &buckets_[HashCode(node) % arraysize(buckets_)];
-  for (Entry* entry = *head; entry; entry = entry->next()) {
-    if (entry->node()->IsDead()) continue;
-    if (entry->node() == node) return NoChange();
-    if (Equals(node, entry->node())) {
-      return Replace(entry->node());
+  if (!node->op()->HasProperty(Operator::kEliminatable)) return NoChange();
+  const size_t hash = HashCode(node);
+  if (!entries_) {
+    DCHECK(size_ == 0);
+    DCHECK(capacity_ == 0);
+    // Allocate the initial entries and insert the first entry.
+    capacity_ = kInitialCapacity;
+    entries_ = zone()->NewArray<Node*>(kInitialCapacity);
+    memset(entries_, 0, sizeof(*entries_) * kInitialCapacity);
+    entries_[hash & (kInitialCapacity - 1)] = node;
+    size_ = 1;
+    return NoChange();
+  }
+
+  DCHECK(size_ < capacity_);
+  DCHECK(size_ * kCapacityToSizeRatio < capacity_);
+  const size_t mask = capacity_ - 1;
+  size_t dead = capacity_;
+  for (size_t i = hash & mask;; i = (i + 1) & mask) {
+    Node* entry = entries_[i];
+    if (!entry) {
+      if (dead != capacity_) {
+        // Reuse dead entry that we discovered on the way.
+        entries_[dead] = node;
+      } else {
+        // Have to insert a new entry.
+        entries_[i] = node;
+        size_++;
+        // Resize to keep load factor below 1/kCapacityToSizeRatio.
+        if (size_ * kCapacityToSizeRatio >= capacity_) Grow();
+      }
+      DCHECK(size_ * kCapacityToSizeRatio < capacity_);
+      return NoChange();
+    }
+
+    if (entry == node) {
+      return NoChange();
+    }
+
+    // Skip dead entries, but remember their indices so we can reuse them.
+    if (entry->IsDead()) {
+      dead = i;
+      continue;
+    }
+    if (Equals(entry, node)) {
+      return Replace(entry);
     }
   }
-  *head = new (zone()) Entry(node, *head);
-  return NoChange();
 }
+
+
+void ValueNumberingReducer::Grow() {
+  // Allocate a new block of entries kCapacityToSizeRatio times the previous
+  // capacity.
+  Node** const old_entries = entries_;
+  size_t const old_capacity = capacity_;
+  capacity_ *= kCapacityToSizeRatio;
+  entries_ = zone()->NewArray<Node*>(static_cast<int>(capacity_));
+  memset(entries_, 0, sizeof(*entries_) * capacity_);
+  size_ = 0;
+  size_t const mask = capacity_ - 1;
+  // Insert the old entries into the new block (skipping dead nodes).
+  for (size_t i = 0; i < old_capacity; ++i) {
+    Node* const old_entry = old_entries[i];
+    if (!old_entry || old_entry->IsDead()) continue;
+    for (size_t j = HashCode(old_entry) & mask;; j = (j + 1) & mask) {
+      Node* const entry = entries_[j];
+      if (entry == old_entry) {
+        // Skip duplicate of the old entry.
+        break;
+      }
+      if (!entry) {
+        entries_[j] = old_entry;
+        size_++;
+        break;
+      }
+    }
+  }
+}
 
 }  // namespace compiler
src/compiler/value-numbering-reducer.h
@@ -19,13 +19,14 @@ class ValueNumberingReducer FINAL : public Reducer {
   virtual Reduction Reduce(Node* node) OVERRIDE;
 
  private:
+  enum { kInitialCapacity = 256u, kCapacityToSizeRatio = 2u };
+
+  void Grow();
   Zone* zone() const { return zone_; }
 
-  // TODO(turbofan): We currently use separate chaining with linked lists here,
-  // we may want to replace that with a more sophisticated data structure at
-  // some point in the future.
-  class Entry;
-  Entry* buckets_[117u];
+  Node** entries_;
+  size_t capacity_;
+  size_t size_;
   Zone* zone_;
 };
test/unittests/compiler/value-numbering-reducer-unittest.cc
@@ -14,8 +14,8 @@ namespace compiler {
 namespace {
 
-const SimpleOperator kOp0(0, Operator::kNoProperties, 0, 1, "op0");
-const SimpleOperator kOp1(1, Operator::kNoProperties, 1, 1, "op1");
+const SimpleOperator kOp0(0, Operator::kEliminatable, 0, 1, "op0");
+const SimpleOperator kOp1(1, Operator::kEliminatable, 1, 1, "op1");
 
 }  // namespace
@@ -54,6 +54,15 @@ TEST_F(ValueNumberingReducerTest, DeadNodesAreNeverReturned) {
 }
 
 
+TEST_F(ValueNumberingReducerTest, OnlyEliminatableNodesAreReduced) {
+  SimpleOperator op(0, Operator::kNoProperties, 0, 1, "op");
+  Node* n0 = graph()->NewNode(&op);
+  Node* n1 = graph()->NewNode(&op);
+  EXPECT_FALSE(Reduce(n0).Changed());
+  EXPECT_FALSE(Reduce(n1).Changed());
+}
+
+
 TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
   static const size_t kMaxInputCount = 16;
   Node* inputs[kMaxInputCount];
@@ -61,18 +70,18 @@ TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
     Operator::Opcode opcode = static_cast<Operator::Opcode>(
         std::numeric_limits<Operator::Opcode>::max() - i);
     inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
-        opcode, Operator::kNoProperties, 0, 1, "Operator"));
+        opcode, Operator::kEliminatable, 0, 1, "Operator"));
   }
   TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
     const SimpleOperator op1(static_cast<Operator::Opcode>(input_count),
-                             Operator::kNoProperties,
+                             Operator::kEliminatable,
                              static_cast<int>(input_count), 1, "op");
     Node* n1 = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
     Reduction r1 = Reduce(n1);
     EXPECT_FALSE(r1.Changed());
 
     const SimpleOperator op2(static_cast<Operator::Opcode>(input_count),
-                             Operator::kNoProperties,
+                             Operator::kEliminatable,
                              static_cast<int>(input_count), 1, "op");
     Node* n2 = graph()->NewNode(&op2, static_cast<int>(input_count), inputs);
     Reduction r2 = Reduce(n2);
@@ -89,10 +98,10 @@ TEST_F(ValueNumberingReducerTest, SubsequentReductionsYieldTheSameNode) {
     Operator::Opcode opcode = static_cast<Operator::Opcode>(
         std::numeric_limits<Operator::Opcode>::max() - i);
     inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
-        opcode, Operator::kNoProperties, 0, 1, "Operator"));
+        opcode, Operator::kEliminatable, 0, 1, "Operator"));
   }
   TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
-    const SimpleOperator op1(1, Operator::kNoProperties,
+    const SimpleOperator op1(1, Operator::kEliminatable,
                              static_cast<int>(input_count), 1, "op1");
     Node* n = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
     Reduction r = Reduce(n);