Commit 00aec790 authored by dcarney, committed by Commit bot

[turbofan] cleanup ParallelMove

- make ParallelMove into a ZoneVector, removing an annoying level of indirection
- make MoveOperands hold InstructionOperands instead of pointers, so there's no more operand aliasing for moves (see the sketch below)
- opens up the possibility of storing MachineType in allocated operands
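
A rough sketch of the call-site change (hypothetical operands and indices;
illustration only, not part of the diff):

  // Before: moves held InstructionOperand* and AddMove needed a zone argument,
  // so two moves could alias the same heap-allocated operand.
  //   parallel_move->AddMove(source_ptr, destination_ptr, zone);
  // After: ParallelMove is itself a ZoneVector<MoveOperands*> whose elements
  // hold operands by value, so each move owns private copies.
  MoveOperands* move =
      parallel_move->AddMove(RegisterOperand(0), StackSlotOperand(1));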

R=bmeurer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1081373002

Cr-Commit-Position: refs/heads/master@{#27842}
parent 6198bbc5
......@@ -12,47 +12,30 @@ namespace v8 {
namespace internal {
namespace compiler {
typedef ZoneList<MoveOperands>::iterator op_iterator;
namespace {
#ifdef ENABLE_SLOW_DCHECKS
struct InstructionOperandComparator {
bool operator()(const InstructionOperand* x,
const InstructionOperand* y) const {
return *x < *y;
}
};
#endif
// No operand should be the destination for more than one move.
static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
#ifdef ENABLE_SLOW_DCHECKS
std::set<InstructionOperand*, InstructionOperandComparator> seen;
for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
SLOW_DCHECK(seen.find(i->destination()) == seen.end());
seen.insert(i->destination());
}
#endif
inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
return move->Blocks(destination);
}
void GapResolver::Resolve(ParallelMove* parallel_move) const {
ZoneList<MoveOperands>* moves = parallel_move->move_operands();
// TODO(svenpanne) Use the member version of remove_if when we use real lists.
op_iterator end =
std::remove_if(moves->begin(), moves->end(),
std::mem_fun_ref(&MoveOperands::IsRedundant));
moves->Rewind(static_cast<int>(end - moves->begin()));
inline bool IsRedundant(MoveOperands* move) { return move->IsRedundant(); }
} // namespace
VerifyMovesAreInjective(moves);
for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
if (!move->IsEliminated()) PerformMove(moves, &*move);
void GapResolver::Resolve(ParallelMove* moves) const {
// Clear redundant moves.
auto it =
std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
moves->erase(it, moves->end());
for (auto move : *moves) {
if (!move->IsEliminated()) PerformMove(moves, move);
}
}
void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
MoveOperands* move) const {
void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We mark a
// move as "pending" on entry to PerformMove in order to detect cycles in the
......@@ -63,14 +46,14 @@ void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
// Clear this move's destination to indicate a pending move. The actual
// destination is saved on the side.
DCHECK_NOT_NULL(move->source()); // Or else it will look eliminated.
InstructionOperand* destination = move->destination();
move->set_destination(NULL);
DCHECK(!move->source().IsInvalid()); // Or else it will look eliminated.
InstructionOperand destination = move->destination();
move->SetPending();
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's
// destination blocks this one, so recursively perform all such moves.
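// Illustrative walkthrough (editorial note, not part of the diff): for the
// cycle {A -> B, B -> A}, PerformMove(A -> B) marks itself pending and
// recurses into B -> A; that call finds the pending blocker and emits a
// single AssembleSwap, after which the outer move is redundant and is
// eliminated.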
for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
for (auto other : *moves) {
if (other->Blocks(destination) && !other->IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does not
......@@ -91,8 +74,8 @@ void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
// This move's source may have changed due to swaps to resolve cycles, and so
// it may now be the last move in the cycle. If so, remove it.
InstructionOperand* source = move->source();
if (source->Equals(destination)) {
InstructionOperand source = move->source();
if (source == destination) {
move->Eliminate();
return;
}
......@@ -100,28 +83,27 @@ void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
// The move may be blocked on at most one pending move, in which case we
// have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
op_iterator blocker = std::find_if(
moves->begin(), moves->end(),
std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
auto blocker = std::find_if(moves->begin(), moves->end(),
std::bind2nd(std::ptr_fun(&Blocks), destination));
if (blocker == moves->end()) {
// The easy case: This move is not blocked.
assembler_->AssembleMove(source, destination);
assembler_->AssembleMove(&source, &destination);
move->Eliminate();
return;
}
DCHECK(blocker->IsPending());
DCHECK((*blocker)->IsPending());
// Ensure source is a register or both are stack slots, to limit swap cases.
if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
std::swap(source, destination);
}
assembler_->AssembleSwap(source, destination);
assembler_->AssembleSwap(&source, &destination);
move->Eliminate();
// Any unperformed (including pending) move with a source of either this
// move's source or destination needs to have its source changed to
// reflect the state of affairs after the swap.
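// For example (hypothetical operands): after swapping A and B, a remaining
// move {A -> C} must be rewritten as {B -> C}, since A's old value now
// lives in B.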
for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
for (auto other : *moves) {
if (other->Blocks(source)) {
other->set_source(destination);
} else if (other->Blocks(destination)) {
......
......@@ -34,7 +34,7 @@ class GapResolver FINAL {
private:
// Perform the given move, possibly requiring other moves to satisfy
// dependencies.
void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;
void PerformMove(ParallelMove* moves, MoveOperands* move) const;
// Assembler used to emit moves and save registers.
Assembler* const assembler_;
......
......@@ -13,7 +13,7 @@ namespace compiler {
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& printable) {
const InstructionOperand& op = *printable.op_;
const InstructionOperand& op = printable.op_;
const RegisterConfiguration* conf = printable.register_configuration_;
switch (op.kind()) {
case InstructionOperand::UNALLOCATED: {
......@@ -82,9 +82,8 @@ std::ostream& operator<<(std::ostream& os,
const MoveOperands& mo = *printable.move_operands_;
PrintableInstructionOperand printable_op = {printable.register_configuration_,
mo.destination()};
os << printable_op;
if (!mo.source()->Equals(mo.destination())) {
if (mo.source() != mo.destination()) {
printable_op.op_ = mo.source();
os << " = " << printable_op;
}
......@@ -93,24 +92,23 @@ std::ostream& operator<<(std::ostream& os,
bool ParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;
for (auto move : *this) {
if (!move->IsRedundant()) return false;
}
return true;
}
MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
auto move_ops = move_operands();
MoveOperands* replacement = nullptr;
MoveOperands* to_eliminate = nullptr;
for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
for (auto curr : *this) {
if (curr->IsEliminated()) continue;
if (curr->destination()->Equals(move->source())) {
if (curr->destination() == move->source()) {
DCHECK(!replacement);
replacement = curr;
if (to_eliminate != nullptr) break;
} else if (curr->destination()->Equals(move->destination())) {
} else if (curr->destination() == move->destination()) {
DCHECK(!to_eliminate);
to_eliminate = curr;
if (replacement != nullptr) break;
......@@ -175,8 +173,7 @@ std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
const ParallelMove& pm = *printable.parallel_move_;
bool first = true;
for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
move != pm.move_operands()->end(); ++move) {
for (auto move : pm) {
if (move->IsEliminated()) continue;
if (!first) os << " ";
first = false;
......@@ -199,14 +196,14 @@ std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
os << "{";
bool first = true;
PrintableInstructionOperand poi = {RegisterConfiguration::ArchDefault(),
nullptr};
InstructionOperand()};
for (auto& op : pm.reference_operands_) {
if (!first) {
os << ";";
} else {
first = false;
}
poi.op_ = &op;
poi.op_ = op;
os << poi;
}
return os << "}";
......@@ -295,7 +292,7 @@ std::ostream& operator<<(std::ostream& os,
const PrintableInstruction& printable) {
const Instruction& instr = *printable.instr_;
PrintableInstructionOperand printable_op = {printable.register_configuration_,
NULL};
InstructionOperand()};
os << "gap ";
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
......@@ -312,7 +309,7 @@ std::ostream& operator<<(std::ostream& os,
if (instr.OutputCount() > 1) os << "(";
for (size_t i = 0; i < instr.OutputCount(); i++) {
if (i > 0) os << ", ";
printable_op.op_ = instr.OutputAt(i);
printable_op.op_ = *instr.OutputAt(i);
os << printable_op;
}
......@@ -330,7 +327,7 @@ std::ostream& operator<<(std::ostream& os,
}
if (instr.InputCount() > 0) {
for (size_t i = 0; i < instr.InputCount(); i++) {
printable_op.op_ = instr.InputAt(i);
printable_op.op_ = *instr.InputAt(i);
os << " " << printable_op;
}
}
......@@ -368,14 +365,12 @@ PhiInstruction::PhiInstruction(Zone* zone, int virtual_register,
size_t input_count)
: virtual_register_(virtual_register),
output_(UnallocatedOperand(UnallocatedOperand::NONE, virtual_register)),
operands_(input_count, zone),
inputs_(input_count, zone) {}
operands_(input_count, InstructionOperand::kInvalidVirtualRegister,
zone) {}
void PhiInstruction::SetInput(size_t offset, int virtual_register) {
DCHECK(inputs_[offset].IsInvalid());
auto input = UnallocatedOperand(UnallocatedOperand::ANY, virtual_register);
inputs_[offset] = input;
DCHECK_EQ(InstructionOperand::kInvalidVirtualRegister, operands_[offset]);
operands_[offset] = virtual_register;
}
......@@ -726,11 +721,10 @@ std::ostream& operator<<(std::ostream& os,
for (auto phi : block->phis()) {
PrintableInstructionOperand printable_op = {
printable.register_configuration_, &phi->output()};
printable.register_configuration_, phi->output()};
os << " phi: " << printable_op << " =";
for (auto input : phi->inputs()) {
printable_op.op_ = &input;
os << " " << printable_op;
for (auto input : phi->operands()) {
os << " v" << input;
}
os << "\n";
}
......
......@@ -50,10 +50,6 @@ class InstructionOperand {
inline bool IsStackSlot() const;
inline bool IsDoubleStackSlot() const;
bool Equals(const InstructionOperand* other) const {
return value_ == other->value_;
}
// Useful for map/set keys.
bool operator<(const InstructionOperand& op) const {
return value_ < op.value_;
......@@ -63,6 +59,10 @@ class InstructionOperand {
return value_ == op.value_;
}
bool operator!=(const InstructionOperand& op) const {
return value_ != op.value_;
}
template <typename SubKindOperand>
static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
void* buffer = zone->New(sizeof(op));
......@@ -84,7 +84,7 @@ class InstructionOperand {
struct PrintableInstructionOperand {
const RegisterConfiguration* register_configuration_;
const InstructionOperand* op_;
InstructionOperand op_;
};
std::ostream& operator<<(std::ostream& os,
......@@ -167,12 +167,6 @@ class UnallocatedOperand : public InstructionOperand {
value_ |= LifetimeField::encode(lifetime);
}
UnallocatedOperand* Copy(Zone* zone) { return New(zone, *this); }
UnallocatedOperand* CopyUnconstrained(Zone* zone) {
return New(zone, UnallocatedOperand(ANY, virtual_register()));
}
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
......@@ -435,43 +429,55 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_CLASS)
#undef ALLOCATED_OPERAND_CLASS
class MoveOperands FINAL {
class MoveOperands FINAL : public ZoneObject {
public:
MoveOperands(InstructionOperand* source, InstructionOperand* destination)
: source_(source), destination_(destination) {}
MoveOperands(const InstructionOperand& source,
const InstructionOperand& destination)
: source_(source), destination_(destination) {
DCHECK(!source.IsInvalid() && !destination.IsInvalid());
}
InstructionOperand* source() const { return source_; }
void set_source(InstructionOperand* operand) { source_ = operand; }
const InstructionOperand& source() const { return source_; }
InstructionOperand& source() { return source_; }
void set_source(const InstructionOperand& operand) { source_ = operand; }
InstructionOperand* destination() const { return destination_; }
void set_destination(InstructionOperand* operand) { destination_ = operand; }
const InstructionOperand& destination() const { return destination_; }
InstructionOperand& destination() { return destination_; }
void set_destination(const InstructionOperand& operand) {
destination_ = operand;
}
// The gap resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
bool IsPending() const { return destination_ == NULL && source_ != NULL; }
bool IsPending() const {
return destination_.IsInvalid() && !source_.IsInvalid();
}
void SetPending() { destination_ = InstructionOperand(); }
// True if this move is a move into the given destination operand.
bool Blocks(InstructionOperand* operand) const {
return !IsEliminated() && source()->Equals(operand);
bool Blocks(const InstructionOperand& operand) const {
return !IsEliminated() && source() == operand;
}
// A move is redundant if it's been eliminated or if its source and
// destination are the same.
bool IsRedundant() const {
DCHECK_IMPLIES(destination_ != nullptr, !destination_->IsConstant());
return IsEliminated() || source_->Equals(destination_);
DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
return IsEliminated() || source_ == destination_;
}
// We clear both operands to indicate a move that's been eliminated.
void Eliminate() { source_ = destination_ = NULL; }
void Eliminate() { source_ = destination_ = InstructionOperand(); }
bool IsEliminated() const {
DCHECK(source_ != NULL || destination_ == NULL);
return source_ == NULL;
DCHECK_IMPLIES(source_.IsInvalid(), destination_.IsInvalid());
return source_.IsInvalid();
}
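// Summary of the state encoding above (editorial note, not part of the
// diff): a live move has both operands valid; a pending move has an invalid
// destination but a still-valid source; an eliminated move has an invalid
// source (and therefore an invalid destination).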
private:
InstructionOperand* source_;
InstructionOperand* destination_;
InstructionOperand source_;
InstructionOperand destination_;
DISALLOW_COPY_AND_ASSIGN(MoveOperands);
};
......@@ -484,29 +490,29 @@ struct PrintableMoveOperands {
std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
class ParallelMove FINAL : public ZoneObject {
class ParallelMove FINAL : public ZoneVector<MoveOperands*>, public ZoneObject {
public:
explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {
reserve(4);
}
void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
move_operands_.Add(MoveOperands(from, to), zone);
MoveOperands* AddMove(const InstructionOperand& from,
const InstructionOperand& to) {
auto zone = get_allocator().zone();
auto move = new (zone) MoveOperands(from, to);
push_back(move);
return move;
}
bool IsRedundant() const;
ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
const ZoneList<MoveOperands>* move_operands() const {
return &move_operands_;
}
// Prepare this ParallelMove to insert move as if it happened in a subsequent
// ParallelMove. move->source() may be changed. The MoveOperands returned
// must be Eliminated and, as it points directly into move_operands_, it must
// be Eliminated before any further mutation.
// must be Eliminated.
MoveOperands* PrepareInsertAfter(MoveOperands* move) const;
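// Example (hypothetical operands): if this ParallelMove contains {A -> B}
// and move is {B -> C}, PrepareInsertAfter rewrites move's source to A; if
// this ParallelMove also contains {D -> C}, that now-dead move is returned
// so the caller can Eliminate it.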
private:
ZoneList<MoveOperands> move_operands_;
DISALLOW_COPY_AND_ASSIGN(ParallelMove);
};
......@@ -856,18 +862,15 @@ class PhiInstruction FINAL : public ZoneObject {
int virtual_register() const { return virtual_register_; }
const IntVector& operands() const { return operands_; }
// TODO(dcarney): this has no real business being here, since it's internal to
// the register allocator, but putting it here was convenient.
const InstructionOperand& output() const { return output_; }
InstructionOperand& output() { return output_; }
const Inputs& inputs() const { return inputs_; }
Inputs& inputs() { return inputs_; }
private:
// TODO(dcarney): some of these fields are only for verification, move them to
// verifier.
const int virtual_register_;
InstructionOperand output_;
IntVector operands_;
Inputs inputs_;
};
......
......@@ -21,16 +21,13 @@ bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
int FindFirstNonEmptySlot(Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
for (; i <= Instruction::LAST_GAP_POSITION; i++) {
auto move = instr->parallel_moves()[i];
if (move == nullptr) continue;
auto move_ops = move->move_operands();
auto op = move_ops->begin();
for (; op != move_ops->end(); ++op) {
if (!op->IsRedundant()) break;
op->Eliminate();
auto moves = instr->parallel_moves()[i];
if (moves == nullptr) continue;
for (auto move : *moves) {
if (!move->IsRedundant()) return i;
move->Eliminate();
}
if (op != move_ops->end()) break; // Found non-redundant move.
move_ops->Rewind(0); // Clear this redundant move.
moves->clear(); // Clear this redundant move.
}
return i;
}
......@@ -63,29 +60,27 @@ void MoveOptimizer::Run() {
void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
ParallelMove* right) {
DCHECK(eliminated->empty());
auto move_ops = right->move_operands();
if (!left->move_operands()->is_empty()) {
if (!left->empty()) {
// Modify the right moves in place and collect moves that will be killed by
// merging the two gaps.
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
auto to_eliminate = left->PrepareInsertAfter(op);
for (auto move : *right) {
if (move->IsRedundant()) continue;
auto to_eliminate = left->PrepareInsertAfter(move);
if (to_eliminate != nullptr) eliminated->push_back(to_eliminate);
}
// Eliminate dead moves. Must happen before insertion of new moves as the
// contents of eliminated are pointers into a list.
// Eliminate dead moves.
for (auto to_eliminate : *eliminated) {
to_eliminate->Eliminate();
}
eliminated->clear();
}
// Add all possibly modified moves from right side.
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
left->move_operands()->Add(*op, code_zone());
for (auto move : *right) {
if (move->IsRedundant()) continue;
left->push_back(move);
}
// Nuke right.
move_ops->Rewind(0);
right->clear();
}
......@@ -159,14 +154,13 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
auto pred = code()->InstructionBlockAt(pred_index);
auto instr = LastInstruction(pred);
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->move_operands()->is_empty()) {
instr->parallel_moves()[0]->empty()) {
return;
}
auto move_ops = instr->parallel_moves()[0]->move_operands();
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
auto src = *op->source();
auto dst = *op->destination();
for (auto move : *instr->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
auto src = move->source();
auto dst = move->destination();
MoveKey key = {src, dst};
auto res = move_map.insert(std::make_pair(key, 1));
if (!res.second) {
......@@ -188,30 +182,29 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
DCHECK(instr != nullptr);
bool gap_initialized = true;
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->move_operands()->is_empty()) {
instr->parallel_moves()[0]->empty()) {
to_finalize_.push_back(instr);
} else {
// Will compress after insertion.
gap_initialized = false;
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
}
auto move = instr->GetOrCreateParallelMove(
auto moves = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(0), code_zone());
// Delete relevant entries in predecessors and move everything to block.
bool first_iteration = true;
for (auto pred_index : block->predecessors()) {
auto pred = code()->InstructionBlockAt(pred_index);
auto move_ops = LastInstruction(pred)->parallel_moves()[0]->move_operands();
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
MoveKey key = {*op->source(), *op->destination()};
for (auto move : *LastInstruction(pred)->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
MoveKey key = {move->source(), move->destination()};
auto it = move_map.find(key);
USE(it);
DCHECK(it != move_map.end());
if (first_iteration) {
move->AddMove(op->source(), op->destination(), code_zone());
moves->AddMove(move->source(), move->destination());
}
op->Eliminate();
move->Eliminate();
}
first_iteration = false;
}
......@@ -223,70 +216,55 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
}
namespace {
bool IsSlot(const InstructionOperand& op) {
return op.IsStackSlot() || op.IsDoubleStackSlot();
}
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
if (a->source() != b->source()) return a->source() < b->source();
if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
return a->destination() < b->destination();
}
} // namespace
// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
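// Illustrative example (hypothetical operands): for loads {C -> r1, C -> s1,
// C -> r2} of one constant C, sorting puts the register destinations first;
// C -> r1 stays in the first slot, while r1 -> r2 and r1 -> s1 are emitted
// into the second slot.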
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
auto loads = temp_vector_0();
DCHECK(loads.empty());
auto new_moves = temp_vector_1();
DCHECK(new_moves.empty());
auto move_ops = instr->parallel_moves()[0]->move_operands();
for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
if (move->IsRedundant()) {
move->Eliminate();
continue;
}
if (!(move->source()->IsConstant() || move->source()->IsStackSlot() ||
move->source()->IsDoubleStackSlot()))
continue;
// Search for existing move to this slot.
MoveOperands* found = nullptr;
for (auto load : loads) {
if (load->source()->Equals(move->source())) {
found = load;
break;
}
}
// Not found so insert.
if (found == nullptr) {
// Find all the loads.
for (auto move : *instr->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
if (move->source().IsConstant() || IsSlot(move->source())) {
loads.push_back(move);
// Replace source with copy for later use.
auto dest = move->destination();
move->set_destination(InstructionOperand::New(code_zone(), *dest));
continue;
}
if ((found->destination()->IsStackSlot() ||
found->destination()->IsDoubleStackSlot()) &&
!(move->destination()->IsStackSlot() ||
move->destination()->IsDoubleStackSlot())) {
// Found a better source for this load. Smash it in place to affect other
// loads that have already been split.
auto next_dest =
InstructionOperand::New(code_zone(), *found->destination());
auto dest = move->destination();
InstructionOperand::ReplaceWith(found->destination(), dest);
move->set_destination(next_dest);
}
if (loads.empty()) return;
// Group the loads by source, moving the preferred destination to the
// beginning of the group.
std::sort(loads.begin(), loads.end(), LoadCompare);
MoveOperands* group_begin = nullptr;
for (auto load : loads) {
// New group.
if (group_begin == nullptr || load->source() != group_begin->source()) {
group_begin = load;
continue;
}
// move from load destination.
move->set_source(found->destination());
new_moves.push_back(move);
// Nothing to be gained from splitting here.
if (IsSlot(group_begin->destination())) continue;
// Insert new move into slot 1.
auto slot_1 = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(1), code_zone());
slot_1->AddMove(group_begin->destination(), load->destination());
load->Eliminate();
}
loads.clear();
if (new_moves.empty()) return;
// Insert all new moves into slot 1.
auto slot_1 = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(1), code_zone());
DCHECK(slot_1->move_operands()->is_empty());
slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
static_cast<int>(new_moves.size()),
code_zone());
auto it = slot_1->move_operands()->begin();
for (auto new_move : new_moves) {
std::swap(*new_move, *it);
++it;
}
DCHECK_EQ(it, slot_1->move_operands()->end());
new_moves.clear();
}
} // namespace compiler
......
......@@ -10,12 +10,14 @@ namespace v8 {
namespace internal {
namespace compiler {
static size_t OperandCount(const Instruction* instr) {
namespace {
size_t OperandCount(const Instruction* instr) {
return instr->InputCount() + instr->OutputCount() + instr->TempCount();
}
static void VerifyEmptyGaps(const Instruction* instr) {
void VerifyEmptyGaps(const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
......@@ -25,6 +27,24 @@ static void VerifyEmptyGaps(const Instruction* instr) {
}
void VerifyAllocatedGaps(const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
static_cast<Instruction::GapPosition>(i);
auto moves = instr->GetParallelMove(inner_pos);
if (moves == nullptr) continue;
for (auto move : *moves) {
if (move->IsRedundant()) continue;
CHECK(move->source().IsAllocated() || move->source().IsConstant());
CHECK(move->destination().IsAllocated());
}
}
}
} // namespace
void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
......@@ -94,6 +114,8 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
auto instr_it = sequence()->begin();
for (const auto& instr_constraint : *constraints()) {
const auto* instr = instr_constraint.instruction_;
// All gaps should be totally allocated at this point.
VerifyAllocatedGaps(instr);
const size_t operand_count = instr_constraint.operand_constaints_size_;
const auto* op_constraints = instr_constraint.operand_constraints_;
CHECK_EQ(instr, *instr_it);
......@@ -298,7 +320,7 @@ class OperandMap : public ZoneObject {
this->erase(it++);
if (it == this->end()) return;
}
if (it->first->Equals(o.first)) {
if (*it->first == *o.first) {
++it;
if (it == this->end()) return;
} else {
......@@ -312,23 +334,22 @@ class OperandMap : public ZoneObject {
Map& map() { return map_; }
void RunParallelMoves(Zone* zone, const ParallelMove* move) {
void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
// Compute outgoing mappings.
Map to_insert(zone);
auto moves = move->move_operands();
for (auto i = moves->begin(); i != moves->end(); ++i) {
if (i->IsEliminated()) continue;
auto cur = map().find(i->source());
for (auto move : *moves) {
if (move->IsEliminated()) continue;
auto cur = map().find(&move->source());
CHECK(cur != map().end());
auto res =
to_insert.insert(std::make_pair(i->destination(), cur->second));
to_insert.insert(std::make_pair(&move->destination(), cur->second));
// Ensure injectivity of moves.
CHECK(res.second);
}
// Drop current mappings.
for (auto i = moves->begin(); i != moves->end(); ++i) {
if (i->IsEliminated()) continue;
auto cur = map().find(i->destination());
for (auto move : *moves) {
if (move->IsEliminated()) continue;
auto cur = map().find(&move->destination());
if (cur != map().end()) map().erase(cur);
}
// Insert new values.
......
......@@ -509,8 +509,9 @@ class RegisterAllocator FINAL : public ZoneObject {
InstructionOperand* hint);
void Use(LifetimePosition block_start, LifetimePosition position,
InstructionOperand* operand, InstructionOperand* hint);
void AddGapMove(int index, Instruction::GapPosition position,
InstructionOperand* from, InstructionOperand* to);
MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
const InstructionOperand& from,
const InstructionOperand& to);
// Helper methods for updating the life range lists.
void AddToActive(LiveRange* range);
......@@ -574,9 +575,9 @@ class RegisterAllocator FINAL : public ZoneObject {
// Helper methods for resolving control flow.
void ResolveControlFlow(const InstructionBlock* block,
InstructionOperand* cur_op,
const InstructionOperand& cur_op,
const InstructionBlock* pred,
InstructionOperand* pred_op);
const InstructionOperand& pred_op);
void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
......@@ -595,6 +596,7 @@ class RegisterAllocator FINAL : public ZoneObject {
const char* RegisterName(int allocation_index);
Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
void AssignPhiInput(LiveRange* range, const InstructionOperand& assignment);
Frame* frame() const { return frame_; }
const char* debug_name() const { return debug_name_; }
......@@ -613,13 +615,17 @@ class RegisterAllocator FINAL : public ZoneObject {
}
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
struct PhiMapValue {
PhiMapValue(PhiInstruction* phi, const InstructionBlock* block)
: phi(phi), block(block) {}
class PhiMapValue : public ZoneObject {
public:
PhiMapValue(PhiInstruction* phi, const InstructionBlock* block, Zone* zone)
: phi(phi), block(block), incoming_moves(zone) {
incoming_moves.reserve(phi->operands().size());
}
PhiInstruction* const phi;
const InstructionBlock* const block;
ZoneVector<MoveOperands*> incoming_moves;
};
typedef ZoneMap<int, PhiMapValue> PhiMap;
typedef ZoneMap<int, PhiMapValue*> PhiMap;
Zone* const local_zone_;
Frame* const frame_;
......
......@@ -57,6 +57,8 @@ class zone_allocator {
return zone_ != other.zone_;
}
Zone* zone() { return zone_; }
private:
zone_allocator();
Zone* zone_;
......
......@@ -14,12 +14,10 @@ using namespace v8::internal::compiler;
// that the actual values don't really matter; all we care about is equality.
class InterpreterState {
public:
typedef std::vector<MoveOperands> Moves;
void ExecuteInParallel(Moves moves) {
void ExecuteInParallel(const ParallelMove* moves) {
InterpreterState copy(*this);
for (Moves::iterator it = moves.begin(); it != moves.end(); ++it) {
if (!it->IsRedundant()) write(it->destination(), copy.read(it->source()));
for (const auto m : *moves) {
if (!m->IsRedundant()) write(m->destination(), copy.read(m->source()));
}
}
......@@ -57,12 +55,12 @@ class InterpreterState {
typedef Key Value;
typedef std::map<Key, Value> OperandMap;
Value read(const InstructionOperand* op) const {
Value read(const InstructionOperand& op) const {
OperandMap::const_iterator it = values_.find(KeyFor(op));
return (it == values_.end()) ? ValueFor(op) : it->second;
}
void write(const InstructionOperand* op, Value v) {
void write(const InstructionOperand& op, Value v) {
if (v == ValueFor(op)) {
values_.erase(KeyFor(op));
} else {
......@@ -70,22 +68,22 @@ class InterpreterState {
}
}
static Key KeyFor(const InstructionOperand* op) {
bool is_constant = op->IsConstant();
static Key KeyFor(const InstructionOperand& op) {
bool is_constant = op.IsConstant();
AllocatedOperand::AllocatedKind kind;
int index;
if (!is_constant) {
index = AllocatedOperand::cast(op)->index();
kind = AllocatedOperand::cast(op)->allocated_kind();
index = AllocatedOperand::cast(op).index();
kind = AllocatedOperand::cast(op).allocated_kind();
} else {
index = ConstantOperand::cast(op)->virtual_register();
index = ConstantOperand::cast(op).virtual_register();
kind = AllocatedOperand::REGISTER;
}
Key key = {is_constant, kind, index};
return key;
}
static Value ValueFor(const InstructionOperand* op) { return KeyFor(op); }
static Value ValueFor(const InstructionOperand& op) { return KeyFor(op); }
static InstructionOperand FromKey(Key key) {
if (key.is_constant) {
......@@ -101,7 +99,7 @@ class InterpreterState {
if (it != is.values_.begin()) os << " ";
InstructionOperand source = FromKey(it->first);
InstructionOperand destination = FromKey(it->second);
MoveOperands mo(&source, &destination);
MoveOperands mo(source, destination);
PrintableMoveOperands pmo = {RegisterConfiguration::ArchDefault(), &mo};
os << pmo;
}
......@@ -115,30 +113,31 @@ class InterpreterState {
// An abstract interpreter for moves, swaps and parallel moves.
class MoveInterpreter : public GapResolver::Assembler {
public:
explicit MoveInterpreter(Zone* zone) : zone_(zone) {}
virtual void AssembleMove(InstructionOperand* source,
InstructionOperand* destination) OVERRIDE {
InterpreterState::Moves moves;
moves.push_back(MoveOperands(source, destination));
ParallelMove* moves = new (zone_) ParallelMove(zone_);
moves->AddMove(*source, *destination);
state_.ExecuteInParallel(moves);
}
virtual void AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) OVERRIDE {
InterpreterState::Moves moves;
moves.push_back(MoveOperands(source, destination));
moves.push_back(MoveOperands(destination, source));
ParallelMove* moves = new (zone_) ParallelMove(zone_);
moves->AddMove(*source, *destination);
moves->AddMove(*destination, *source);
state_.ExecuteInParallel(moves);
}
void AssembleParallelMove(const ParallelMove* pm) {
InterpreterState::Moves moves(pm->move_operands()->begin(),
pm->move_operands()->end());
void AssembleParallelMove(const ParallelMove* moves) {
state_.ExecuteInParallel(moves);
}
InterpreterState state() const { return state_; }
private:
Zone* const zone_;
InterpreterState state_;
};
......@@ -149,11 +148,11 @@ class ParallelMoveCreator : public HandleAndZoneScope {
ParallelMove* Create(int size) {
ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
std::set<InstructionOperand*, InstructionOperandComparator> seen;
std::set<InstructionOperand> seen;
for (int i = 0; i < size; ++i) {
MoveOperands mo(CreateRandomOperand(true), CreateRandomOperand(false));
if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
parallel_move->AddMove(mo.source(), mo.destination(), main_zone());
parallel_move->AddMove(mo.source(), mo.destination());
seen.insert(mo.destination());
}
}
......@@ -161,30 +160,23 @@ class ParallelMoveCreator : public HandleAndZoneScope {
}
private:
struct InstructionOperandComparator {
bool operator()(const InstructionOperand* x,
const InstructionOperand* y) const {
return *x < *y;
}
};
InstructionOperand* CreateRandomOperand(bool is_source) {
InstructionOperand CreateRandomOperand(bool is_source) {
int index = rng_->NextInt(6);
// destination can't be Constant.
switch (rng_->NextInt(is_source ? 5 : 4)) {
case 0:
return StackSlotOperand::New(main_zone(), index);
return StackSlotOperand(index);
case 1:
return DoubleStackSlotOperand::New(main_zone(), index);
return DoubleStackSlotOperand(index);
case 2:
return RegisterOperand::New(main_zone(), index);
return RegisterOperand(index);
case 3:
return DoubleRegisterOperand::New(main_zone(), index);
return DoubleRegisterOperand(index);
case 4:
return ConstantOperand::New(main_zone(), index);
return ConstantOperand(index);
}
UNREACHABLE();
return NULL;
return InstructionOperand();
}
private:
......@@ -199,10 +191,10 @@ TEST(FuzzResolver) {
ParallelMove* pm = pmc.Create(size);
// Note: The gap resolver modifies the ParallelMove, so interpret first.
MoveInterpreter mi1;
MoveInterpreter mi1(pmc.main_zone());
mi1.AssembleParallelMove(pm);
MoveInterpreter mi2;
MoveInterpreter mi2(pmc.main_zone());
GapResolver resolver(&mi2);
resolver.Resolve(pm);
......
......@@ -83,8 +83,8 @@ class InstructionTester : public HandleAndZoneScope {
return code->AddInstruction(instr);
}
UnallocatedOperand* NewUnallocated(int vreg) {
return UnallocatedOperand(UnallocatedOperand::ANY, vreg).Copy(zone());
UnallocatedOperand Unallocated(int vreg) {
return UnallocatedOperand(UnallocatedOperand::ANY, vreg);
}
RpoNumber RpoFor(BasicBlock* block) {
......@@ -255,17 +255,16 @@ TEST(InstructionAddGapMove) {
int index = 0;
for (auto instr : R.code->instructions()) {
UnallocatedOperand* op1 = R.NewUnallocated(index++);
UnallocatedOperand* op2 = R.NewUnallocated(index++);
UnallocatedOperand op1 = R.Unallocated(index++);
UnallocatedOperand op2 = R.Unallocated(index++);
instr->GetOrCreateParallelMove(TestInstr::START, R.zone())
->AddMove(op1, op2, R.zone());
->AddMove(op1, op2);
ParallelMove* move = instr->GetParallelMove(TestInstr::START);
CHECK(move);
const ZoneList<MoveOperands>* move_operands = move->move_operands();
CHECK_EQ(1, move_operands->length());
MoveOperands* cur = &move_operands->at(0);
CHECK_EQ(op1, cur->source());
CHECK_EQ(op2, cur->destination());
CHECK_EQ(1u, move->size());
MoveOperands* cur = move->at(0);
CHECK(op1 == cur->source());
CHECK(op2 == cur->destination());
}
}
......@@ -309,15 +308,15 @@ TEST(InstructionOperands) {
CHECK(k == m->TempCount());
for (size_t z = 0; z < i; z++) {
CHECK(outputs[z].Equals(m->OutputAt(z)));
CHECK(outputs[z] == *m->OutputAt(z));
}
for (size_t z = 0; z < j; z++) {
CHECK(inputs[z].Equals(m->InputAt(z)));
CHECK(inputs[z] == *m->InputAt(z));
}
for (size_t z = 0; z < k; z++) {
CHECK(temps[z].Equals(m->TempAt(z)));
CHECK(temps[z] == *m->TempAt(z));
}
}
}
......
......@@ -59,15 +59,13 @@ class TestCode : public HandleAndZoneScope {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, RegisterOperand::New(main_zone(), 13),
RegisterOperand::New(main_zone(), 13));
AddGapMove(index, RegisterOperand(13), RegisterOperand(13));
}
void NonRedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, ConstantOperand::New(main_zone(), 11),
RegisterOperand::New(main_zone(), 11));
AddGapMove(index, ConstantOperand(11), RegisterOperand(11));
}
void Other() {
Start();
......@@ -95,10 +93,11 @@ class TestCode : public HandleAndZoneScope {
CHECK(current_ == NULL);
Start(true);
}
void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to) {
void AddGapMove(int index, const InstructionOperand& from,
const InstructionOperand& to) {
sequence_.InstructionAt(index)
->GetOrCreateParallelMove(Instruction::START, main_zone())
->AddMove(from, to, main_zone());
->AddMove(from, to);
}
};
......
......@@ -16,26 +16,24 @@ class MoveOptimizerTest : public InstructionSequenceTest {
void AddMove(Instruction* instr, TestOperand from, TestOperand to,
Instruction::GapPosition pos = Instruction::START) {
auto parallel_move = instr->GetOrCreateParallelMove(pos, zone());
parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to), zone());
parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to));
}
int NonRedundantSize(ParallelMove* move) {
int NonRedundantSize(ParallelMove* moves) {
int i = 0;
auto ops = move->move_operands();
for (auto op = ops->begin(); op != ops->end(); ++op) {
if (op->IsRedundant()) continue;
for (auto move : *moves) {
if (move->IsRedundant()) continue;
i++;
}
return i;
}
bool Contains(ParallelMove* move, TestOperand from_op, TestOperand to_op) {
bool Contains(ParallelMove* moves, TestOperand from_op, TestOperand to_op) {
auto from = ConvertMoveArg(from_op);
auto to = ConvertMoveArg(to_op);
auto ops = move->move_operands();
for (auto op = ops->begin(); op != ops->end(); ++op) {
if (op->IsRedundant()) continue;
if (op->source()->Equals(from) && op->destination()->Equals(to)) {
for (auto move : *moves) {
if (move->IsRedundant()) continue;
if (move->source() == from && move->destination() == to) {
return true;
}
}
......@@ -62,22 +60,22 @@ class MoveOptimizerTest : public InstructionSequenceTest {
}
private:
InstructionOperand* ConvertMoveArg(TestOperand op) {
InstructionOperand ConvertMoveArg(TestOperand op) {
CHECK_EQ(kNoValue, op.vreg_.value_);
CHECK_NE(kNoValue, op.value_);
switch (op.type_) {
case kConstant:
return ConstantOperand::New(zone(), op.value_);
return ConstantOperand(op.value_);
case kFixedSlot:
return StackSlotOperand::New(zone(), op.value_);
return StackSlotOperand(op.value_);
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers());
return RegisterOperand::New(zone(), op.value_);
return RegisterOperand(op.value_);
default:
break;
}
CHECK(false);
return nullptr;
return InstructionOperand();
}
};
......