Commit e39750a2 authored by dcarney, committed by Commit bot

[turbofan] smash GapInstruction into Instruction

R=titzer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1041163002

Cr-Commit-Position: refs/heads/master@{#27538}
parent e9e8ac7a
......@@ -186,10 +186,8 @@ void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
void CodeGenerator::AssembleInstruction(Instruction* instr) {
if (instr->IsGapMoves()) {
// Handle parallel moves associated with the gap instruction.
AssembleGap(GapInstruction::cast(instr));
} else if (instr->IsSourcePosition()) {
AssembleGaps(instr);
if (instr->IsSourcePosition()) {
AssembleSourcePosition(SourcePositionInstruction::cast(instr));
} else {
// Assemble architecture-specific code for the instruction.
......@@ -258,13 +256,13 @@ void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
}
void CodeGenerator::AssembleGap(GapInstruction* instr) {
for (int i = GapInstruction::FIRST_INNER_POSITION;
i <= GapInstruction::LAST_INNER_POSITION; i++) {
GapInstruction::InnerPosition inner_pos =
static_cast<GapInstruction::InnerPosition>(i);
void CodeGenerator::AssembleGaps(Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
static_cast<Instruction::GapPosition>(i);
ParallelMove* move = instr->GetParallelMove(inner_pos);
if (move != NULL) resolver()->Resolve(move);
if (move != nullptr) resolver()->Resolve(move);
}
}
......
......@@ -61,7 +61,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
// Assemble code for the specified instruction.
void AssembleInstruction(Instruction* instr);
void AssembleSourcePosition(SourcePositionInstruction* instr);
void AssembleGap(GapInstruction* gap);
void AssembleGaps(Instruction* instr);
// ===========================================================================
// ============= Architecture-specific code generation methods. ==============
......
......@@ -593,10 +593,12 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
if (instruction_block->code_start() >= 0) {
int first_index = instruction_block->first_instruction_index();
int last_index = instruction_block->last_instruction_index();
PrintIntProperty("first_lir_id", LifetimePosition::FromInstructionIndex(
first_index).Value());
PrintIntProperty("last_lir_id", LifetimePosition::FromInstructionIndex(
last_index).Value());
PrintIntProperty(
"first_lir_id",
LifetimePosition::GapFromInstructionIndex(first_index).Value());
PrintIntProperty("last_lir_id",
LifetimePosition::InstructionFromInstructionIndex(
last_index).Value());
}
{
......
......@@ -110,7 +110,10 @@ Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
TempCountField::encode(0) | IsCallField::encode(false)),
pointer_map_(NULL) {}
pointer_map_(NULL) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
}
Instruction::Instruction(InstructionCode opcode, size_t output_count,
......@@ -123,6 +126,8 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
TempCountField::encode(temp_count) |
IsCallField::encode(false)),
pointer_map_(NULL) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
size_t offset = 0;
for (size_t i = 0; i < output_count; ++i) {
DCHECK(!outputs[i].IsInvalid());
......@@ -139,11 +144,12 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
}
bool GapInstruction::IsRedundant() const {
for (int i = GapInstruction::FIRST_INNER_POSITION;
i <= GapInstruction::LAST_INNER_POSITION; i++) {
if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant())
bool Instruction::AreMovesRedundant() const {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
if (parallel_moves_[i] != nullptr && !parallel_moves_[i]->IsRedundant()) {
return false;
}
}
return true;
}
......@@ -289,6 +295,19 @@ std::ostream& operator<<(std::ostream& os,
const Instruction& instr = *printable.instr_;
PrintableInstructionOperand printable_op = {printable.register_configuration_,
NULL};
os << "gap ";
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
os << "(";
if (instr.parallel_moves()[i] != NULL) {
PrintableParallelMove ppm = {printable.register_configuration_,
instr.parallel_moves()[i]};
os << ppm;
}
os << ") ";
}
os << "\n ";
if (instr.OutputCount() > 1) os << "(";
for (size_t i = 0; i < instr.OutputCount(); i++) {
if (i > 0) os << ", ";
......@@ -299,20 +318,7 @@ std::ostream& operator<<(std::ostream& os,
if (instr.OutputCount() > 1) os << ") = ";
if (instr.OutputCount() == 1) os << " = ";
if (instr.IsGapMoves()) {
const GapInstruction* gap = GapInstruction::cast(&instr);
os << "gap ";
for (int i = GapInstruction::FIRST_INNER_POSITION;
i <= GapInstruction::LAST_INNER_POSITION; i++) {
os << "(";
if (gap->parallel_moves_[i] != NULL) {
PrintableParallelMove ppm = {printable.register_configuration_,
gap->parallel_moves_[i]};
os << ppm;
}
os << ") ";
}
} else if (instr.IsSourcePosition()) {
if (instr.IsSourcePosition()) {
const SourcePositionInstruction* pos =
SourcePositionInstruction::cast(&instr);
os << "position (" << pos->source_position().raw() << ")";
......@@ -494,9 +500,9 @@ int InstructionSequence::NextVirtualRegister() {
}
GapInstruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
Instruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
const InstructionBlock* block = InstructionBlockAt(rpo);
return GapInstruction::cast(InstructionAt(block->code_start()));
return InstructionAt(block->code_start());
}
......@@ -522,8 +528,6 @@ void InstructionSequence::EndBlock(RpoNumber rpo) {
int InstructionSequence::AddInstruction(Instruction* instr) {
GapInstruction* gap = GapInstruction::New(zone());
instructions_.push_back(gap);
int index = static_cast<int>(instructions_.size());
instructions_.push_back(instr);
if (instr->NeedsPointerMap()) {
......@@ -571,13 +575,6 @@ void InstructionSequence::MarkAsDouble(int virtual_register) {
}
void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
InstructionOperand* to) {
GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
from, to, zone());
}
InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
FrameStateDescriptor* descriptor) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
......
......@@ -25,8 +25,7 @@ namespace compiler {
class Schedule;
// A couple of reserved opcodes are used for internal use.
const InstructionCode kGapInstruction = -1;
const InstructionCode kSourcePositionInstruction = -2;
const InstructionCode kSourcePositionInstruction = -1;
#define INSTRUCTION_OPERAND_LIST(V) \
V(Constant, CONSTANT) \
......@@ -543,7 +542,6 @@ class Instruction {
bool NeedsPointerMap() const { return IsCall(); }
bool HasPointerMap() const { return pointer_map_ != NULL; }
bool IsGapMoves() const { return opcode() == kGapInstruction; }
bool IsSourcePosition() const {
return opcode() == kSourcePositionInstruction;
}
......@@ -570,8 +568,37 @@ class Instruction {
OutputCount() == 0 && TempCount() == 0;
}
enum GapPosition {
START,
END,
FIRST_GAP_POSITION = START,
LAST_GAP_POSITION = END
};
ParallelMove* GetOrCreateParallelMove(GapPosition pos, Zone* zone) {
if (parallel_moves_[pos] == nullptr) {
parallel_moves_[pos] = new (zone) ParallelMove(zone);
}
return parallel_moves_[pos];
}
ParallelMove* GetParallelMove(GapPosition pos) {
return parallel_moves_[pos];
}
const ParallelMove* GetParallelMove(GapPosition pos) const {
return parallel_moves_[pos];
}
bool AreMovesRedundant() const;
ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
protected:
explicit Instruction(InstructionCode opcode);
private:
Instruction(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, size_t temp_count,
......@@ -584,6 +611,7 @@ class Instruction {
InstructionCode opcode_;
uint32_t bit_field_;
ParallelMove* parallel_moves_[2];
PointerMap* pointer_map_;
InstructionOperand operands_[1];
......@@ -599,65 +627,6 @@ struct PrintableInstruction {
std::ostream& operator<<(std::ostream& os, const PrintableInstruction& instr);
// Represents moves inserted before an instruction due to register allocation.
// TODO(titzer): squash GapInstruction back into Instruction, since essentially
// every instruction can possibly have moves inserted before it.
class GapInstruction : public Instruction {
public:
enum InnerPosition {
START,
END,
FIRST_INNER_POSITION = START,
LAST_INNER_POSITION = END
};
ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
if (parallel_moves_[pos] == NULL) {
parallel_moves_[pos] = new (zone) ParallelMove(zone);
}
return parallel_moves_[pos];
}
ParallelMove* GetParallelMove(InnerPosition pos) {
return parallel_moves_[pos];
}
const ParallelMove* GetParallelMove(InnerPosition pos) const {
return parallel_moves_[pos];
}
bool IsRedundant() const;
ParallelMove** parallel_moves() { return parallel_moves_; }
static GapInstruction* New(Zone* zone) {
void* buffer = zone->New(sizeof(GapInstruction));
return new (buffer) GapInstruction(kGapInstruction);
}
static GapInstruction* cast(Instruction* instr) {
DCHECK(instr->IsGapMoves());
return static_cast<GapInstruction*>(instr);
}
static const GapInstruction* cast(const Instruction* instr) {
DCHECK(instr->IsGapMoves());
return static_cast<const GapInstruction*>(instr);
}
protected:
explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
parallel_moves_[START] = NULL;
parallel_moves_[END] = NULL;
}
private:
friend std::ostream& operator<<(std::ostream& os,
const PrintableInstruction& instr);
ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
};
class SourcePositionInstruction FINAL : public Instruction {
public:
static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
......@@ -982,19 +951,13 @@ class InstructionSequence FINAL : public ZoneObject {
void MarkAsReference(int virtual_register);
void MarkAsDouble(int virtual_register);
void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
GapInstruction* GetBlockStart(RpoNumber rpo) const;
Instruction* GetBlockStart(RpoNumber rpo) const;
typedef InstructionDeque::const_iterator const_iterator;
const_iterator begin() const { return instructions_.begin(); }
const_iterator end() const { return instructions_.end(); }
const InstructionDeque& instructions() const { return instructions_; }
GapInstruction* GapAt(int index) const {
return GapInstruction::cast(InstructionAt(index));
}
bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
Instruction* InstructionAt(int index) const {
DCHECK(index >= 0);
DCHECK(index < static_cast<int>(instructions_.size()));
......
......@@ -76,10 +76,10 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
RpoNumber fw = block->rpo_number();
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
if (instr->IsGapMoves() && GapInstruction::cast(instr)->IsRedundant()) {
// skip redundant gap moves.
TRACE(" nop gap\n");
continue;
if (!instr->AreMovesRedundant()) {
// can't skip instructions with non redundant moves.
TRACE(" parallel move\n");
fallthru = false;
} else if (instr->IsSourcePosition()) {
// skip source positions.
TRACE(" src pos\n");
......
......@@ -16,15 +16,14 @@ typedef ZoneSet<InstructionOperand> OperandSet;
bool GapsCanMoveOver(Instruction* instr) {
DCHECK(!instr->IsGapMoves());
return instr->IsSourcePosition() || instr->IsNop();
}
int FindFirstNonEmptySlot(GapInstruction* gap) {
int i = GapInstruction::FIRST_INNER_POSITION;
for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
auto move = gap->parallel_moves()[i];
int FindFirstNonEmptySlot(Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
for (; i <= Instruction::LAST_GAP_POSITION; i++) {
auto move = instr->parallel_moves()[i];
if (move == nullptr) continue;
auto move_ops = move->move_operands();
auto op = move_ops->begin();
......@@ -97,52 +96,45 @@ void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
void MoveOptimizer::CompressBlock(InstructionBlock* block) {
auto temp_vector = temp_vector_0();
DCHECK(temp_vector.empty());
GapInstruction* prev_gap = nullptr;
Instruction* prev_instr = nullptr;
for (int index = block->code_start(); index < block->code_end(); ++index) {
auto instr = code()->instructions()[index];
if (!instr->IsGapMoves()) {
if (GapsCanMoveOver(instr)) continue;
if (prev_gap != nullptr) to_finalize_.push_back(prev_gap);
prev_gap = nullptr;
continue;
}
auto gap = GapInstruction::cast(instr);
int i = FindFirstNonEmptySlot(gap);
// Nothing to do here.
if (i == GapInstruction::LAST_INNER_POSITION + 1) {
if (prev_gap != nullptr) {
// Slide prev_gap down so we always know where to look for it.
std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
prev_gap = gap;
int i = FindFirstNonEmptySlot(instr);
if (i <= Instruction::LAST_GAP_POSITION) {
// Move the first non-empty gap to position 0.
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[i]);
auto left = instr->parallel_moves()[0];
// Compress everything into position 0.
for (++i; i <= Instruction::LAST_GAP_POSITION; ++i) {
auto move = instr->parallel_moves()[i];
if (move == nullptr) continue;
CompressMoves(&temp_vector, left, move);
}
if (prev_instr != nullptr) {
// Smash left into prev_instr, killing left.
auto pred_moves = prev_instr->parallel_moves()[0];
CompressMoves(&temp_vector, pred_moves, left);
}
continue;
}
// Move the first non-empty gap to position 0.
std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
auto left = gap->parallel_moves()[0];
// Compress everything into position 0.
for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
auto move = gap->parallel_moves()[i];
if (move == nullptr) continue;
CompressMoves(&temp_vector, left, move);
if (prev_instr != nullptr) {
// Slide prev_instr down so we always know where to look for it.
std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
}
if (prev_gap != nullptr) {
// Smash left into prev_gap, killing left.
auto pred_moves = prev_gap->parallel_moves()[0];
CompressMoves(&temp_vector, pred_moves, left);
// Slide prev_gap down so we always know where to look for it.
std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
if (GapsCanMoveOver(instr)) continue;
if (prev_instr != nullptr) {
to_finalize_.push_back(prev_instr);
prev_instr = nullptr;
}
prev_gap = gap;
}
if (prev_gap != nullptr) to_finalize_.push_back(prev_gap);
if (prev_instr != nullptr) {
to_finalize_.push_back(prev_instr);
}
}
GapInstruction* MoveOptimizer::LastGap(InstructionBlock* block) {
int gap_index = block->last_instruction_index() - 1;
auto instr = code()->instructions()[gap_index];
return GapInstruction::cast(instr);
Instruction* MoveOptimizer::LastInstruction(InstructionBlock* block) {
return code()->instructions()[block->last_instruction_index()];
}
......@@ -153,7 +145,6 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
for (auto pred_index : block->predecessors()) {
auto pred = code()->InstructionBlockAt(pred_index);
auto last_instr = code()->instructions()[pred->last_instruction_index()];
DCHECK(!last_instr->IsGapMoves());
if (last_instr->IsSourcePosition()) continue;
if (last_instr->IsCall()) return;
if (last_instr->TempCount() != 0) return;
......@@ -169,12 +160,12 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
// Accumulate set of shared moves.
for (auto pred_index : block->predecessors()) {
auto pred = code()->InstructionBlockAt(pred_index);
auto gap = LastGap(pred);
if (gap->parallel_moves()[0] == nullptr ||
gap->parallel_moves()[0]->move_operands()->is_empty()) {
auto instr = LastInstruction(pred);
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->move_operands()->is_empty()) {
return;
}
auto move_ops = gap->parallel_moves()[0]->move_operands();
auto move_ops = instr->parallel_moves()[0]->move_operands();
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
auto src = *op->source();
......@@ -191,34 +182,30 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
}
if (move_map.empty() || correct_counts != move_map.size()) return;
// Find insertion point.
GapInstruction* gap = nullptr;
Instruction* instr = nullptr;
for (int i = block->first_instruction_index();
i <= block->last_instruction_index(); ++i) {
auto instr = code()->instructions()[i];
if (instr->IsGapMoves()) {
gap = GapInstruction::cast(instr);
continue;
}
if (!GapsCanMoveOver(instr)) break;
instr = code()->instructions()[i];
if (!GapsCanMoveOver(instr) || !instr->AreMovesRedundant()) break;
}
DCHECK(gap != nullptr);
DCHECK(instr != nullptr);
bool gap_initialized = true;
if (gap->parallel_moves()[0] == nullptr ||
gap->parallel_moves()[0]->move_operands()->is_empty()) {
to_finalize_.push_back(gap);
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->move_operands()->is_empty()) {
to_finalize_.push_back(instr);
} else {
// Will compress after insertion.
gap_initialized = false;
std::swap(gap->parallel_moves()[0], gap->parallel_moves()[1]);
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
}
auto move = gap->GetOrCreateParallelMove(
static_cast<GapInstruction::InnerPosition>(0), code_zone());
auto move = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(0), code_zone());
// Delete relevant entries in predecessors and move everything to block.
bool first_iteration = true;
for (auto pred_index : block->predecessors()) {
auto pred = code()->InstructionBlockAt(pred_index);
auto gap = LastGap(pred);
auto move_ops = gap->parallel_moves()[0]->move_operands();
auto instr = LastInstruction(pred);
auto move_ops = instr->parallel_moves()[0]->move_operands();
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
MoveKey key = {*op->source(), *op->destination()};
......@@ -234,20 +221,20 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
}
// Compress.
if (!gap_initialized) {
CompressMoves(&temp_vector_0(), gap->parallel_moves()[0],
gap->parallel_moves()[1]);
CompressMoves(&temp_vector_0(), instr->parallel_moves()[0],
instr->parallel_moves()[1]);
}
}
// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(GapInstruction* gap) {
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
auto loads = temp_vector_0();
DCHECK(loads.empty());
auto new_moves = temp_vector_1();
DCHECK(new_moves.empty());
auto move_ops = gap->parallel_moves()[0]->move_operands();
auto move_ops = instr->parallel_moves()[0]->move_operands();
for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
if (move->IsRedundant()) {
move->Eliminate();
......@@ -294,8 +281,8 @@ void MoveOptimizer::FinalizeMoves(GapInstruction* gap) {
loads.clear();
if (new_moves.empty()) return;
// Insert all new moves into slot 1.
auto slot_1 = gap->GetOrCreateParallelMove(
static_cast<GapInstruction::InnerPosition>(1), code_zone());
auto slot_1 = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(1), code_zone());
DCHECK(slot_1->move_operands()->is_empty());
slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
static_cast<int>(new_moves.size()),
......
......@@ -19,7 +19,7 @@ class MoveOptimizer FINAL {
private:
typedef ZoneVector<MoveOperands*> MoveOpVector;
typedef ZoneVector<GapInstruction*> GapInstructions;
typedef ZoneVector<Instruction*> Instructions;
InstructionSequence* code() const { return code_; }
Zone* local_zone() const { return local_zone_; }
......@@ -30,13 +30,13 @@ class MoveOptimizer FINAL {
void CompressBlock(InstructionBlock* blocke);
void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
ParallelMove* right);
GapInstruction* LastGap(InstructionBlock* block);
Instruction* LastInstruction(InstructionBlock* block);
void OptimizeMerge(InstructionBlock* block);
void FinalizeMoves(GapInstruction* gap);
void FinalizeMoves(Instruction* instr);
Zone* const local_zone_;
InstructionSequence* const code_;
GapInstructions to_finalize_;
Instructions to_finalize_;
MoveOpVector temp_vector_0_;
MoveOpVector temp_vector_1_;
......
......@@ -15,12 +15,12 @@ static size_t OperandCount(const Instruction* instr) {
}
static void VerifyGapEmpty(const GapInstruction* gap) {
for (int i = GapInstruction::FIRST_INNER_POSITION;
i <= GapInstruction::LAST_INNER_POSITION; i++) {
GapInstruction::InnerPosition inner_pos =
static_cast<GapInstruction::InnerPosition>(i);
CHECK(!gap->GetParallelMove(inner_pos));
static void VerifyEmptyGaps(const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
static_cast<Instruction::GapPosition>(i);
CHECK(instr->GetParallelMove(inner_pos) == nullptr);
}
}
......@@ -60,6 +60,8 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
// Construct OperandConstraints for all InstructionOperands, eliminating
// kSameAsFirst along the way.
for (const auto* instr : sequence->instructions()) {
// All gaps should be totally unallocated at this point.
VerifyEmptyGaps(instr);
const size_t operand_count = OperandCount(instr);
auto* op_constraints = zone->NewArray<OperandConstraint>(operand_count);
size_t count = 0;
......@@ -80,11 +82,6 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
}
VerifyOutput(op_constraints[count]);
}
// All gaps should be totally unallocated at this point.
if (instr->IsGapMoves()) {
CHECK(operand_count == 0);
VerifyGapEmpty(GapInstruction::cast(instr));
}
InstructionConstraint instr_constraint = {instr, operand_count,
op_constraints};
constraints()->push_back(instr_constraint);
......@@ -329,11 +326,11 @@ class OperandMap : public ZoneObject {
map().insert(to_insert.begin(), to_insert.end());
}
void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
for (int i = GapInstruction::FIRST_INNER_POSITION;
i <= GapInstruction::LAST_INNER_POSITION; i++) {
auto inner_pos = static_cast<GapInstruction::InnerPosition>(i);
auto move = gap->GetParallelMove(inner_pos);
void RunGaps(Zone* zone, const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
auto inner_pos = static_cast<Instruction::GapPosition>(i);
auto move = instr->GetParallelMove(inner_pos);
if (move == nullptr) continue;
RunParallelMoves(zone, move);
}
......@@ -648,11 +645,7 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
++instr_index) {
const auto& instr_constraint = constraints_[instr_index];
const auto instr = instr_constraint.instruction_;
if (instr->IsSourcePosition()) continue;
if (instr->IsGapMoves()) {
current->RunGapInstruction(zone(), GapInstruction::cast(instr));
continue;
}
current->RunGaps(zone(), instr);
const auto op_constraints = instr_constraint.operand_constraints_;
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
......
This diff is collapsed.
......@@ -20,57 +20,84 @@ enum RegisterKind {
// This class represents a single point of a InstructionOperand's lifetime. For
// each instruction there are exactly two lifetime positions: the beginning and
// the end of the instruction. Lifetime positions for different instructions are
// disjoint.
// each instruction there are four lifetime positions:
//
// [[START, END], [START, END]]
//
// Where the first half position corresponds to
//
// [GapPosition::START, GapPosition::END]
//
// and the second half position corresponds to
//
// [Lifetime::USED_AT_START, Lifetime::USED_AT_END]
//
class LifetimePosition FINAL {
public:
// Return the lifetime position that corresponds to the beginning of
// the instruction with the given index.
static LifetimePosition FromInstructionIndex(int index) {
// the gap with the given index.
static LifetimePosition GapFromInstructionIndex(int index) {
return LifetimePosition(index * kStep);
}
// Return the lifetime position that corresponds to the beginning of
// the instruction with the given index.
static LifetimePosition InstructionFromInstructionIndex(int index) {
return LifetimePosition(index * kStep + kHalfStep);
}
// Returns a numeric representation of this lifetime position.
int Value() const { return value_; }
// Returns the index of the instruction to which this lifetime position
// corresponds.
int InstructionIndex() const {
int ToInstructionIndex() const {
DCHECK(IsValid());
return value_ / kStep;
}
// Returns true if this lifetime position corresponds to the instruction
// start.
bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }
// Returns true if this lifetime position corresponds to a START value
bool IsStart() const { return (value_ & (kHalfStep - 1)) == 0; }
// Returns true if this lifetime position corresponds to a gap START value
bool IsFullStart() const { return (value_ & (kStep - 1)) == 0; }
bool IsGapPosition() { return (value_ & 0x2) == 0; }
bool IsInstructionPosition() { return !IsGapPosition(); }
// Returns the lifetime position for the current START.
LifetimePosition Start() const {
DCHECK(IsValid());
return LifetimePosition(value_ & ~(kHalfStep - 1));
}
// Returns the lifetime position for the start of the instruction which
// corresponds to this lifetime position.
LifetimePosition InstructionStart() const {
// Returns the lifetime position for the current gap START.
LifetimePosition FullStart() const {
DCHECK(IsValid());
return LifetimePosition(value_ & ~(kStep - 1));
}
// Returns the lifetime position for the end of the instruction which
// corresponds to this lifetime position.
LifetimePosition InstructionEnd() const {
// Returns the lifetime position for the current END.
LifetimePosition End() const {
DCHECK(IsValid());
return LifetimePosition(InstructionStart().Value() + kStep / 2);
return LifetimePosition(Start().Value() + kHalfStep / 2);
}
// Returns the lifetime position for the beginning of the next instruction.
LifetimePosition NextInstruction() const {
// Returns the lifetime position for the beginning of the next START.
LifetimePosition NextStart() const {
DCHECK(IsValid());
return LifetimePosition(InstructionStart().Value() + kStep);
return LifetimePosition(Start().Value() + kHalfStep);
}
// Returns the lifetime position for the beginning of the previous
// instruction.
LifetimePosition PrevInstruction() const {
// Returns the lifetime position for the beginning of the next gap START.
LifetimePosition NextFullStart() const {
DCHECK(IsValid());
return LifetimePosition(FullStart().Value() + kStep);
}
// Returns the lifetime position for the beginning of the previous START.
LifetimePosition PrevStart() const {
DCHECK(IsValid());
DCHECK(value_ > 1);
return LifetimePosition(InstructionStart().Value() - kStep);
DCHECK(value_ >= kHalfStep);
return LifetimePosition(Start().Value() - kHalfStep);
}
// Constructs the lifetime position which does not correspond to any
......@@ -90,10 +117,11 @@ class LifetimePosition FINAL {
}
private:
static const int kStep = 2;
static const int kHalfStep = 2;
static const int kStep = 2 * kHalfStep;
// Code relies on kStep being a power of two.
STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
// Code relies on kStep and kHalfStep being a power of two.
STATIC_ASSERT(IS_POWER_OF_TWO(kHalfStep));
explicit LifetimePosition(int value) : value_(value) {}
......@@ -495,8 +523,8 @@ class RegisterAllocator FINAL : public ZoneObject {
bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
void ProcessInstructions(const InstructionBlock* block, BitVector* live);
void MeetRegisterConstraints(const InstructionBlock* block);
void MeetConstraintsBetween(Instruction* first, Instruction* second,
int gap_index);
void MeetConstraintsBefore(int index);
void MeetConstraintsAfter(int index);
void MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block);
void ResolvePhis(const InstructionBlock* block);
......@@ -509,7 +537,7 @@ class RegisterAllocator FINAL : public ZoneObject {
InstructionOperand* hint);
void Use(LifetimePosition block_start, LifetimePosition position,
InstructionOperand* operand, InstructionOperand* hint);
void AddGapMove(int index, GapInstruction::InnerPosition position,
void AddGapMove(int index, Instruction::GapPosition position,
InstructionOperand* from, InstructionOperand* to);
// Helper methods for updating the life range lists.
......@@ -590,7 +618,7 @@ class RegisterAllocator FINAL : public ZoneObject {
LiveRange* FixedLiveRangeFor(int index);
LiveRange* FixedDoubleLiveRangeFor(int index);
LiveRange* LiveRangeFor(int index);
GapInstruction* GetLastGap(const InstructionBlock* block);
Instruction* GetLastInstruction(const InstructionBlock* block);
const char* RegisterName(int allocation_index);
......
......@@ -206,10 +206,7 @@ TEST(InstructionIsGapAt) {
R.code->AddInstruction(g);
R.code->EndBlock(R.RpoFor(b0));
CHECK(R.code->instructions().size() == 4);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
}
CHECK(R.code->instructions().size() == 2);
}
......@@ -236,10 +233,7 @@ TEST(InstructionIsGapAt2) {
R.code->AddInstruction(g1);
R.code->EndBlock(R.RpoFor(b1));
CHECK(R.code->instructions().size() == 8);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
}
CHECK(R.code->instructions().size() == 4);
}
......@@ -257,21 +251,15 @@ TEST(InstructionAddGapMove) {
R.code->AddInstruction(g);
R.code->EndBlock(R.RpoFor(b0));
CHECK(R.code->instructions().size() == 4);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
}
int indexes[] = {0, 2, -1};
for (int i = 0; indexes[i] >= 0; i++) {
int index = indexes[i];
UnallocatedOperand* op1 = R.NewUnallocated(index + 6);
UnallocatedOperand* op2 = R.NewUnallocated(index + 12);
CHECK(R.code->instructions().size() == 2);
R.code->AddGapMove(index, op1, op2);
GapInstruction* gap = R.code->GapAt(index);
ParallelMove* move = gap->GetParallelMove(GapInstruction::START);
int index = 0;
for (auto instr : R.code->instructions()) {
UnallocatedOperand* op1 = R.NewUnallocated(index++);
UnallocatedOperand* op2 = R.NewUnallocated(index++);
instr->GetOrCreateParallelMove(TestInstr::START, R.zone())
->AddMove(op1, op2, R.zone());
ParallelMove* move = instr->GetParallelMove(TestInstr::START);
CHECK(move);
const ZoneList<MoveOperands>* move_operands = move->move_operands();
CHECK_EQ(1, move_operands->length());
......
......@@ -58,16 +58,16 @@ class TestCode : public HandleAndZoneScope {
void RedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 2;
sequence_.AddGapMove(index, RegisterOperand::New(13, main_zone()),
RegisterOperand::New(13, main_zone()));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, RegisterOperand::New(13, main_zone()),
RegisterOperand::New(13, main_zone()));
}
void NonRedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 2;
sequence_.AddGapMove(index, ImmediateOperand::New(11, main_zone()),
RegisterOperand::New(11, main_zone()));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, ImmediateOperand::New(11, main_zone()),
RegisterOperand::New(11, main_zone()));
}
void Other() {
Start();
......@@ -96,6 +96,11 @@ class TestCode : public HandleAndZoneScope {
CHECK(current_ == NULL);
Start(true);
}
void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to) {
sequence_.InstructionAt(index)
->GetOrCreateParallelMove(Instruction::START, main_zone())
->AddMove(from, to, main_zone());
}
};
......
......@@ -11,13 +11,11 @@ namespace compiler {
class MoveOptimizerTest : public InstructionSequenceTest {
public:
GapInstruction* LastGap() {
return GapInstruction::cast(*(sequence()->instructions().rbegin() + 1));
}
Instruction* LastInstruction() { return sequence()->instructions().back(); }
void AddMove(GapInstruction* gap, TestOperand from, TestOperand to,
GapInstruction::InnerPosition pos = GapInstruction::START) {
auto parallel_move = gap->GetOrCreateParallelMove(pos, zone());
void AddMove(Instruction* instr, TestOperand from, TestOperand to,
Instruction::GapPosition pos = Instruction::START) {
auto parallel_move = instr->GetOrCreateParallelMove(pos, zone());
parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to), zone());
}
......@@ -86,16 +84,16 @@ class MoveOptimizerTest : public InstructionSequenceTest {
TEST_F(MoveOptimizerTest, RemovesRedundant) {
StartBlock();
EmitNop();
AddMove(LastGap(), Reg(0), Reg(1));
EmitNop();
AddMove(LastGap(), Reg(1), Reg(0));
auto first_instr = EmitNop();
AddMove(first_instr, Reg(0), Reg(1));
auto last_instr = EmitNop();
AddMove(last_instr, Reg(1), Reg(0));
EndBlock(Last());
Optimize();
auto gap = LastGap();
auto move = gap->parallel_moves()[0];
CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0]));
auto move = last_instr->parallel_moves()[0];
CHECK_EQ(1, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
}
......@@ -105,7 +103,7 @@ TEST_F(MoveOptimizerTest, SplitsConstants) {
StartBlock();
EndBlock(Last());
auto gap = LastGap();
auto gap = LastInstruction();
AddMove(gap, Const(1), Slot(0));
AddMove(gap, Const(1), Slot(1));
AddMove(gap, Const(1), Reg(0));
......@@ -131,18 +129,18 @@ TEST_F(MoveOptimizerTest, SimpleMerge) {
StartBlock();
EndBlock(Jump(2));
AddMove(LastGap(), Reg(0), Reg(1));
AddMove(LastInstruction(), Reg(0), Reg(1));
StartBlock();
EndBlock(Jump(1));
AddMove(LastGap(), Reg(0), Reg(1));
AddMove(LastInstruction(), Reg(0), Reg(1));
StartBlock();
EndBlock(Last());
Optimize();
auto move = LastGap()->parallel_moves()[0];
auto move = LastInstruction()->parallel_moves()[0];
CHECK_EQ(1, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
}
......@@ -154,13 +152,13 @@ TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
StartBlock();
EndBlock(Jump(2));
auto gap_0 = LastGap();
auto gap_0 = LastInstruction();
AddMove(gap_0, Reg(0), Reg(1));
AddMove(LastGap(), Reg(1), Reg(0));
AddMove(LastInstruction(), Reg(1), Reg(0));
StartBlock();
EndBlock(Jump(1));
auto gap_1 = LastGap();
auto gap_1 = LastInstruction();
AddMove(gap_1, Reg(0), Reg(1));
AddMove(gap_1, Reg(1), Reg(0));
......@@ -169,9 +167,9 @@ TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
Optimize();
CHECK(gap_0->IsRedundant());
CHECK(gap_1->IsRedundant());
auto move = LastGap()->parallel_moves()[0];
CHECK(gap_0->AreMovesRedundant());
CHECK(gap_1->AreMovesRedundant());
auto move = LastInstruction()->parallel_moves()[0];
CHECK_EQ(2, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
CHECK(Contains(move, Reg(1), Reg(0)));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment