Commit 81345f1a authored by dcarney, committed by Commit bot

Reland: [turbofan] add MachineType to AllocatedOperand

- allows emitted gap-move code to be optimized, since the representation of the value in a register is now known
- necessary preparation for vector register allocation
- prepares for spill-slot sharing among values of the same byte width

TBR=jarin@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1111323003

Cr-Commit-Position: refs/heads/master@{#28140}
parent 7eccb181
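
Before the per-file diffs: the heart of the change is that AllocatedOperand (and its register/slot subkinds) now carries a MachineType next to its index. An illustrative before/after, built from the constructor signatures visible in the instruction.h hunks below:

```cpp
// Before this commit: an allocated operand only knew its location.
AllocatedOperand op(AllocatedOperand::REGISTER, 3);

// After: it also records the representation of the value held there,
// so gap-move code and spill-slot sharing can use the value's width.
AllocatedOperand op(AllocatedOperand::REGISTER, kRepFloat64, 3);
```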
@@ -20,13 +20,11 @@ class Frame : public ZoneObject {
Frame()
: register_save_area_size_(0),
spill_slot_count_(0),
double_spill_slot_count_(0),
osr_stack_slot_count_(0),
allocated_registers_(NULL),
allocated_double_registers_(NULL) {}
inline int GetSpillSlotCount() { return spill_slot_count_; }
inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) {
DCHECK(allocated_registers_ == NULL);
@@ -57,15 +55,13 @@ class Frame : public ZoneObject {
int GetOsrStackSlotCount() { return osr_stack_slot_count_; }
int AllocateSpillSlot(bool is_double) {
// If 32-bit, skip one if the new slot is a double.
if (is_double) {
if (kDoubleSize > kPointerSize) {
DCHECK(kDoubleSize == kPointerSize * 2);
spill_slot_count_++;
spill_slot_count_ |= 1;
}
double_spill_slot_count_++;
int AllocateSpillSlot(int width) {
DCHECK(width == 4 || width == 8);
// Skip one slot if necessary.
if (width > kPointerSize) {
DCHECK(width == kPointerSize * 2);
spill_slot_count_++;
spill_slot_count_ |= 1;
}
return spill_slot_count_++;
}
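
The width-based allocator above generalizes the old is_double flag. A minimal sketch (not V8 code; assumes a 32-bit target where kPointerSize is 4) of what the increment-then-or does:

```cpp
#include <cassert>

struct FrameSketch {
  static const int kPointerSize = 4;  // assumption: 32-bit target
  int spill_slot_count_ = 0;

  int AllocateSpillSlot(int width) {
    assert(width == 4 || width == 8);
    if (width > kPointerSize) {
      // Bump the count and force it odd: the returned odd index plus
      // the slot before it form an 8-byte-aligned pair of 4-byte slots.
      spill_slot_count_++;
      spill_slot_count_ |= 1;
    }
    return spill_slot_count_++;
  }
};
```

On a 64-bit target the branch never fires, since width never exceeds kPointerSize there.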
@@ -78,7 +74,6 @@
private:
int register_save_area_size_;
int spill_slot_count_;
int double_spill_slot_count_;
int osr_stack_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
@@ -75,7 +75,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// This move's source may have changed due to swaps to resolve cycles and so
// it may now be the last move in the cycle. If so remove it.
InstructionOperand source = move->source();
if (source == destination) {
if (source.EqualsModuloType(destination)) {
move->Eliminate();
return;
}
@@ -137,7 +137,7 @@ class OperandGenerator {
UnallocatedOperand op = UnallocatedOperand(
UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
sequence()->MarkAsDouble(op.virtual_register());
sequence()->MarkAsRepresentation(kRepFloat64, op.virtual_register());
return op;
}
This diff is collapsed.
@@ -146,21 +146,14 @@ class InstructionSelector final {
// will need to generate code for it.
void MarkAsUsed(Node* node);
// Checks if {node} is marked as double.
bool IsDouble(const Node* node) const;
// Inform the register allocator of a double result.
void MarkAsDouble(Node* node);
// Checks if {node} is marked as reference.
bool IsReference(const Node* node) const;
// Inform the register allocator of a reference result.
void MarkAsReference(Node* node);
// Inform the register allocator of the representation of the value produced
// by {node}.
void MarkAsRepresentation(MachineType rep, Node* node);
void MarkAsWord32(Node* node) { MarkAsRepresentation(kRepWord32, node); }
void MarkAsWord64(Node* node) { MarkAsRepresentation(kRepWord64, node); }
void MarkAsFloat32(Node* node) { MarkAsRepresentation(kRepFloat32, node); }
void MarkAsFloat64(Node* node) { MarkAsRepresentation(kRepFloat64, node); }
void MarkAsReference(Node* node) { MarkAsRepresentation(kRepTagged, node); }
// Inform the register allocator of the representation of the unallocated
// operand {op}.
@@ -53,22 +53,48 @@ std::ostream& operator<<(std::ostream& os,
return os << "[immediate:" << imm.indexed_value() << "]";
}
}
case InstructionOperand::ALLOCATED:
switch (AllocatedOperand::cast(op).allocated_kind()) {
case InstructionOperand::ALLOCATED: {
auto allocated = AllocatedOperand::cast(op);
switch (allocated.allocated_kind()) {
case AllocatedOperand::STACK_SLOT:
return os << "[stack:" << StackSlotOperand::cast(op).index() << "]";
os << "[stack:" << StackSlotOperand::cast(op).index();
break;
case AllocatedOperand::DOUBLE_STACK_SLOT:
return os << "[double_stack:"
<< DoubleStackSlotOperand::cast(op).index() << "]";
os << "[double_stack:" << DoubleStackSlotOperand::cast(op).index();
break;
case AllocatedOperand::REGISTER:
return os << "["
<< conf->general_register_name(
RegisterOperand::cast(op).index()) << "|R]";
os << "["
<< conf->general_register_name(RegisterOperand::cast(op).index())
<< "|R";
break;
case AllocatedOperand::DOUBLE_REGISTER:
return os << "["
<< conf->double_register_name(
DoubleRegisterOperand::cast(op).index()) << "|R]";
os << "["
<< conf->double_register_name(
DoubleRegisterOperand::cast(op).index()) << "|R";
break;
}
switch (allocated.machine_type()) {
case kRepWord32:
os << "|w32";
break;
case kRepWord64:
os << "|w64";
break;
case kRepFloat32:
os << "|f32";
break;
case kRepFloat64:
os << "|f64";
break;
case kRepTagged:
os << "|t";
break;
default:
os << "|?";
break;
}
return os << "]";
}
case InstructionOperand::INVALID:
return os << "(x)";
}
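
With the suffix switch above, a printed operand now shows its representation after its location. For example (register names illustrative, from an x64 configuration), a tagged value in a general register prints as [rax|R|t] and a 32-bit value in stack slot 3 prints as [stack:3|w32].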
@@ -83,7 +109,7 @@ std::ostream& operator<<(std::ostream& os,
PrintableInstructionOperand printable_op = {printable.register_configuration_,
mo.destination()};
os << printable_op;
if (mo.source() != mo.destination()) {
if (!mo.source().Equals(mo.destination())) {
printable_op.op_ = mo.source();
os << " = " << printable_op;
}
@@ -104,11 +130,11 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
MoveOperands* to_eliminate = nullptr;
for (auto curr : *this) {
if (curr->IsEliminated()) continue;
if (curr->destination() == move->source()) {
if (curr->destination().EqualsModuloType(move->source())) {
DCHECK(!replacement);
replacement = curr;
if (to_eliminate != nullptr) break;
} else if (curr->destination() == move->destination()) {
} else if (curr->destination().EqualsModuloType(move->destination())) {
DCHECK(!to_eliminate);
to_eliminate = curr;
if (replacement != nullptr) break;
@@ -479,8 +505,7 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
instructions_(zone()),
next_virtual_register_(0),
reference_maps_(zone()),
doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
references_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
representations_(zone()),
deoptimization_entries_(zone()) {
block_starts_.reserve(instruction_blocks_->size());
}
@@ -548,23 +573,48 @@ const InstructionBlock* InstructionSequence::GetInstructionBlock(
}
bool InstructionSequence::IsReference(int virtual_register) const {
return references_.find(virtual_register) != references_.end();
}
bool InstructionSequence::IsDouble(int virtual_register) const {
return doubles_.find(virtual_register) != doubles_.end();
static MachineType FilterRepresentation(MachineType rep) {
DCHECK_EQ(rep, RepresentationOf(rep));
switch (rep) {
case kRepBit:
case kRepWord8:
case kRepWord16:
return InstructionSequence::DefaultRepresentation();
case kRepWord32:
case kRepWord64:
case kRepFloat32:
case kRepFloat64:
case kRepTagged:
return rep;
default:
break;
}
UNREACHABLE();
return kMachNone;
}
void InstructionSequence::MarkAsReference(int virtual_register) {
references_.insert(virtual_register);
MachineType InstructionSequence::GetRepresentation(int virtual_register) const {
DCHECK_LE(0, virtual_register);
DCHECK_LT(virtual_register, VirtualRegisterCount());
if (virtual_register >= static_cast<int>(representations_.size())) {
return DefaultRepresentation();
}
return representations_[virtual_register];
}
void InstructionSequence::MarkAsDouble(int virtual_register) {
doubles_.insert(virtual_register);
void InstructionSequence::MarkAsRepresentation(MachineType machine_type,
int virtual_register) {
DCHECK_LE(0, virtual_register);
DCHECK_LT(virtual_register, VirtualRegisterCount());
if (virtual_register >= static_cast<int>(representations_.size())) {
representations_.resize(VirtualRegisterCount(), DefaultRepresentation());
}
machine_type = FilterRepresentation(machine_type);
DCHECK_IMPLIES(representations_[virtual_register] != machine_type,
representations_[virtual_register] == DefaultRepresentation());
representations_[virtual_register] = machine_type;
}
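
A self-contained sketch of the representations_ table semantics just added (assumed names; the real table is a ZoneVector grown on demand): reads past the end fall back to the pointer-sized default, and a register's representation may change away from that default at most once, mirroring the DCHECK_IMPLIES above.

```cpp
#include <cassert>
#include <vector>

enum SketchRep { kW32, kW64, kF32, kF64, kTagged };

class RepresentationTable {
 public:
  explicit RepresentationTable(int vreg_count) : vreg_count_(vreg_count) {}

  static SketchRep Default() { return sizeof(void*) == 8 ? kW64 : kW32; }

  SketchRep Get(int vreg) const {
    // Registers that were never marked report the default representation.
    if (vreg >= static_cast<int>(table_.size())) return Default();
    return table_[vreg];
  }

  void Mark(int vreg, SketchRep rep) {
    if (vreg >= static_cast<int>(table_.size())) {
      table_.resize(vreg_count_, Default());
    }
    // Once set to a non-default value, a representation must not change.
    assert(table_[vreg] == rep || table_[vreg] == Default());
    table_[vreg] = rep;
  }

 private:
  int vreg_count_;
  std::vector<SketchRep> table_;
};
```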
@@ -50,19 +50,6 @@ class InstructionOperand {
inline bool IsStackSlot() const;
inline bool IsDoubleStackSlot() const;
// Useful for map/set keys.
bool operator<(const InstructionOperand& op) const {
return value_ < op.value_;
}
bool operator==(const InstructionOperand& op) const {
return value_ == op.value_;
}
bool operator!=(const InstructionOperand& op) const {
return value_ != op.value_;
}
template <typename SubKindOperand>
static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
void* buffer = zone->New(sizeof(op));
@@ -74,22 +61,43 @@ class InstructionOperand {
*dest = *src;
}
bool Equals(const InstructionOperand& that) const {
return this->value_ == that.value_;
}
bool Compare(const InstructionOperand& that) const {
return this->value_ < that.value_;
}
bool EqualsModuloType(const InstructionOperand& that) const {
return this->GetValueModuloType() == that.GetValueModuloType();
}
bool CompareModuloType(const InstructionOperand& that) const {
return this->GetValueModuloType() < that.GetValueModuloType();
}
protected:
explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
inline uint64_t GetValueModuloType() const;
class KindField : public BitField64<Kind, 0, 3> {};
uint64_t value_;
};
struct PrintableInstructionOperand {
const RegisterConfiguration* register_configuration_;
InstructionOperand op_;
};
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& op);
#define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind) \
\
static OperandType* cast(InstructionOperand* op) { \
@@ -346,6 +354,8 @@ class ImmediateOperand : public InstructionOperand {
class AllocatedOperand : public InstructionOperand {
public:
// TODO(dcarney): machine_type makes this now redundant. Just need to know if
// the operand is a slot or a register.
enum AllocatedKind {
STACK_SLOT,
DOUBLE_STACK_SLOT,
@@ -353,10 +363,12 @@ class AllocatedOperand : public InstructionOperand {
DOUBLE_REGISTER
};
AllocatedOperand(AllocatedKind kind, int index)
AllocatedOperand(AllocatedKind kind, MachineType machine_type, int index)
: InstructionOperand(ALLOCATED) {
DCHECK_IMPLIES(kind == REGISTER || kind == DOUBLE_REGISTER, index >= 0);
DCHECK(IsSupportedMachineType(machine_type));
value_ |= AllocatedKindField::encode(kind);
value_ |= MachineTypeField::encode(machine_type);
value_ |= static_cast<int64_t>(index) << IndexField::kShift;
}
@@ -368,14 +380,33 @@ class AllocatedOperand : public InstructionOperand {
return AllocatedKindField::decode(value_);
}
static AllocatedOperand* New(Zone* zone, AllocatedKind kind, int index) {
return InstructionOperand::New(zone, AllocatedOperand(kind, index));
MachineType machine_type() const { return MachineTypeField::decode(value_); }
static AllocatedOperand* New(Zone* zone, AllocatedKind kind,
MachineType machine_type, int index) {
return InstructionOperand::New(zone,
AllocatedOperand(kind, machine_type, index));
}
static bool IsSupportedMachineType(MachineType machine_type) {
if (RepresentationOf(machine_type) != machine_type) return false;
switch (machine_type) {
case kRepWord32:
case kRepWord64:
case kRepFloat32:
case kRepFloat64:
case kRepTagged:
return true;
default:
return false;
}
}
INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
STATIC_ASSERT(KindField::kSize == 3);
class AllocatedKindField : public BitField64<AllocatedKind, 3, 2> {};
class MachineTypeField : public BitField64<MachineType, 5, 16> {};
class IndexField : public BitField64<int32_t, 35, 29> {};
};
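
Reading the BitField64 declarations above as a layout (note the unused 21..34 gap, which the TODO about masking alludes to), encoding and decoding are plain shift-and-mask. A hedged sketch of the arithmetic:

```cpp
#include <cstdint>

// Assumed payload layout, per the declarations above:
//   bits  0..2   Kind (here: ALLOCATED)
//   bits  3..4   AllocatedKind
//   bits  5..20  MachineType
//   bits 21..34  (unused)
//   bits 35..63  index (29 bits, sign-extended on decode)
inline uint64_t Encode(uint64_t kind, uint64_t alloc_kind,
                       uint64_t machine_type, int32_t index) {
  return kind | (alloc_kind << 3) | (machine_type << 5) |
         (static_cast<uint64_t>(static_cast<int64_t>(index)) << 35);
}

inline int32_t DecodeIndex(uint64_t value) {
  // An arithmetic right shift recovers negative (spill-slot) indices.
  return static_cast<int32_t>(static_cast<int64_t>(value) >> 35);
}
```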
@@ -400,14 +431,17 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_IS)
#undef ALLOCATED_OPERAND_IS
// TODO(dcarney): these subkinds are now pretty useless, nuke.
#define ALLOCATED_OPERAND_CLASS(SubKind, kOperandKind) \
class SubKind##Operand final : public AllocatedOperand { \
public: \
explicit SubKind##Operand(int index) \
: AllocatedOperand(kOperandKind, index) {} \
explicit SubKind##Operand(MachineType machine_type, int index) \
: AllocatedOperand(kOperandKind, machine_type, index) {} \
\
static SubKind##Operand* New(Zone* zone, int index) { \
return InstructionOperand::New(zone, SubKind##Operand(index)); \
static SubKind##Operand* New(Zone* zone, MachineType machine_type, \
int index) { \
return InstructionOperand::New(zone, \
SubKind##Operand(machine_type, index)); \
} \
\
static SubKind##Operand* cast(InstructionOperand* op) { \
@@ -429,6 +463,24 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_CLASS)
#undef ALLOCATED_OPERAND_CLASS
uint64_t InstructionOperand::GetValueModuloType() const {
if (IsAllocated()) {
// TODO(dcarney): put machine type last and mask.
return AllocatedOperand::MachineTypeField::update(this->value_, kMachNone);
}
return this->value_;
}
// Required for maps that don't care about machine type.
struct CompareOperandModuloType {
bool operator()(const InstructionOperand& a,
const InstructionOperand& b) const {
return a.CompareModuloType(b);
}
};
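
GetValueModuloType and the comparator above make "same location, possibly different annotation" compare equal: for allocated operands the 16 machine-type bits are overwritten with a fixed value before the raw 64-bit payloads are compared. A stripped-down sketch, assuming the bit positions above and that kMachNone encodes as zero:

```cpp
#include <cstdint>

constexpr int kTypeShift = 5;  // assumed MachineTypeField position
constexpr uint64_t kTypeMask = ((uint64_t{1} << 16) - 1) << kTypeShift;

// Roughly MachineTypeField::update(value, kMachNone): blank the type bits.
inline uint64_t ValueModuloType(uint64_t value) {
  return value & ~kTypeMask;
}

inline bool EqualsModuloType(uint64_t a, uint64_t b) {
  return ValueModuloType(a) == ValueModuloType(b);
}
```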
class MoveOperands final : public ZoneObject {
public:
MoveOperands(const InstructionOperand& source,
@@ -456,14 +508,14 @@ class MoveOperands final : public ZoneObject {
// True if this move is a move into the given destination operand.
bool Blocks(const InstructionOperand& operand) const {
return !IsEliminated() && source() == operand;
return !IsEliminated() && source().EqualsModuloType(operand);
}
// A move is redundant if it's been eliminated or if its source and
// destination are the same.
bool IsRedundant() const {
DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
return IsEliminated() || source_ == destination_;
return IsEliminated() || source_.EqualsModuloType(destination_);
}
// We clear both operands to indicate a move that's been eliminated.
@@ -551,7 +603,7 @@ class ReferenceMap final : public ZoneObject {
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
class Instruction {
class Instruction final {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
const InstructionOperand* OutputAt(size_t i) const {
@@ -676,10 +728,9 @@ class Instruction {
ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
protected:
private:
explicit Instruction(InstructionCode opcode);
private:
Instruction(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, size_t temp_count,
@@ -696,7 +747,6 @@ class Instruction {
ReferenceMap* reference_map_;
InstructionOperand operands_[1];
private:
DISALLOW_COPY_AND_ASSIGN(Instruction);
};
@@ -1004,11 +1054,24 @@ class InstructionSequence final : public ZoneObject {
const InstructionBlock* GetInstructionBlock(int instruction_index) const;
bool IsReference(int virtual_register) const;
bool IsDouble(int virtual_register) const;
static MachineType DefaultRepresentation() {
return kPointerSize == 8 ? kRepWord64 : kRepWord32;
}
MachineType GetRepresentation(int virtual_register) const;
void MarkAsRepresentation(MachineType machine_type, int virtual_register);
void MarkAsReference(int virtual_register);
void MarkAsDouble(int virtual_register);
bool IsReference(int virtual_register) const {
return GetRepresentation(virtual_register) == kRepTagged;
}
bool IsFloat(int virtual_register) const {
switch (GetRepresentation(virtual_register)) {
case kRepFloat32:
case kRepFloat64:
return true;
default:
return false;
}
}
Instruction* GetBlockStart(RpoNumber rpo) const;
@@ -1111,8 +1174,7 @@ class InstructionSequence final : public ZoneObject {
InstructionDeque instructions_;
int next_virtual_register_;
ReferenceMapDeque reference_maps_;
VirtualRegisterSet doubles_;
VirtualRegisterSet references_;
ZoneVector<MachineType> representations_;
DeoptimizationVector deoptimization_entries_;
DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
@@ -11,8 +11,18 @@ namespace compiler {
namespace {
typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
typedef ZoneMap<MoveKey, unsigned> MoveMap;
typedef ZoneSet<InstructionOperand> OperandSet;
struct MoveKeyCompare {
bool operator()(const MoveKey& a, const MoveKey& b) const {
if (a.first.EqualsModuloType(b.first)) {
return a.second.CompareModuloType(b.second);
}
return a.first.CompareModuloType(b.first);
}
};
typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
@@ -224,10 +234,12 @@ bool IsSlot(const InstructionOperand& op) {
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
if (a->source() != b->source()) return a->source() < b->source();
if (!a->source().EqualsModuloType(b->source())) {
return a->source().CompareModuloType(b->source());
}
if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
return a->destination() < b->destination();
return a->destination().CompareModuloType(b->destination());
}
} // namespace
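
The comparator above gives the optimizer its grouping order: loads sort by source first, and within one source group register destinations come before slot destinations, so later slot stores can reuse the register copy. The same ordering on plain data, as a sketch:

```cpp
#include <algorithm>
#include <vector>

struct LoadSketch {
  int source;         // stands in for the move's source operand
  bool dest_is_slot;  // slot destinations sort after register ones
  int destination;
};

void SortLoads(std::vector<LoadSketch>* loads) {
  std::sort(loads->begin(), loads->end(),
            [](const LoadSketch& a, const LoadSketch& b) {
              if (a.source != b.source) return a.source < b.source;
              if (a.dest_is_slot != b.dest_is_slot) return !a.dest_is_slot;
              return a.destination < b.destination;
            });
}
```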
@@ -252,7 +264,8 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
MoveOperands* group_begin = nullptr;
for (auto load : loads) {
// New group.
if (group_begin == nullptr || load->source() != group_begin->source()) {
if (group_begin == nullptr ||
!load->source().EqualsModuloType(group_begin->source())) {
group_begin = load;
continue;
}
@@ -163,7 +163,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
CHECK(false);
break;
case UnallocatedOperand::NONE:
if (sequence()->IsDouble(vreg)) {
if (sequence()->IsFloat(vreg)) {
constraint->type_ = kNoneDouble;
} else {
constraint->type_ = kNone;
@@ -178,14 +178,14 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->value_ = unallocated->fixed_register_index();
break;
case UnallocatedOperand::MUST_HAVE_REGISTER:
if (sequence()->IsDouble(vreg)) {
if (sequence()->IsFloat(vreg)) {
constraint->type_ = kDoubleRegister;
} else {
constraint->type_ = kRegister;
}
break;
case UnallocatedOperand::MUST_HAVE_SLOT:
if (sequence()->IsDouble(vreg)) {
if (sequence()->IsFloat(vreg)) {
constraint->type_ = kDoubleSlot;
} else {
constraint->type_ = kSlot;
@@ -286,7 +286,7 @@ class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
struct OperandLess {
bool operator()(const InstructionOperand* a,
const InstructionOperand* b) const {
return *a < *b;
return a->CompareModuloType(*b);
}
};
@@ -320,7 +320,7 @@ class OperandMap : public ZoneObject {
this->erase(it++);
if (it == this->end()) return;
}
if (*it->first == *o.first) {
if (it->first->EqualsModuloType(*o.first)) {
++it;
if (it == this->end()) return;
} else {
@@ -372,13 +372,14 @@ class OperandMap : public ZoneObject {
}
void DropRegisters(const RegisterConfiguration* config) {
for (int i = 0; i < config->num_general_registers(); ++i) {
RegisterOperand op(i);
Drop(&op);
}
for (int i = 0; i < config->num_double_registers(); ++i) {
DoubleRegisterOperand op(i);
Drop(&op);
// TODO(dcarney): sort map by kind and drop range.
for (auto it = map().begin(); it != map().end();) {
auto op = it->first;
if (op->IsRegister() || op->IsDoubleRegister()) {
map().erase(it++);
} else {
++it;
}
}
}
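
The rewritten DropRegisters walks the whole operand map once and uses the classic erase-while-iterating idiom for node-based containers: the post-increment hands erase() the doomed iterator after the loop variable has already advanced. The idiom in isolation:

```cpp
#include <map>

// Erase every entry whose key satisfies pred (sketch of the loop above).
template <typename K, typename V, typename Pred>
void EraseIf(std::map<K, V>& m, Pred pred) {
  for (auto it = m.begin(); it != m.end();) {
    if (pred(it->first)) {
      m.erase(it++);  // only the erased iterator is invalidated
    } else {
      ++it;
    }
  }
}
```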
This diff is collapsed.
@@ -13,7 +13,6 @@ namespace internal {
namespace compiler {
enum RegisterKind {
UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
@@ -272,7 +271,7 @@ class SpillRange;
// intervals over the instruction ordering.
class LiveRange final : public ZoneObject {
public:
explicit LiveRange(int id);
explicit LiveRange(int id, MachineType machine_type);
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
@@ -289,6 +288,8 @@ class LiveRange final : public ZoneObject {
InstructionOperand GetAssignedOperand() const;
int spill_start_index() const { return spill_start_index_; }
MachineType machine_type() const { return MachineTypeField::decode(bits_); }
int assigned_register() const { return AssignedRegisterField::decode(bits_); }
bool HasRegisterAssigned() const {
return assigned_register() != kUnassignedRegister;
@@ -299,10 +300,7 @@ class LiveRange final : public ZoneObject {
bool spilled() const { return SpilledField::decode(bits_); }
void Spill();
RegisterKind kind() const { return RegisterKindField::decode(bits_); }
void set_kind(RegisterKind kind) {
bits_ = RegisterKindField::update(bits_, kind);
}
RegisterKind kind() const;
// Correct only for parent.
bool is_phi() const { return IsPhiField::decode(bits_); }
@@ -386,14 +384,14 @@ class LiveRange final : public ZoneObject {
return spill_type() == SpillType::kSpillOperand;
}
bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
AllocatedOperand GetSpillRangeOperand() const;
void SpillAtDefinition(Zone* zone, int gap_index,
InstructionOperand* operand);
void SetSpillOperand(InstructionOperand* operand);
void SetSpillRange(SpillRange* spill_range);
void CommitSpillOperand(AllocatedOperand* operand);
void CommitSpillsAtDefinition(InstructionSequence* sequence,
InstructionOperand* operand,
const InstructionOperand& operand,
bool might_be_duplicated);
void SetSpillStartIndex(int start) {
@@ -416,7 +414,7 @@ class LiveRange final : public ZoneObject {
void Verify() const;
void ConvertUsesToOperand(const InstructionOperand& op,
InstructionOperand* spill_op);
const InstructionOperand& spill_op);
void SetUseHints(int register_index);
void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
@@ -437,9 +435,9 @@ class LiveRange final : public ZoneObject {
typedef BitField<bool, 1, 1> HasSlotUseField;
typedef BitField<bool, 2, 1> IsPhiField;
typedef BitField<bool, 3, 1> IsNonLoopPhiField;
typedef BitField<RegisterKind, 4, 2> RegisterKindField;
typedef BitField<SpillType, 6, 2> SpillTypeField;
typedef BitField<int32_t, 8, 6> AssignedRegisterField;
typedef BitField<SpillType, 4, 2> SpillTypeField;
typedef BitField<int32_t, 6, 6> AssignedRegisterField;
typedef BitField<MachineType, 12, 15> MachineTypeField;
int id_;
int spill_start_index_;
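
With RegisterKindField gone, kind() is presumably derived from the stored machine type. A sketch of the obvious mapping, consistent with the IsFloat() predicate added to InstructionSequence:

```cpp
enum RegisterKindSketch { GENERAL_REGISTERS, DOUBLE_REGISTERS };
enum MachineRepSketch { kW32, kW64, kF32, kF64, kTagged };

RegisterKindSketch KindFor(MachineRepSketch rep) {
  switch (rep) {
    case kF32:
    case kF64:
      return DOUBLE_REGISTERS;   // float values live in double registers
    default:
      return GENERAL_REGISTERS;  // words and tagged pointers
  }
}
```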
@@ -468,13 +466,23 @@
class SpillRange final : public ZoneObject {
public:
static const int kUnassignedSlot = -1;
SpillRange(LiveRange* range, Zone* zone);
UseInterval* interval() const { return use_interval_; }
RegisterKind kind() const { return live_ranges_[0]->kind(); }
// Currently, only 4 or 8 byte slots are supported.
int ByteWidth() const;
bool IsEmpty() const { return live_ranges_.empty(); }
bool TryMerge(SpillRange* other);
void SetOperand(AllocatedOperand* op);
void set_assigned_slot(int index) {
DCHECK_EQ(kUnassignedSlot, assigned_slot_);
assigned_slot_ = index;
}
int assigned_slot() {
DCHECK_NE(kUnassignedSlot, assigned_slot_);
return assigned_slot_;
}
private:
LifetimePosition End() const { return end_position_; }
@@ -486,6 +494,7 @@ class SpillRange final : public ZoneObject {
ZoneVector<LiveRange*> live_ranges_;
UseInterval* use_interval_;
LifetimePosition end_position_;
int assigned_slot_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
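
ByteWidth() is only declared here; given the comment that just 4- and 8-byte slots are supported, a plausible (hypothetical) mapping from the range's machine type, reusing MachineRepSketch from the previous sketch:

```cpp
// Hypothetical; the real ByteWidth() lives in register-allocator.cc.
int ByteWidthOf(MachineRepSketch rep, int pointer_size) {
  switch (rep) {
    case kW64:
    case kF64:
      return 8;
    case kTagged:
      return pointer_size;  // tagged values are pointer-sized
    default:
      return 4;             // kW32, kF32
  }
}
```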
@@ -549,7 +558,12 @@ class RegisterAllocationData final : public ZoneObject {
const char* debug_name() const { return debug_name_; }
const RegisterConfiguration* config() const { return config_; }
MachineType MachineTypeFor(int virtual_register);
LiveRange* LiveRangeFor(int index);
// Creates a new live range.
LiveRange* NewLiveRange(int index, MachineType machine_type);
LiveRange* NewChildRangeFor(LiveRange* range);
SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
@@ -563,9 +577,6 @@ class RegisterAllocationData final : public ZoneObject {
bool ExistsUseWithoutDefinition();
// Creates a new live range.
LiveRange* NewLiveRange(int index);
void MarkAllocated(RegisterKind kind, int index);
PhiMapValue* InitializePhiMap(const InstructionBlock* block,
@@ -586,6 +597,7 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<SpillRange*> spill_ranges_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
int virtual_register_count_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
@@ -664,9 +676,6 @@ class LiveRangeBuilder final : public ZoneObject {
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
// Returns the register kind required by the given virtual register.
RegisterKind RequiredRegisterKind(int virtual_register) const;
UsePosition* NewUsePosition(LifetimePosition pos, InstructionOperand* operand,
void* hint, UsePositionHintType hint_type);
UsePosition* NewUsePosition(LifetimePosition pos) {
@@ -89,7 +89,8 @@ class InterpreterState {
if (key.is_constant) {
return ConstantOperand(key.index);
}
return AllocatedOperand(key.kind, key.index);
return AllocatedOperand(
key.kind, InstructionSequence::DefaultRepresentation(), key.index);
}
friend std::ostream& operator<<(std::ostream& os,
@@ -148,7 +149,7 @@ class ParallelMoveCreator : public HandleAndZoneScope {
ParallelMove* Create(int size) {
ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
std::set<InstructionOperand> seen;
std::set<InstructionOperand, CompareOperandModuloType> seen;
for (int i = 0; i < size; ++i) {
MoveOperands mo(CreateRandomOperand(true), CreateRandomOperand(false));
if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
@@ -160,18 +161,38 @@ class ParallelMoveCreator : public HandleAndZoneScope {
}
private:
MachineType RandomType() {
int index = rng_->NextInt(3);
switch (index) {
case 0:
return kRepWord32;
case 1:
return kRepWord64;
case 2:
return kRepTagged;
}
UNREACHABLE();
return kMachNone;
}
MachineType RandomDoubleType() {
int index = rng_->NextInt(2);
if (index == 0) return kRepFloat64;
return kRepFloat32;
}
InstructionOperand CreateRandomOperand(bool is_source) {
int index = rng_->NextInt(6);
// destination can't be Constant.
switch (rng_->NextInt(is_source ? 5 : 4)) {
case 0:
return StackSlotOperand(index);
return StackSlotOperand(RandomType(), index);
case 1:
return DoubleStackSlotOperand(index);
return DoubleStackSlotOperand(RandomDoubleType(), index);
case 2:
return RegisterOperand(index);
return RegisterOperand(RandomType(), index);
case 3:
return DoubleRegisterOperand(index);
return DoubleRegisterOperand(RandomDoubleType(), index);
case 4:
return ConstantOperand(index);
}
@@ -263,8 +263,8 @@ TEST(InstructionAddGapMove) {
CHECK(move);
CHECK_EQ(1u, move->size());
MoveOperands* cur = move->at(0);
CHECK(op1 == cur->source());
CHECK(op2 == cur->destination());
CHECK(op1.Equals(cur->source()));
CHECK(op2.Equals(cur->destination()));
}
}
@@ -308,15 +308,15 @@ TEST(InstructionOperands) {
CHECK(k == m->TempCount());
for (size_t z = 0; z < i; z++) {
CHECK(outputs[z] == *m->OutputAt(z));
CHECK(outputs[z].Equals(*m->OutputAt(z)));
}
for (size_t z = 0; z < j; z++) {
CHECK(inputs[z] == *m->InputAt(z));
CHECK(inputs[z].Equals(*m->InputAt(z)));
}
for (size_t z = 0; z < k; z++) {
CHECK(temps[z] == *m->TempAt(z));
CHECK(temps[z].Equals(*m->TempAt(z)));
}
}
}
@@ -59,13 +59,14 @@ class TestCode : public HandleAndZoneScope {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, RegisterOperand(13), RegisterOperand(13));
AddGapMove(index, RegisterOperand(kRepWord32, 13),
RegisterOperand(kRepWord32, 13));
}
void NonRedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, ConstantOperand(11), RegisterOperand(11));
AddGapMove(index, ConstantOperand(11), RegisterOperand(kRepWord32, 11));
}
void Other() {
Start();
@@ -96,12 +96,12 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
}
for (auto i : s.virtual_registers_) {
int const virtual_register = i.second;
if (sequence.IsDouble(virtual_register)) {
if (sequence.IsFloat(virtual_register)) {
EXPECT_FALSE(sequence.IsReference(virtual_register));
s.doubles_.insert(virtual_register);
}
if (sequence.IsReference(virtual_register)) {
EXPECT_FALSE(sequence.IsDouble(virtual_register));
EXPECT_FALSE(sequence.IsFloat(virtual_register));
s.references_.insert(virtual_register);
}
}
@@ -33,7 +33,7 @@ class MoveOptimizerTest : public InstructionSequenceTest {
auto to = ConvertMoveArg(to_op);
for (auto move : *moves) {
if (move->IsRedundant()) continue;
if (move->source() == from && move->destination() == to) {
if (move->source().Equals(from) && move->destination().Equals(to)) {
return true;
}
}
@@ -67,10 +67,10 @@ class MoveOptimizerTest : public InstructionSequenceTest {
case kConstant:
return ConstantOperand(op.value_);
case kFixedSlot:
return StackSlotOperand(op.value_);
return StackSlotOperand(kRepWord32, op.value_);
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers());
return RegisterOperand(op.value_);
return RegisterOperand(kRepWord32, op.value_);
default:
break;
}