Commit 7eccb181 authored by machenbach, committed by Commit bot

Revert of [turbofan] add MachineType to AllocatedOperand (patchset #17 id:310001 of https://codereview.chromium.org/1087793002/)

Reason for revert:
[Sheriff] Breaks compile on Chromium ASan and V8 MSan bots:
http://build.chromium.org/p/client.v8/builders/Linux%20ASAN%20Builder/builds/3446
http://build.chromium.org/p/client.v8/builders/V8%20Linux%20-%20arm64%20-%20sim%20-%20MSAN/builds/2085

Original issue's description:
> [turbofan] add MachineType to AllocatedOperand
>
> - allows the optimization of emitted gap move code since the representation of the value in the register is known
> - necessary preparation for vector register allocation
> - prepare for slot sharing for any value of the same byte width
>
> BUG=
>
> Committed: https://crrev.com/3a025d1ab6437559f86a464767aa03d2d9789f6f
> Cr-Commit-Position: refs/heads/master@{#28137}

TBR=jarin@chromium.org,dcarney@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1119483003

Cr-Commit-Position: refs/heads/master@{#28139}
parent de889843
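
For readers skimming the revert, here is a rough sketch of what the reverted CL encoded, condensed from the instruction.h hunks further down. The bit positions are taken from the BitField64 declarations being removed; the variable names and the final comment are illustrative only, not code from the CL.

// Simplified from the reverted AllocatedOperand: the operand packs its kind,
// its MachineType, and its register/slot index into one 64-bit value_.
//   bits [3..4]   AllocatedKindField  (STACK_SLOT, DOUBLE_STACK_SLOT, ...)
//   bits [5..20]  MachineTypeField    (kRepWord32, kRepFloat64, kRepTagged, ...)
//   bits [35..63] IndexField
AllocatedOperand op(AllocatedOperand::REGISTER, kRepFloat64, /*index=*/3);
// With the representation carried on the operand itself, a gap-move emitter
// can pick a width-appropriate move (e.g. a 64-bit FP move for kRepFloat64
// versus a tagged move for kRepTagged) without consulting the DOUBLE_*
// operand subkinds or a separate side table.
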
@@ -20,11 +20,13 @@ class Frame : public ZoneObject {
   Frame()
       : register_save_area_size_(0),
         spill_slot_count_(0),
+        double_spill_slot_count_(0),
         osr_stack_slot_count_(0),
         allocated_registers_(NULL),
         allocated_double_registers_(NULL) {}
 
   inline int GetSpillSlotCount() { return spill_slot_count_; }
+  inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
 
   void SetAllocatedRegisters(BitVector* regs) {
     DCHECK(allocated_registers_ == NULL);
@@ -55,14 +57,16 @@ class Frame : public ZoneObject {
   int GetOsrStackSlotCount() { return osr_stack_slot_count_; }
 
-  int AllocateSpillSlot(int width) {
-    DCHECK(width == 4 || width == 8);
-    // Skip one slot if necessary.
-    if (width > kPointerSize) {
-      DCHECK(width == kPointerSize * 2);
-      spill_slot_count_++;
-      spill_slot_count_ |= 1;
+  int AllocateSpillSlot(bool is_double) {
+    // If 32-bit, skip one if the new slot is a double.
+    if (is_double) {
+      if (kDoubleSize > kPointerSize) {
+        DCHECK(kDoubleSize == kPointerSize * 2);
+        spill_slot_count_++;
+        spill_slot_count_ |= 1;
+      }
+      double_spill_slot_count_++;
     }
     return spill_slot_count_++;
   }
@@ -74,6 +78,7 @@ class Frame : public ZoneObject {
  private:
   int register_save_area_size_;
   int spill_slot_count_;
+  int double_spill_slot_count_;
   int osr_stack_slot_count_;
   BitVector* allocated_registers_;
   BitVector* allocated_double_registers_;
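
As a concrete illustration of the interface change in the hunk above, here is a minimal trace of both versions of AllocateSpillSlot. It assumes a fresh Frame on a 32-bit target (kPointerSize == 4, kDoubleSize == 8); the numbers simply follow the arithmetic in the code above.

Frame frame;                      // spill_slot_count_ == 0

// Reverted (width-based) interface:
frame.AllocateSpillSlot(4);       // returns 0, counter becomes 1
frame.AllocateSpillSlot(8);       // width > kPointerSize: counter goes
                                  // 1 -> 2 -> 3 (|= 1); returns 3,
                                  // counter becomes 4
frame.AllocateSpillSlot(4);       // returns 4

// Restored (bool) interface, same arithmetic keyed off is_double:
Frame frame2;
frame2.AllocateSpillSlot(false);  // returns 0
frame2.AllocateSpillSlot(true);   // returns 3 on a 32-bit target and also
                                  // increments double_spill_slot_count_
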
@@ -75,7 +75,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
   // This move's source may have changed due to swaps to resolve cycles and so
   // it may now be the last move in the cycle.  If so remove it.
   InstructionOperand source = move->source();
-  if (source.EqualsModuloType(destination)) {
+  if (source == destination) {
     move->Eliminate();
     return;
   }
@@ -137,7 +137,7 @@ class OperandGenerator {
     UnallocatedOperand op = UnallocatedOperand(
         UnallocatedOperand::MUST_HAVE_REGISTER,
         UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
-    sequence()->MarkAsRepresentation(kRepFloat64, op.virtual_register());
+    sequence()->MarkAsDouble(op.virtual_register());
     return op;
   }
[One file's diff is collapsed and not shown here.]
@@ -146,14 +146,21 @@ class InstructionSelector final {
   // will need to generate code for it.
   void MarkAsUsed(Node* node);
 
+  // Checks if {node} is marked as double.
+  bool IsDouble(const Node* node) const;
+
+  // Inform the register allocator of a double result.
+  void MarkAsDouble(Node* node);
+
+  // Checks if {node} is marked as reference.
+  bool IsReference(const Node* node) const;
+
+  // Inform the register allocator of a reference result.
+  void MarkAsReference(Node* node);
+
   // Inform the register allocation of the representation of the value produced
   // by {node}.
   void MarkAsRepresentation(MachineType rep, Node* node);
-  void MarkAsWord32(Node* node) { MarkAsRepresentation(kRepWord32, node); }
-  void MarkAsWord64(Node* node) { MarkAsRepresentation(kRepWord64, node); }
-  void MarkAsFloat32(Node* node) { MarkAsRepresentation(kRepFloat32, node); }
-  void MarkAsFloat64(Node* node) { MarkAsRepresentation(kRepFloat64, node); }
-  void MarkAsReference(Node* node) { MarkAsRepresentation(kRepTagged, node); }
 
   // Inform the register allocation of the representation of the unallocated
   // operand {op}.
@@ -53,47 +53,21 @@ std::ostream& operator<<(std::ostream& os,
           return os << "[immediate:" << imm.indexed_value() << "]";
       }
     }
-    case InstructionOperand::ALLOCATED: {
-      auto allocated = AllocatedOperand::cast(op);
-      switch (allocated.allocated_kind()) {
+    case InstructionOperand::ALLOCATED:
+      switch (AllocatedOperand::cast(op).allocated_kind()) {
         case AllocatedOperand::STACK_SLOT:
-          os << "[stack:" << StackSlotOperand::cast(op).index();
-          break;
+          return os << "[stack:" << StackSlotOperand::cast(op).index() << "]";
         case AllocatedOperand::DOUBLE_STACK_SLOT:
-          os << "[double_stack:" << DoubleStackSlotOperand::cast(op).index();
-          break;
+          return os << "[double_stack:"
+                    << DoubleStackSlotOperand::cast(op).index() << "]";
         case AllocatedOperand::REGISTER:
-          os << "["
-             << conf->general_register_name(RegisterOperand::cast(op).index())
-             << "|R";
-          break;
+          return os << "["
+                    << conf->general_register_name(
+                           RegisterOperand::cast(op).index()) << "|R]";
         case AllocatedOperand::DOUBLE_REGISTER:
-          os << "["
-             << conf->double_register_name(
-                    DoubleRegisterOperand::cast(op).index()) << "|R";
-          break;
-      }
-      switch (allocated.machine_type()) {
-        case kRepWord32:
-          os << "|w32";
-          break;
-        case kRepWord64:
-          os << "|w64";
-          break;
-        case kRepFloat32:
-          os << "|f32";
-          break;
-        case kRepFloat64:
-          os << "|f64";
-          break;
-        case kRepTagged:
-          os << "|t";
-          break;
-        default:
-          os << "|?";
-          break;
-      }
-      return os << "]";
-    }
+          return os << "["
+                    << conf->double_register_name(
+                           DoubleRegisterOperand::cast(op).index()) << "|R]";
+      }
     case InstructionOperand::INVALID:
       return os << "(x)";
@@ -109,7 +83,7 @@ std::ostream& operator<<(std::ostream& os,
   PrintableInstructionOperand printable_op = {printable.register_configuration_,
                                               mo.destination()};
   os << printable_op;
-  if (!mo.source().Equals(mo.destination())) {
+  if (mo.source() != mo.destination()) {
     printable_op.op_ = mo.source();
     os << " = " << printable_op;
   }
@@ -130,11 +104,11 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
   MoveOperands* to_eliminate = nullptr;
   for (auto curr : *this) {
     if (curr->IsEliminated()) continue;
-    if (curr->destination().EqualsModuloType(move->source())) {
+    if (curr->destination() == move->source()) {
       DCHECK(!replacement);
       replacement = curr;
       if (to_eliminate != nullptr) break;
-    } else if (curr->destination().EqualsModuloType(move->destination())) {
+    } else if (curr->destination() == move->destination()) {
       DCHECK(!to_eliminate);
       to_eliminate = curr;
       if (replacement != nullptr) break;
@@ -505,7 +479,8 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
       instructions_(zone()),
       next_virtual_register_(0),
       reference_maps_(zone()),
-      representations_(zone()),
+      doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+      references_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
      deoptimization_entries_(zone()) {
   block_starts_.reserve(instruction_blocks_->size());
 }
@@ -573,48 +548,23 @@ const InstructionBlock* InstructionSequence::GetInstructionBlock(
 }
 
-static MachineType FilterRepresentation(MachineType rep) {
-  DCHECK_EQ(rep, RepresentationOf(rep));
-  switch (rep) {
-    case kRepBit:
-    case kRepWord8:
-    case kRepWord16:
-      return InstructionSequence::DefaultRepresentation();
-    case kRepWord32:
-    case kRepWord64:
-    case kRepFloat32:
-    case kRepFloat64:
-    case kRepTagged:
-      return rep;
-    default:
-      break;
-  }
-  UNREACHABLE();
-  return kMachNone;
+bool InstructionSequence::IsReference(int virtual_register) const {
+  return references_.find(virtual_register) != references_.end();
 }
 
-MachineType InstructionSequence::GetRepresentation(int virtual_register) const {
-  DCHECK_LE(0, virtual_register);
-  DCHECK_LT(virtual_register, VirtualRegisterCount());
-  if (virtual_register >= static_cast<int>(representations_.size())) {
-    return DefaultRepresentation();
-  }
-  return representations_[virtual_register];
+bool InstructionSequence::IsDouble(int virtual_register) const {
+  return doubles_.find(virtual_register) != doubles_.end();
 }
 
-void InstructionSequence::MarkAsRepresentation(MachineType machine_type,
-                                               int virtual_register) {
-  DCHECK_LE(0, virtual_register);
-  DCHECK_LT(virtual_register, VirtualRegisterCount());
-  if (virtual_register >= static_cast<int>(representations_.size())) {
-    representations_.resize(VirtualRegisterCount(), DefaultRepresentation());
-  }
-  machine_type = FilterRepresentation(machine_type);
-  DCHECK_IMPLIES(representations_[virtual_register] != machine_type,
-                 representations_[virtual_register] == DefaultRepresentation());
-  representations_[virtual_register] = machine_type;
+void InstructionSequence::MarkAsReference(int virtual_register) {
+  references_.insert(virtual_register);
+}
+
+void InstructionSequence::MarkAsDouble(int virtual_register) {
+  doubles_.insert(virtual_register);
 }
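
To make the bookkeeping difference concrete, a small usage sketch of the two APIs swapped by the hunk above; it assumes an InstructionSequence* named sequence and an arbitrary virtual register 7.

// Pre-revert: one side table stores a MachineType per virtual register.
sequence->MarkAsRepresentation(kRepFloat64, 7);
// sequence->IsFloat(7)      -> true   (kRepFloat32 or kRepFloat64)
// sequence->IsReference(7)  -> false  (only kRepTagged counts as a reference)

// Post-revert: two independent sets record "is double" and "is reference".
sequence->MarkAsDouble(7);
// sequence->IsDouble(7)     -> true
// sequence->IsReference(7)  -> false
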
@@ -50,6 +50,19 @@ class InstructionOperand {
   inline bool IsStackSlot() const;
   inline bool IsDoubleStackSlot() const;
 
+  // Useful for map/set keys.
+  bool operator<(const InstructionOperand& op) const {
+    return value_ < op.value_;
+  }
+
+  bool operator==(const InstructionOperand& op) const {
+    return value_ == op.value_;
+  }
+
+  bool operator!=(const InstructionOperand& op) const {
+    return value_ != op.value_;
+  }
+
   template <typename SubKindOperand>
   static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
     void* buffer = zone->New(sizeof(op));
@@ -61,43 +74,22 @@ class InstructionOperand {
     *dest = *src;
   }
 
-  bool Equals(const InstructionOperand& that) const {
-    return this->value_ == that.value_;
-  }
-
-  bool Compare(const InstructionOperand& that) const {
-    return this->value_ < that.value_;
-  }
-
-  bool EqualsModuloType(const InstructionOperand& that) const {
-    return this->GetValueModuloType() == that.GetValueModuloType();
-  }
-
-  bool CompareModuloType(const InstructionOperand& that) const {
-    return this->GetValueModuloType() < that.GetValueModuloType();
-  }
-
  protected:
   explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
 
-  inline uint64_t GetValueModuloType() const;
-
   class KindField : public BitField64<Kind, 0, 3> {};
 
   uint64_t value_;
 };
 
 struct PrintableInstructionOperand {
   const RegisterConfiguration* register_configuration_;
   InstructionOperand op_;
 };
 
 std::ostream& operator<<(std::ostream& os,
                          const PrintableInstructionOperand& op);
 
 #define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind) \
                                                             \
   static OperandType* cast(InstructionOperand* op) {        \
@@ -354,8 +346,6 @@ class ImmediateOperand : public InstructionOperand {
 class AllocatedOperand : public InstructionOperand {
  public:
-  // TODO(dcarney): machine_type makes this now redundant. Just need to know is
-  // the operand is a slot or a register.
   enum AllocatedKind {
     STACK_SLOT,
     DOUBLE_STACK_SLOT,
@@ -363,12 +353,10 @@ class AllocatedOperand : public InstructionOperand {
     DOUBLE_REGISTER
   };
 
-  AllocatedOperand(AllocatedKind kind, MachineType machine_type, int index)
+  AllocatedOperand(AllocatedKind kind, int index)
       : InstructionOperand(ALLOCATED) {
     DCHECK_IMPLIES(kind == REGISTER || kind == DOUBLE_REGISTER, index >= 0);
-    DCHECK(IsSupportedMachineType(machine_type));
     value_ |= AllocatedKindField::encode(kind);
-    value_ |= MachineTypeField::encode(machine_type);
     value_ |= static_cast<int64_t>(index) << IndexField::kShift;
   }
@@ -380,33 +368,14 @@ class AllocatedOperand : public InstructionOperand {
     return AllocatedKindField::decode(value_);
   }
 
-  MachineType machine_type() const { return MachineTypeField::decode(value_); }
-
-  static AllocatedOperand* New(Zone* zone, AllocatedKind kind,
-                               MachineType machine_type, int index) {
-    return InstructionOperand::New(zone,
-                                   AllocatedOperand(kind, machine_type, index));
-  }
-
-  static bool IsSupportedMachineType(MachineType machine_type) {
-    if (RepresentationOf(machine_type) != machine_type) return false;
-    switch (machine_type) {
-      case kRepWord32:
-      case kRepWord64:
-      case kRepFloat32:
-      case kRepFloat64:
-      case kRepTagged:
-        return true;
-      default:
-        return false;
-    }
+  static AllocatedOperand* New(Zone* zone, AllocatedKind kind, int index) {
+    return InstructionOperand::New(zone, AllocatedOperand(kind, index));
   }
 
   INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
 
   STATIC_ASSERT(KindField::kSize == 3);
   class AllocatedKindField : public BitField64<AllocatedKind, 3, 2> {};
-  class MachineTypeField : public BitField64<MachineType, 5, 16> {};
   class IndexField : public BitField64<int32_t, 35, 29> {};
 };
@@ -431,17 +400,14 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_IS)
 #undef ALLOCATED_OPERAND_IS
 
-// TODO(dcarney): these subkinds are now pretty useless, nuke.
 #define ALLOCATED_OPERAND_CLASS(SubKind, kOperandKind)                        \
   class SubKind##Operand final : public AllocatedOperand {                    \
    public:                                                                    \
-    explicit SubKind##Operand(MachineType machine_type, int index)            \
-        : AllocatedOperand(kOperandKind, machine_type, index) {}              \
+    explicit SubKind##Operand(int index)                                      \
+        : AllocatedOperand(kOperandKind, index) {}                            \
                                                                               \
-    static SubKind##Operand* New(Zone* zone, MachineType machine_type,        \
-                                 int index) {                                 \
-      return InstructionOperand::New(zone,                                    \
-                                     SubKind##Operand(machine_type, index));  \
+    static SubKind##Operand* New(Zone* zone, int index) {                     \
+      return InstructionOperand::New(zone, SubKind##Operand(index));          \
    }                                                                          \
                                                                               \
     static SubKind##Operand* cast(InstructionOperand* op) {                   \
@@ -463,24 +429,6 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_CLASS)
 #undef ALLOCATED_OPERAND_CLASS
 
-uint64_t InstructionOperand::GetValueModuloType() const {
-  if (IsAllocated()) {
-    // TODO(dcarney): put machine type last and mask.
-    return AllocatedOperand::MachineTypeField::update(this->value_, kMachNone);
-  }
-  return this->value_;
-}
-
-// Required for maps that don't care about machine type.
-struct CompareOperandModuloType {
-  bool operator()(const InstructionOperand& a,
-                  const InstructionOperand& b) const {
-    return a.CompareModuloType(b);
-  }
-};
-
 class MoveOperands final : public ZoneObject {
  public:
   MoveOperands(const InstructionOperand& source,
@@ -508,14 +456,14 @@ class MoveOperands final : public ZoneObject {
   // True if this move a move into the given destination operand.
   bool Blocks(const InstructionOperand& operand) const {
-    return !IsEliminated() && source().EqualsModuloType(operand);
+    return !IsEliminated() && source() == operand;
   }
 
   // A move is redundant if it's been eliminated or if its source and
   // destination are the same.
   bool IsRedundant() const {
     DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
-    return IsEliminated() || source_.EqualsModuloType(destination_);
+    return IsEliminated() || source_ == destination_;
   }
 
   // We clear both operands to indicate move that's been eliminated.
@@ -603,7 +551,7 @@ class ReferenceMap final : public ZoneObject {
 std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
 
-class Instruction final {
+class Instruction {
  public:
   size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
   const InstructionOperand* OutputAt(size_t i) const {
@@ -728,9 +676,10 @@ class Instruction final {
   ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
   ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
 
- private:
+ protected:
   explicit Instruction(InstructionCode opcode);
 
+ private:
   Instruction(InstructionCode opcode, size_t output_count,
               InstructionOperand* outputs, size_t input_count,
               InstructionOperand* inputs, size_t temp_count,
@@ -747,6 +696,7 @@ class Instruction final {
   ReferenceMap* reference_map_;
   InstructionOperand operands_[1];
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(Instruction);
 };
@@ -1054,24 +1004,11 @@ class InstructionSequence final : public ZoneObject {
   const InstructionBlock* GetInstructionBlock(int instruction_index) const;
 
-  static MachineType DefaultRepresentation() {
-    return kPointerSize == 8 ? kRepWord64 : kRepWord32;
-  }
-  MachineType GetRepresentation(int virtual_register) const;
-  void MarkAsRepresentation(MachineType machine_type, int virtual_register);
+  bool IsReference(int virtual_register) const;
+  bool IsDouble(int virtual_register) const;
 
-  bool IsReference(int virtual_register) const {
-    return GetRepresentation(virtual_register) == kRepTagged;
-  }
-  bool IsFloat(int virtual_register) const {
-    switch (GetRepresentation(virtual_register)) {
-      case kRepFloat32:
-      case kRepFloat64:
-        return true;
-      default:
-        return false;
-    }
-  }
+  void MarkAsReference(int virtual_register);
+  void MarkAsDouble(int virtual_register);
 
   Instruction* GetBlockStart(RpoNumber rpo) const;
@@ -1174,7 +1111,8 @@ class InstructionSequence final : public ZoneObject {
   InstructionDeque instructions_;
   int next_virtual_register_;
   ReferenceMapDeque reference_maps_;
-  ZoneVector<MachineType> representations_;
+  VirtualRegisterSet doubles_;
+  VirtualRegisterSet references_;
   DeoptimizationVector deoptimization_entries_;
 
   DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
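
For readers unfamiliar with the comparison helpers removed above: once a MachineType was packed into value_, two operands naming the same register could differ bit-wise, which is what EqualsModuloType masked out. A small sketch using the pre-revert constructors and methods shown above (register index 3 is arbitrary):

RegisterOperand a(kRepWord32, 3);
RegisterOperand b(kRepTagged, 3);
// a.Equals(b)            -> false: the MachineTypeField bits differ.
// a.EqualsModuloType(b)  -> true : same kind and index once the type is masked.
// After the revert, operand identity is plain value_ comparison again, so
// operator== gives this behaviour directly.
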
@@ -11,18 +11,8 @@ namespace compiler {
 namespace {
 
 typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
-
-struct MoveKeyCompare {
-  bool operator()(const MoveKey& a, const MoveKey& b) const {
-    if (a.first.EqualsModuloType(b.first)) {
-      return a.second.CompareModuloType(b.second);
-    }
-    return a.first.CompareModuloType(b.first);
-  }
-};
-
-typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
-typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
+typedef ZoneMap<MoveKey, unsigned> MoveMap;
+typedef ZoneSet<InstructionOperand> OperandSet;
 
 bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
@@ -234,12 +224,10 @@ bool IsSlot(const InstructionOperand& op) {
 
 bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
-  if (!a->source().EqualsModuloType(b->source())) {
-    return a->source().CompareModuloType(b->source());
-  }
+  if (a->source() != b->source()) return a->source() < b->source();
   if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
   if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
-  return a->destination().CompareModuloType(b->destination());
+  return a->destination() < b->destination();
 }
 
 }  // namespace
@@ -264,8 +252,7 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
   MoveOperands* group_begin = nullptr;
   for (auto load : loads) {
     // New group.
-    if (group_begin == nullptr ||
-        !load->source().EqualsModuloType(group_begin->source())) {
+    if (group_begin == nullptr || load->source() != group_begin->source()) {
       group_begin = load;
       continue;
     }
@@ -163,7 +163,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
       CHECK(false);
       break;
     case UnallocatedOperand::NONE:
-      if (sequence()->IsFloat(vreg)) {
+      if (sequence()->IsDouble(vreg)) {
         constraint->type_ = kNoneDouble;
       } else {
         constraint->type_ = kNone;
@@ -178,14 +178,14 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
       constraint->value_ = unallocated->fixed_register_index();
       break;
     case UnallocatedOperand::MUST_HAVE_REGISTER:
-      if (sequence()->IsFloat(vreg)) {
+      if (sequence()->IsDouble(vreg)) {
         constraint->type_ = kDoubleRegister;
       } else {
         constraint->type_ = kRegister;
       }
       break;
     case UnallocatedOperand::MUST_HAVE_SLOT:
-      if (sequence()->IsFloat(vreg)) {
+      if (sequence()->IsDouble(vreg)) {
         constraint->type_ = kDoubleSlot;
       } else {
         constraint->type_ = kSlot;
@@ -286,7 +286,7 @@ class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
 struct OperandLess {
   bool operator()(const InstructionOperand* a,
                   const InstructionOperand* b) const {
-    return a->CompareModuloType(*b);
+    return *a < *b;
   }
 };
@@ -320,7 +320,7 @@ class OperandMap : public ZoneObject {
         this->erase(it++);
         if (it == this->end()) return;
       }
-      if (it->first->EqualsModuloType(*o.first)) {
+      if (*it->first == *o.first) {
         ++it;
         if (it == this->end()) return;
       } else {
@@ -372,14 +372,13 @@ class OperandMap : public ZoneObject {
   }
 
   void DropRegisters(const RegisterConfiguration* config) {
-    // TODO(dcarney): sort map by kind and drop range.
-    for (auto it = map().begin(); it != map().end();) {
-      auto op = it->first;
-      if (op->IsRegister() || op->IsDoubleRegister()) {
-        map().erase(it++);
-      } else {
-        ++it;
-      }
+    for (int i = 0; i < config->num_general_registers(); ++i) {
+      RegisterOperand op(i);
+      Drop(&op);
+    }
+    for (int i = 0; i < config->num_double_registers(); ++i) {
+      DoubleRegisterOperand op(i);
+      Drop(&op);
     }
   }
[One file's diff is collapsed and not shown here.]
@@ -13,6 +13,7 @@ namespace internal {
 namespace compiler {
 
 enum RegisterKind {
+  UNALLOCATED_REGISTERS,
   GENERAL_REGISTERS,
   DOUBLE_REGISTERS
 };
@@ -271,7 +272,7 @@ class SpillRange;
 // intervals over the instruction ordering.
 class LiveRange final : public ZoneObject {
  public:
-  explicit LiveRange(int id, MachineType machine_type);
+  explicit LiveRange(int id);
 
   UseInterval* first_interval() const { return first_interval_; }
   UsePosition* first_pos() const { return first_pos_; }
@@ -288,8 +289,6 @@ class LiveRange final : public ZoneObject {
   InstructionOperand GetAssignedOperand() const;
   int spill_start_index() const { return spill_start_index_; }
 
-  MachineType machine_type() const { return MachineTypeField::decode(bits_); }
-
   int assigned_register() const { return AssignedRegisterField::decode(bits_); }
   bool HasRegisterAssigned() const {
     return assigned_register() != kUnassignedRegister;
@@ -300,7 +299,10 @@ class LiveRange final : public ZoneObject {
   bool spilled() const { return SpilledField::decode(bits_); }
   void Spill();
 
-  RegisterKind kind() const;
+  RegisterKind kind() const { return RegisterKindField::decode(bits_); }
+  void set_kind(RegisterKind kind) {
+    bits_ = RegisterKindField::update(bits_, kind);
+  }
 
   // Correct only for parent.
   bool is_phi() const { return IsPhiField::decode(bits_); }
@@ -384,14 +386,14 @@ class LiveRange final : public ZoneObject {
     return spill_type() == SpillType::kSpillOperand;
   }
   bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
-  AllocatedOperand GetSpillRangeOperand() const;
 
   void SpillAtDefinition(Zone* zone, int gap_index,
                          InstructionOperand* operand);
   void SetSpillOperand(InstructionOperand* operand);
   void SetSpillRange(SpillRange* spill_range);
+  void CommitSpillOperand(AllocatedOperand* operand);
   void CommitSpillsAtDefinition(InstructionSequence* sequence,
-                                const InstructionOperand& operand,
+                                InstructionOperand* operand,
                                 bool might_be_duplicated);
 
   void SetSpillStartIndex(int start) {
@@ -414,7 +416,7 @@ class LiveRange final : public ZoneObject {
   void Verify() const;
 
   void ConvertUsesToOperand(const InstructionOperand& op,
-                            const InstructionOperand& spill_op);
+                            InstructionOperand* spill_op);
   void SetUseHints(int register_index);
   void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
@@ -435,9 +437,9 @@ class LiveRange final : public ZoneObject {
   typedef BitField<bool, 1, 1> HasSlotUseField;
   typedef BitField<bool, 2, 1> IsPhiField;
   typedef BitField<bool, 3, 1> IsNonLoopPhiField;
-  typedef BitField<SpillType, 4, 2> SpillTypeField;
-  typedef BitField<int32_t, 6, 6> AssignedRegisterField;
-  typedef BitField<MachineType, 12, 15> MachineTypeField;
+  typedef BitField<RegisterKind, 4, 2> RegisterKindField;
+  typedef BitField<SpillType, 6, 2> SpillTypeField;
+  typedef BitField<int32_t, 8, 6> AssignedRegisterField;
 
   int id_;
   int spill_start_index_;
@@ -466,23 +468,13 @@ class LiveRange final : public ZoneObject {
 
 class SpillRange final : public ZoneObject {
  public:
-  static const int kUnassignedSlot = -1;
-
   SpillRange(LiveRange* range, Zone* zone);
 
   UseInterval* interval() const { return use_interval_; }
-  // Currently, only 4 or 8 byte slots are supported.
-  int ByteWidth() const;
+  RegisterKind kind() const { return live_ranges_[0]->kind(); }
   bool IsEmpty() const { return live_ranges_.empty(); }
   bool TryMerge(SpillRange* other);
-
-  void set_assigned_slot(int index) {
-    DCHECK_EQ(kUnassignedSlot, assigned_slot_);
-    assigned_slot_ = index;
-  }
-  int assigned_slot() {
-    DCHECK_NE(kUnassignedSlot, assigned_slot_);
-    return assigned_slot_;
-  }
+  void SetOperand(AllocatedOperand* op);
 
  private:
   LifetimePosition End() const { return end_position_; }
@@ -494,7 +486,6 @@ class SpillRange final : public ZoneObject {
   ZoneVector<LiveRange*> live_ranges_;
   UseInterval* use_interval_;
   LifetimePosition end_position_;
-  int assigned_slot_;
 
   DISALLOW_COPY_AND_ASSIGN(SpillRange);
 };
@@ -558,12 +549,7 @@ class RegisterAllocationData final : public ZoneObject {
   const char* debug_name() const { return debug_name_; }
   const RegisterConfiguration* config() const { return config_; }
 
-  MachineType MachineTypeFor(int virtual_register);
-
   LiveRange* LiveRangeFor(int index);
-  // Creates a new live range.
-  LiveRange* NewLiveRange(int index, MachineType machine_type);
-  LiveRange* NewChildRangeFor(LiveRange* range);
 
   SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
@@ -577,6 +563,9 @@ class RegisterAllocationData final : public ZoneObject {
 
   bool ExistsUseWithoutDefinition();
 
+  // Creates a new live range.
+  LiveRange* NewLiveRange(int index);
+
   void MarkAllocated(RegisterKind kind, int index);
 
   PhiMapValue* InitializePhiMap(const InstructionBlock* block,
@@ -597,7 +586,6 @@ class RegisterAllocationData final : public ZoneObject {
   ZoneVector<SpillRange*> spill_ranges_;
   BitVector* assigned_registers_;
   BitVector* assigned_double_registers_;
-  int virtual_register_count_;
 
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
 };
@@ -676,6 +664,9 @@ class LiveRangeBuilder final : public ZoneObject {
   void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
   void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
 
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
   UsePosition* NewUsePosition(LifetimePosition pos, InstructionOperand* operand,
                               void* hint, UsePositionHintType hint_type);
   UsePosition* NewUsePosition(LifetimePosition pos) {
@@ -89,8 +89,7 @@ class InterpreterState {
     if (key.is_constant) {
       return ConstantOperand(key.index);
     }
-    return AllocatedOperand(
-        key.kind, InstructionSequence::DefaultRepresentation(), key.index);
+    return AllocatedOperand(key.kind, key.index);
   }
 
   friend std::ostream& operator<<(std::ostream& os,
@@ -149,7 +148,7 @@ class ParallelMoveCreator : public HandleAndZoneScope {
   ParallelMove* Create(int size) {
     ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
-    std::set<InstructionOperand, CompareOperandModuloType> seen;
+    std::set<InstructionOperand> seen;
     for (int i = 0; i < size; ++i) {
       MoveOperands mo(CreateRandomOperand(true), CreateRandomOperand(false));
       if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
@@ -161,38 +160,18 @@ class ParallelMoveCreator : public HandleAndZoneScope {
   }
 
  private:
-  MachineType RandomType() {
-    int index = rng_->NextInt(3);
-    switch (index) {
-      case 0:
-        return kRepWord32;
-      case 1:
-        return kRepWord64;
-      case 2:
-        return kRepTagged;
-    }
-    UNREACHABLE();
-    return kMachNone;
-  }
-
-  MachineType RandomDoubleType() {
-    int index = rng_->NextInt(2);
-    if (index == 0) return kRepFloat64;
-    return kRepFloat32;
-  }
-
   InstructionOperand CreateRandomOperand(bool is_source) {
     int index = rng_->NextInt(6);
     // destination can't be Constant.
     switch (rng_->NextInt(is_source ? 5 : 4)) {
       case 0:
-        return StackSlotOperand(RandomType(), index);
+        return StackSlotOperand(index);
       case 1:
-        return DoubleStackSlotOperand(RandomDoubleType(), index);
+        return DoubleStackSlotOperand(index);
       case 2:
-        return RegisterOperand(RandomType(), index);
+        return RegisterOperand(index);
      case 3:
-        return DoubleRegisterOperand(RandomDoubleType(), index);
+        return DoubleRegisterOperand(index);
       case 4:
         return ConstantOperand(index);
     }
@@ -263,8 +263,8 @@ TEST(InstructionAddGapMove) {
       CHECK(move);
       CHECK_EQ(1u, move->size());
       MoveOperands* cur = move->at(0);
-      CHECK(op1.Equals(cur->source()));
-      CHECK(op2.Equals(cur->destination()));
+      CHECK(op1 == cur->source());
+      CHECK(op2 == cur->destination());
     }
   }
@@ -308,15 +308,15 @@ TEST(InstructionOperands) {
       CHECK(k == m->TempCount());
 
       for (size_t z = 0; z < i; z++) {
-        CHECK(outputs[z].Equals(*m->OutputAt(z)));
+        CHECK(outputs[z] == *m->OutputAt(z));
       }
 
       for (size_t z = 0; z < j; z++) {
-        CHECK(inputs[z].Equals(*m->InputAt(z)));
+        CHECK(inputs[z] == *m->InputAt(z));
      }
 
       for (size_t z = 0; z < k; z++) {
-        CHECK(temps[z].Equals(*m->TempAt(z)));
+        CHECK(temps[z] == *m->TempAt(z));
       }
     }
   }
@@ -59,14 +59,13 @@ class TestCode : public HandleAndZoneScope {
     Start();
     sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
     int index = static_cast<int>(sequence_.instructions().size()) - 1;
-    AddGapMove(index, RegisterOperand(kRepWord32, 13),
-               RegisterOperand(kRepWord32, 13));
+    AddGapMove(index, RegisterOperand(13), RegisterOperand(13));
   }
   void NonRedundantMoves() {
     Start();
     sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
     int index = static_cast<int>(sequence_.instructions().size()) - 1;
-    AddGapMove(index, ConstantOperand(11), RegisterOperand(kRepWord32, 11));
+    AddGapMove(index, ConstantOperand(11), RegisterOperand(11));
   }
   void Other() {
     Start();
@@ -96,12 +96,12 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
   }
   for (auto i : s.virtual_registers_) {
     int const virtual_register = i.second;
-    if (sequence.IsFloat(virtual_register)) {
+    if (sequence.IsDouble(virtual_register)) {
       EXPECT_FALSE(sequence.IsReference(virtual_register));
       s.doubles_.insert(virtual_register);
     }
     if (sequence.IsReference(virtual_register)) {
-      EXPECT_FALSE(sequence.IsFloat(virtual_register));
+      EXPECT_FALSE(sequence.IsDouble(virtual_register));
       s.references_.insert(virtual_register);
     }
   }
@@ -33,7 +33,7 @@ class MoveOptimizerTest : public InstructionSequenceTest {
     auto to = ConvertMoveArg(to_op);
     for (auto move : *moves) {
       if (move->IsRedundant()) continue;
-      if (move->source().Equals(from) && move->destination().Equals(to)) {
+      if (move->source() == from && move->destination() == to) {
         return true;
       }
     }
@@ -67,10 +67,10 @@ class MoveOptimizerTest : public InstructionSequenceTest {
       case kConstant:
        return ConstantOperand(op.value_);
       case kFixedSlot:
-        return StackSlotOperand(kRepWord32, op.value_);
+        return StackSlotOperand(op.value_);
       case kFixedRegister:
         CHECK(0 <= op.value_ && op.value_ < num_general_registers());
-        return RegisterOperand(kRepWord32, op.value_);
+        return RegisterOperand(op.value_);
       default:
         break;
     }