Commit 3e188ace authored by bmeurer@chromium.org

[turbofan] Add backend support for load/store float32 values.

This is the bare minimum required to support typed arrays. Support for
actually operating on float32 values will be added on top of this.

TEST=compiler-unittests,cctest
R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/500343002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23386 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent fa908676
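All four backends implement the same pattern: TurboFan keeps every floating-point value as a float64, so a kRepFloat32 load widens the value right after loading it (vldr plus vcvt_f64_f32 on ARM, movss plus cvtss2sd on IA-32/x64) and a kRepFloat32 store narrows it just before storing (vcvt_f32_f64, cvtsd2ss). Below is a minimal standalone sketch of that semantic effect, assuming a plain float buffer in place of a typed-array backing store; the helper names are illustrative only and are not part of V8.

#include <cstdio>

// Widen on load: this mirrors what kArmVldr32 / kArm64LdrS / kIA32Movss /
// kX64Movss do when the instruction has an output.
static double LoadFloat32AsFloat64(const float* buffer, int index) {
  return static_cast<double>(buffer[index]);
}

// Narrow on store: the store form of the same opcodes converts back to
// float32 through a scratch register before writing to memory.
static void StoreFloat64AsFloat32(float* buffer, int index, double value) {
  buffer[index] = static_cast<float>(value);
}

int main() {
  float backing_store[4] = {0.0f, 1.5f, 2.25f, 3.125f};  // stand-in typed array
  double widened = LoadFloat32AsFloat64(backing_store, 2);   // 2.25
  StoreFloat64AsFloat32(backing_store, 0, widened + 0.5);    // writes 2.75f
  std::printf("%g %g\n", widened, static_cast<double>(backing_store[0]));
  return 0;
}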
@@ -415,6 +415,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmVldr32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vldr(scratch, i.InputOffset());
+      __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVstr32: {
+      int index = 0;
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      MemOperand operand = i.InputOffset(&index);
+      __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
+      __ vstr(scratch, operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmVldr64:
       __ vldr(i.OutputDoubleRegister(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
......
@@ -50,6 +50,8 @@ namespace compiler {
   V(ArmVcvtF64U32) \
   V(ArmVcvtS32F64) \
   V(ArmVcvtU32F64) \
+  V(ArmVldr32) \
+  V(ArmVstr32) \
   V(ArmVldr64) \
   V(ArmVstr64) \
   V(ArmLdrb) \
......
@@ -55,6 +55,8 @@ class ArmOperandGenerator V8_FINAL : public OperandGenerator {
     case kArmRsb:
       return ImmediateFitsAddrMode1Instruction(value);
+    case kArmVldr32:
+    case kArmVstr32:
     case kArmVldr64:
     case kArmVstr64:
       return value >= -1020 && value <= 1020 && (value % 4) == 0;
@@ -294,12 +296,15 @@ void InstructionSelector::VisitLoad(Node* node) {
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  InstructionOperand* result = rep == kRepFloat64
+  InstructionOperand* result = (rep == kRepFloat32 || rep == kRepFloat64)
                                    ? g.DefineAsDoubleRegister(node)
                                    : g.DefineAsRegister(node);
   ArchOpcode opcode;
   switch (rep) {
+    case kRepFloat32:
+      opcode = kArmVldr32;
+      break;
     case kRepFloat64:
       opcode = kArmVldr64;
       break;
@@ -349,11 +354,15 @@ void InstructionSelector::VisitStore(Node* node) {
     return;
   }
   DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
-  InstructionOperand* val =
-      rep == kRepFloat64 ? g.UseDoubleRegister(value) : g.UseRegister(value);
+  InstructionOperand* val = (rep == kRepFloat32 || rep == kRepFloat64)
+                                ? g.UseDoubleRegister(value)
+                                : g.UseRegister(value);
   ArchOpcode opcode;
   switch (rep) {
+    case kRepFloat32:
+      opcode = kArmVstr32;
+      break;
     case kRepFloat64:
       opcode = kArmVstr64;
       break;
......
@@ -426,6 +426,20 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArm64Str:
       __ Str(i.InputRegister(2), i.MemoryOperand());
       break;
+    case kArm64LdrS: {
+      UseScratchRegisterScope scope(masm());
+      FPRegister scratch = scope.AcquireS();
+      __ Ldr(scratch, i.MemoryOperand());
+      __ Fcvt(i.OutputDoubleRegister(), scratch);
+      break;
+    }
+    case kArm64StrS: {
+      UseScratchRegisterScope scope(masm());
+      FPRegister scratch = scope.AcquireS();
+      __ Fcvt(scratch, i.InputDoubleRegister(2));
+      __ Str(scratch, i.MemoryOperand());
+      break;
+    }
     case kArm64LdrD:
       __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
       break;
......
@@ -68,6 +68,8 @@ namespace compiler {
   V(Arm64Float64ToUint32) \
   V(Arm64Int32ToFloat64) \
   V(Arm64Uint32ToFloat64) \
+  V(Arm64LdrS) \
+  V(Arm64StrS) \
   V(Arm64LdrD) \
   V(Arm64StrD) \
   V(Arm64Ldrb) \
......
@@ -148,13 +148,16 @@ void InstructionSelector::VisitLoad(Node* node) {
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  InstructionOperand* result = rep == kRepFloat64
+  InstructionOperand* result = (rep == kRepFloat32 || rep == kRepFloat64)
                                    ? g.DefineAsDoubleRegister(node)
                                    : g.DefineAsRegister(node);
   ArchOpcode opcode;
   // TODO(titzer): signed/unsigned small loads
   switch (rep) {
+    case kRepFloat32:
+      opcode = kArm64LdrS;
+      break;
     case kRepFloat64:
       opcode = kArm64LdrD;
       break;
@@ -207,13 +210,16 @@ void InstructionSelector::VisitStore(Node* node) {
   }
   DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
   InstructionOperand* val;
-  if (rep == kRepFloat64) {
+  if (rep == kRepFloat32 || rep == kRepFloat64) {
     val = g.UseDoubleRegister(value);
   } else {
     val = g.UseRegister(value);
   }
   ArchOpcode opcode;
   switch (rep) {
+    case kRepFloat32:
+      opcode = kArm64StrS;
+      break;
     case kRepFloat64:
       opcode = kArm64StrD;
       break;
......
@@ -389,6 +389,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ movsd(operand, i.InputDoubleRegister(index));
       }
       break;
+    case kIA32Movss:
+      if (instr->HasOutput()) {
+        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+        __ movss(operand, xmm0);
+      }
+      break;
     case kIA32StoreWriteBarrier: {
       Register object = i.InputRegister(0);
       Register index = i.InputRegister(1);
......
@@ -50,6 +50,7 @@ namespace compiler {
   V(IA32Movzxwl) \
   V(IA32Movw) \
   V(IA32Movl) \
+  V(IA32Movss) \
   V(IA32Movsd) \
   V(IA32StoreWriteBarrier)
......
@@ -47,12 +47,15 @@ void InstructionSelector::VisitLoad(Node* node) {
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  InstructionOperand* output = rep == kRepFloat64
+  InstructionOperand* output = (rep == kRepFloat32 || rep == kRepFloat64)
                                    ? g.DefineAsDoubleRegister(node)
                                    : g.DefineAsRegister(node);
   ArchOpcode opcode;
   // TODO(titzer): signed/unsigned small loads
   switch (rep) {
+    case kRepFloat32:
+      opcode = kIA32Movss;
+      break;
     case kRepFloat64:
       opcode = kIA32Movsd;
       break;
@@ -111,7 +114,7 @@ void InstructionSelector::VisitStore(Node* node) {
   }
   DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
   InstructionOperand* val;
-  if (rep == kRepFloat64) {
+  if (rep == kRepFloat32 || rep == kRepFloat64) {
     val = g.UseDoubleRegister(value);
   } else {
     if (g.CanBeImmediate(value)) {
@@ -124,6 +127,9 @@ void InstructionSelector::VisitStore(Node* node) {
   }
   ArchOpcode opcode;
   switch (rep) {
+    case kRepFloat32:
+      opcode = kIA32Movss;
+      break;
     case kRepFloat64:
       opcode = kIA32Movsd;
       break;
......
@@ -244,8 +244,17 @@ void InstructionSelector::MarkAsReference(Node* node) {
 void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
   DCHECK_NOT_NULL(node);
-  if (RepresentationOf(rep) == kRepFloat64) MarkAsDouble(node);
-  if (RepresentationOf(rep) == kRepTagged) MarkAsReference(node);
+  switch (RepresentationOf(rep)) {
+    case kRepFloat32:
+    case kRepFloat64:
+      MarkAsDouble(node);
+      break;
+    case kRepTagged:
+      MarkAsReference(node);
+      break;
+    default:
+      break;
+  }
 }
......
@@ -24,6 +24,7 @@ OStream& operator<<(OStream& os, const MachineType& type) {
   PRINT(kRepWord16);
   PRINT(kRepWord32);
   PRINT(kRepWord64);
+  PRINT(kRepFloat32);
   PRINT(kRepFloat64);
   PRINT(kRepTagged);
......
@@ -23,17 +23,18 @@ enum MachineType {
   kRepWord16 = 1 << 2,
   kRepWord32 = 1 << 3,
   kRepWord64 = 1 << 4,
-  kRepFloat64 = 1 << 5,
-  kRepTagged = 1 << 6,
+  kRepFloat32 = 1 << 5,
+  kRepFloat64 = 1 << 6,
+  kRepTagged = 1 << 7,
 
   // Types.
-  kTypeBool = 1 << 7,
-  kTypeInt32 = 1 << 8,
-  kTypeUint32 = 1 << 9,
-  kTypeInt64 = 1 << 10,
-  kTypeUint64 = 1 << 11,
-  kTypeNumber = 1 << 12,
-  kTypeAny = 1 << 13
+  kTypeBool = 1 << 8,
+  kTypeInt32 = 1 << 9,
+  kTypeUint32 = 1 << 10,
+  kTypeInt64 = 1 << 11,
+  kTypeUint64 = 1 << 12,
+  kTypeNumber = 1 << 13,
+  kTypeAny = 1 << 14
 };
 
 OStream& operator<<(OStream& os, const MachineType& type);
@@ -42,13 +43,15 @@ typedef uint16_t MachineTypeUnion;
 // Globally useful machine types and constants.
 const MachineTypeUnion kRepMask = kRepBit | kRepWord8 | kRepWord16 |
-                                  kRepWord32 | kRepWord64 | kRepFloat64 |
-                                  kRepTagged;
+                                  kRepWord32 | kRepWord64 | kRepFloat32 |
+                                  kRepFloat64 | kRepTagged;
 const MachineTypeUnion kTypeMask = kTypeBool | kTypeInt32 | kTypeUint32 |
                                    kTypeInt64 | kTypeUint64 | kTypeNumber |
                                    kTypeAny;
 
 const MachineType kMachNone = static_cast<MachineType>(0);
+const MachineType kMachFloat32 =
+    static_cast<MachineType>(kRepFloat32 | kTypeNumber);
 const MachineType kMachFloat64 =
     static_cast<MachineType>(kRepFloat64 | kTypeNumber);
 const MachineType kMachInt8 = static_cast<MachineType>(kRepWord8 | kTypeInt32);
@@ -92,6 +95,7 @@ inline int ElementSizeOf(MachineType machine_type) {
     case kRepWord16:
       return 2;
     case kRepWord32:
+    case kRepFloat32:
       return 4;
     case kRepWord64:
     case kRepFloat64:
......
@@ -54,10 +54,10 @@ class RepresentationSelector {
  public:
   // Information for each node tracked during the fixpoint.
   struct NodeInfo {
-    MachineTypeUnion use : 14;     // Union of all usages for the node.
+    MachineTypeUnion use : 15;     // Union of all usages for the node.
     bool queued : 1;               // Bookkeeping for the traversal.
     bool visited : 1;              // Bookkeeping for the traversal.
-    MachineTypeUnion output : 14;  // Output type of the node.
+    MachineTypeUnion output : 15;  // Output type of the node.
   };
 
   RepresentationSelector(JSGraph* jsgraph, Zone* zone,
......
@@ -514,15 +514,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
-    case kX64Movsd:
-      if (instr->HasOutput()) {
-        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
-      } else {
-        int index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        __ movsd(operand, i.InputDoubleRegister(index));
-      }
-      break;
     case kX64Movsxbl:
       __ movsxbl(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -599,6 +590,26 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         }
       }
       break;
+    case kX64Movss:
+      if (instr->HasOutput()) {
+        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+        __ movss(operand, xmm0);
+      }
+      break;
+    case kX64Movsd:
+      if (instr->HasOutput()) {
+        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ movsd(operand, i.InputDoubleRegister(index));
+      }
+      break;
     case kX64StoreWriteBarrier: {
       Register object = i.InputRegister(0);
       Register index = i.InputRegister(1);
......
@@ -60,7 +60,6 @@ namespace compiler {
   V(SSEFloat64ToUint32) \
   V(SSEInt32ToFloat64) \
   V(SSEUint32ToFloat64) \
-  V(X64Movsd) \
   V(X64Movsxbl) \
   V(X64Movzxbl) \
   V(X64Movb) \
@@ -70,6 +69,8 @@ namespace compiler {
   V(X64Movl) \
   V(X64Movsxlq) \
   V(X64Movq) \
+  V(X64Movsd) \
+  V(X64Movss) \
   V(X64StoreWriteBarrier)
......
@@ -62,12 +62,15 @@ void InstructionSelector::VisitLoad(Node* node) {
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  InstructionOperand* output = rep == kRepFloat64
+  InstructionOperand* output = (rep == kRepFloat32 || rep == kRepFloat64)
                                    ? g.DefineAsDoubleRegister(node)
                                    : g.DefineAsRegister(node);
   ArchOpcode opcode;
   // TODO(titzer): signed/unsigned small loads
   switch (rep) {
+    case kRepFloat32:
+      opcode = kX64Movss;
+      break;
     case kRepFloat64:
       opcode = kX64Movsd;
       break;
@@ -125,7 +128,7 @@ void InstructionSelector::VisitStore(Node* node) {
   }
   DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
   InstructionOperand* val;
-  if (rep == kRepFloat64) {
+  if (rep == kRepFloat32 || rep == kRepFloat64) {
     val = g.UseDoubleRegister(value);
   } else {
     if (g.CanBeImmediate(value)) {
@@ -138,6 +141,9 @@ void InstructionSelector::VisitStore(Node* node) {
   }
   ArchOpcode opcode;
   switch (rep) {
+    case kRepFloat32:
+      opcode = kX64Movss;
+      break;
     case kRepFloat64:
       opcode = kX64Movsd;
       break;
......
@@ -2751,6 +2751,7 @@ TEST(RunLoadStore) {
   RunLoadStore<int32_t>(kMachInt32);
   RunLoadStore<uint32_t>(kMachUint32);
   RunLoadStore<void*>(kMachAnyTagged);
+  RunLoadStore<float>(kMachFloat32);
   RunLoadStore<double>(kMachFloat64);
 }
......
@@ -1197,6 +1197,8 @@ struct MemoryAccess {
   MachineType type;
   ArchOpcode ldr_opcode;
   ArchOpcode str_opcode;
+  bool (InstructionSelectorTest::Stream::*val_predicate)(
+      const InstructionOperand*) const;
   const int32_t immediates[40];
 };
@@ -1212,36 +1214,49 @@ static const MemoryAccess kMemoryAccesses[] = {
     {kMachInt8,
      kArmLdrsb,
      kArmStrb,
+     &InstructionSelectorTest::Stream::IsInteger,
      {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
       -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
       115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
     {kMachUint8,
      kArmLdrb,
      kArmStrb,
+     &InstructionSelectorTest::Stream::IsInteger,
      {-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
       -127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
       39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
     {kMachInt16,
      kArmLdrsh,
      kArmStrh,
+     &InstructionSelectorTest::Stream::IsInteger,
      {-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
       -98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
       102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
     {kMachUint16,
      kArmLdrh,
      kArmStrh,
+     &InstructionSelectorTest::Stream::IsInteger,
      {-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
       -32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
       114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
     {kMachInt32,
      kArmLdr,
      kArmStr,
+     &InstructionSelectorTest::Stream::IsInteger,
      {-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
       -80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
       93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
+    {kMachFloat32,
+     kArmVldr32,
+     kArmVstr32,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
+      -84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
+      24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
    {kMachFloat64,
     kArmVldr64,
     kArmVstr64,
+    &InstructionSelectorTest::Stream::IsDouble,
     {-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
      -96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
      108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
@@ -1262,7 +1277,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
   EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
   EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
   EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
 }
@@ -1278,7 +1294,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
     ASSERT_EQ(2U, s[0]->InputCount());
     ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
   }
 }
......
@@ -330,6 +330,7 @@ static const MemoryAccess kMemoryAccesses[] = {
     {kMachUint32, kArm64LdrW, kArm64StrW},
     {kMachInt64, kArm64Ldr, kArm64Str},
     {kMachUint64, kArm64Ldr, kArm64Str},
+    {kMachFloat32, kArm64LdrS, kArm64StrS},
     {kMachFloat64, kArm64LdrD, kArm64StrD}};
......
@@ -100,6 +100,7 @@ static const MemoryAccess kMemoryAccesses[] = {
     {kMachUint16, kIA32Movzxwl, kIA32Movw},
     {kMachInt32, kIA32Movl, kIA32Movl},
     {kMachUint32, kIA32Movl, kIA32Movl},
+    {kMachFloat32, kIA32Movss, kIA32Movss},
     {kMachFloat64, kIA32Movsd, kIA32Movsd}};
 
 }  // namespace
......
@@ -186,18 +186,15 @@ TARGET_TEST_P(InstructionSelectorFinishTest, Parameter) {
   EXPECT_EQ(kArchNop, s[0]->arch_opcode());
   ASSERT_EQ(1U, s[0]->OutputCount());
   ASSERT_TRUE(s[0]->Output()->IsUnallocated());
-  EXPECT_EQ(param->id(),
-            UnallocatedOperand::cast(s[0]->Output())->virtual_register());
+  EXPECT_EQ(param->id(), s.ToVreg(s[0]->Output()));
   EXPECT_EQ(kArchNop, s[1]->arch_opcode());
   ASSERT_EQ(1U, s[1]->InputCount());
   ASSERT_TRUE(s[1]->InputAt(0)->IsUnallocated());
-  EXPECT_EQ(param->id(),
-            UnallocatedOperand::cast(s[1]->InputAt(0))->virtual_register());
+  EXPECT_EQ(param->id(), s.ToVreg(s[1]->InputAt(0)));
   ASSERT_EQ(1U, s[1]->OutputCount());
   ASSERT_TRUE(s[1]->Output()->IsUnallocated());
   EXPECT_TRUE(UnallocatedOperand::cast(s[1]->Output())->HasSameAsInputPolicy());
-  EXPECT_EQ(finish->id(),
-            UnallocatedOperand::cast(s[1]->Output())->virtual_register());
+  EXPECT_EQ(finish->id(), s.ToVreg(s[1]->Output()));
 }
......
@@ -24,7 +24,6 @@ class InstructionSelectorTest : public CompilerTest {
   base::RandomNumberGenerator* rng() { return &rng_; }
 
- protected:
   class Stream;
 
   enum StreamBuilderMode { kAllInstructions, kTargetInstructions };
@@ -116,10 +115,23 @@ class InstructionSelectorTest : public CompilerTest {
       return instructions_[index];
     }
 
+    bool IsDouble(const InstructionOperand* operand) const {
+      return IsDouble(ToVreg(operand));
+    }
+
     bool IsDouble(int virtual_register) const {
       return doubles_.find(virtual_register) != doubles_.end();
     }
+
+    bool IsInteger(const InstructionOperand* operand) const {
+      return IsInteger(ToVreg(operand));
+    }
+
+    bool IsInteger(int virtual_register) const {
+      return !IsDouble(virtual_register) && !IsReference(virtual_register);
+    }
+
+    bool IsReference(const InstructionOperand* operand) const {
+      return IsReference(ToVreg(operand));
+    }
+
     bool IsReference(int virtual_register) const {
       return references_.find(virtual_register) != references_.end();
     }
@@ -129,6 +141,7 @@ class InstructionSelectorTest : public CompilerTest {
     }
 
     int ToVreg(const InstructionOperand* operand) const {
+      if (operand->IsConstant()) return operand->index();
       EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
       return UnallocatedOperand::cast(operand)->virtual_register();
     }
......
@@ -67,6 +67,7 @@ static const MemoryAccess kMemoryAccesses[] = {
     {kMachUint32, kX64Movl, kX64Movl},
     {kMachInt64, kX64Movq, kX64Movq},
     {kMachUint64, kX64Movq, kX64Movq},
+    {kMachFloat32, kX64Movss, kX64Movss},
     {kMachFloat64, kX64Movsd, kX64Movsd}};
 
 }  // namespace
......