Commit 66dd8699 authored by bmeurer@chromium.org

[turbofan] Add backend support for signed loads.

Also rename the arch opcodes to match their native counterparts.

TEST=compiler-unittests,cctest
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/505713002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23345 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e6a8544a
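The substance of the change is in each backend's VisitLoad: the selector now inspects both the representation and the type of the load and emits a sign-extending opcode (ldrsb/ldrsh, movsx_b/movsx_w, ...) for signed sub-word loads and a zero-extending one otherwise. A minimal, self-contained C++ sketch of that selection logic, using stand-in enum names rather than the real V8 types:

#include <cassert>

// Stand-ins for the real MachineType/ArchOpcode enums; only the shape of the
// decision matters here.
enum Rep { kRepWord8, kRepWord16, kRepWord32 };
enum Typ { kTypeInt32, kTypeUint32 };
enum Opcode { kLdrsb, kLdrb, kLdrsh, kLdrh, kLdr };

// Mirrors the new switch in VisitLoad: sub-word loads pick a sign- or
// zero-extending instruction based on the type half of the machine type.
static Opcode SelectLoadOpcode(Rep rep, Typ typ) {
  switch (rep) {
    case kRepWord8:
      return typ == kTypeUint32 ? kLdrb : kLdrsb;
    case kRepWord16:
      return typ == kTypeUint32 ? kLdrh : kLdrsh;
    case kRepWord32:
    default:
      return kLdr;  // full-width loads need no extension
  }
}

int main() {
  assert(SelectLoadOpcode(kRepWord8, kTypeInt32) == kLdrsb);
  assert(SelectLoadOpcode(kRepWord8, kTypeUint32) == kLdrb);
  assert(SelectLoadOpcode(kRepWord16, kTypeUint32) == kLdrh);
  return 0;
}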
@@ -377,42 +377,49 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmLoadWord8:
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmStoreWord8: {
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmStrb: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
__ strb(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmLoadWord16:
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
break;
case kArmStoreWord16: {
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
break;
case kArmStrh: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
__ strh(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmLoadWord32:
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
break;
case kArmStoreWord32: {
case kArmStr: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
__ str(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmFloat64Load:
case kArmVldr64:
__ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmFloat64Store: {
case kArmVstr64: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
__ vstr(i.InputDoubleRegister(index), operand);
@@ -50,14 +50,16 @@ namespace compiler {
V(ArmVcvtF64U32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
V(ArmFloat64Load) \
V(ArmFloat64Store) \
V(ArmLoadWord8) \
V(ArmStoreWord8) \
V(ArmLoadWord16) \
V(ArmStoreWord16) \
V(ArmLoadWord32) \
V(ArmStoreWord32) \
V(ArmVldr64) \
V(ArmVstr64) \
V(ArmLdrb) \
V(ArmLdrsb) \
V(ArmStrb) \
V(ArmLdrh) \
V(ArmLdrsh) \
V(ArmStrh) \
V(ArmLdr) \
V(ArmStr) \
V(ArmStoreWriteBarrier)
@@ -55,19 +55,21 @@ class ArmOperandGenerator V8_FINAL : public OperandGenerator {
case kArmRsb:
return ImmediateFitsAddrMode1Instruction(value);
case kArmFloat64Load:
case kArmFloat64Store:
case kArmVldr64:
case kArmVstr64:
return value >= -1020 && value <= 1020 && (value % 4) == 0;
case kArmLoadWord8:
case kArmStoreWord8:
case kArmLoadWord32:
case kArmStoreWord32:
case kArmLdrb:
case kArmLdrsb:
case kArmStrb:
case kArmLdr:
case kArmStr:
case kArmStoreWriteBarrier:
return value >= -4095 && value <= 4095;
case kArmLoadWord16:
case kArmStoreWord16:
case kArmLdrh:
case kArmLdrsh:
case kArmStrh:
return value >= -255 && value <= 255;
case kArchJmp:
@@ -287,6 +289,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -296,21 +299,20 @@ void InstructionSelector::VisitLoad(Node* node) {
: g.DefineAsRegister(node);
ArchOpcode opcode;
// TODO(titzer): signed/unsigned small loads
switch (rep) {
case kRepFloat64:
opcode = kArmFloat64Load;
opcode = kArmVldr64;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = kArmLoadWord8;
opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
break;
case kRepWord16:
opcode = kArmLoadWord16;
opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
break;
case kRepTagged: // Fall through.
case kRepWord32:
opcode = kArmLoadWord32;
opcode = kArmLdr;
break;
default:
UNREACHABLE();
@@ -320,9 +322,6 @@ void InstructionSelector::VisitLoad(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
g.UseRegister(base), g.UseImmediate(index));
} else if (g.CanBeImmediate(base, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
g.UseRegister(index), g.UseImmediate(base));
} else {
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), result,
g.UseRegister(base), g.UseRegister(index));
@@ -356,18 +355,18 @@ void InstructionSelector::VisitStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case kRepFloat64:
opcode = kArmFloat64Store;
opcode = kArmVstr64;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = kArmStoreWord8;
opcode = kArmStrb;
break;
case kRepWord16:
opcode = kArmStoreWord16;
opcode = kArmStrh;
break;
case kRepTagged: // Fall through.
case kRepWord32:
opcode = kArmStoreWord32;
opcode = kArmStr;
break;
default:
UNREACHABLE();
@@ -377,9 +376,6 @@ void InstructionSelector::VisitStore(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
g.UseRegister(base), g.UseImmediate(index), val);
} else if (g.CanBeImmediate(base, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
g.UseRegister(index), g.UseImmediate(base), val);
} else {
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
g.UseRegister(base), g.UseRegister(index), val);
@@ -396,34 +396,40 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
case kArm64LoadWord8:
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64StoreWord8:
case kArm64Ldrsb:
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Strb:
__ Strb(i.InputRegister(2), i.MemoryOperand());
break;
case kArm64LoadWord16:
case kArm64Ldrh:
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64StoreWord16:
case kArm64Ldrsh:
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Strh:
__ Strh(i.InputRegister(2), i.MemoryOperand());
break;
case kArm64LoadWord32:
case kArm64LdrW:
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
break;
case kArm64StoreWord32:
case kArm64StrW:
__ Str(i.InputRegister32(2), i.MemoryOperand());
break;
case kArm64LoadWord64:
case kArm64Ldr:
__ Ldr(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64StoreWord64:
case kArm64Str:
__ Str(i.InputRegister(2), i.MemoryOperand());
break;
case kArm64Float64Load:
case kArm64LdrD:
__ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kArm64Float64Store:
case kArm64StrD:
__ Str(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kArm64StoreWriteBarrier: {
@@ -68,16 +68,18 @@ namespace compiler {
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \
V(Arm64Uint32ToFloat64) \
V(Arm64Float64Load) \
V(Arm64Float64Store) \
V(Arm64LoadWord8) \
V(Arm64StoreWord8) \
V(Arm64LoadWord16) \
V(Arm64StoreWord16) \
V(Arm64LoadWord32) \
V(Arm64StoreWord32) \
V(Arm64LoadWord64) \
V(Arm64StoreWord64) \
V(Arm64LdrD) \
V(Arm64StrD) \
V(Arm64Ldrb) \
V(Arm64Ldrsb) \
V(Arm64Strb) \
V(Arm64Ldrh) \
V(Arm64Ldrsh) \
V(Arm64Strh) \
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
V(Arm64Str) \
V(Arm64StoreWriteBarrier)
@@ -143,6 +143,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -155,21 +156,21 @@ void InstructionSelector::VisitLoad(Node* node) {
// TODO(titzer): signed/unsigned small loads
switch (rep) {
case kRepFloat64:
opcode = kArm64Float64Load;
opcode = kArm64LdrD;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = kArm64LoadWord8;
opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
break;
case kRepWord16:
opcode = kArm64LoadWord16;
opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
break;
case kRepWord32:
opcode = kArm64LoadWord32;
opcode = kArm64LdrW;
break;
case kRepTagged: // Fall through.
case kRepWord64:
opcode = kArm64LoadWord64;
opcode = kArm64Ldr;
break;
default:
UNREACHABLE();
@@ -178,9 +179,6 @@ void InstructionSelector::VisitLoad(Node* node) {
if (g.CanBeImmediate(index, kLoadStoreImm)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
g.UseRegister(base), g.UseImmediate(index));
} else if (g.CanBeImmediate(base, kLoadStoreImm)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
g.UseRegister(index), g.UseImmediate(base));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MRR), result,
g.UseRegister(base), g.UseRegister(index));
@@ -217,21 +215,21 @@ void InstructionSelector::VisitStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case kRepFloat64:
opcode = kArm64Float64Store;
opcode = kArm64StrD;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = kArm64StoreWord8;
opcode = kArm64Strb;
break;
case kRepWord16:
opcode = kArm64StoreWord16;
opcode = kArm64Strh;
break;
case kRepWord32:
opcode = kArm64StoreWord32;
opcode = kArm64StrW;
break;
case kRepTagged: // Fall through.
case kRepWord64:
opcode = kArm64StoreWord64;
opcode = kArm64Str;
break;
default:
UNREACHABLE();
@@ -240,9 +238,6 @@ void InstructionSelector::VisitStore(Node* node) {
if (g.CanBeImmediate(index, kLoadStoreImm)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
g.UseRegister(base), g.UseImmediate(index), val);
} else if (g.CanBeImmediate(base, kLoadStoreImm)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
g.UseRegister(index), g.UseImmediate(base), val);
} else {
Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
g.UseRegister(base), g.UseRegister(index), val);
@@ -335,60 +335,60 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
__ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
break;
case kSSELoad:
__ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
case kSSEStore: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movsd(operand, i.InputDoubleRegister(index));
break;
}
case kIA32LoadWord8:
case kIA32Movzxbl:
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32StoreWord8: {
case kIA32Movb: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ mov_b(operand, i.InputRegister(index));
if (HasImmediateInput(instr, index)) {
__ mov_b(operand, i.InputInt8(index));
} else {
__ mov_b(operand, i.InputRegister(index));
}
break;
}
case kIA32StoreWord8I: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ mov_b(operand, i.InputInt8(index));
case kIA32Movsxwl:
__ movsx_w(i.OutputRegister(), i.MemoryOperand());
break;
}
case kIA32LoadWord16:
case kIA32Movzxwl:
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32StoreWord16: {
case kIA32Movw: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ mov_w(operand, i.InputRegister(index));
break;
}
case kIA32StoreWord16I: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ mov_w(operand, i.InputInt16(index));
if (HasImmediateInput(instr, index)) {
__ mov_w(operand, i.InputInt16(index));
} else {
__ mov_w(operand, i.InputRegister(index));
}
break;
}
case kIA32LoadWord32:
__ mov(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32StoreWord32: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ mov(operand, i.InputRegister(index));
case kIA32Movl:
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov(operand, i.InputImmediate(index));
} else {
__ mov(operand, i.InputRegister(index));
}
}
break;
}
case kIA32StoreWord32I: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ mov(operand, i.InputImmediate(index));
case kIA32Movsd:
if (instr->HasOutput()) {
__ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movsd(operand, i.InputDoubleRegister(index));
}
break;
}
case kIA32StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
@@ -43,17 +43,14 @@ namespace compiler {
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSELoad) \
V(SSEStore) \
V(IA32LoadWord8) \
V(IA32StoreWord8) \
V(IA32StoreWord8I) \
V(IA32LoadWord16) \
V(IA32StoreWord16) \
V(IA32StoreWord16I) \
V(IA32LoadWord32) \
V(IA32StoreWord32) \
V(IA32StoreWord32I) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
V(IA32Movsxwl) \
V(IA32Movzxwl) \
V(IA32Movw) \
V(IA32Movl) \
V(IA32Movsd) \
V(IA32StoreWriteBarrier)
@@ -42,6 +42,7 @@ class IA32OperandGenerator V8_FINAL : public OperandGenerator {
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -53,18 +54,18 @@ void InstructionSelector::VisitLoad(Node* node) {
// TODO(titzer): signed/unsigned small loads
switch (rep) {
case kRepFloat64:
opcode = kSSELoad;
opcode = kIA32Movsd;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = kIA32LoadWord8;
opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
break;
case kRepWord16:
opcode = kIA32LoadWord16;
opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
break;
case kRepTagged: // Fall through.
case kRepWord32:
opcode = kIA32LoadWord32;
opcode = kIA32Movl;
break;
default:
UNREACHABLE();
@@ -109,13 +110,11 @@ void InstructionSelector::VisitStore(Node* node) {
return;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
bool is_immediate = false;
InstructionOperand* val;
if (rep == kRepFloat64) {
val = g.UseDoubleRegister(value);
} else {
is_immediate = g.CanBeImmediate(value);
if (is_immediate) {
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
} else if (rep == kRepWord8 || rep == kRepBit) {
val = g.UseByteRegister(value);
@@ -126,18 +125,18 @@ void InstructionSelector::VisitStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case kRepFloat64:
opcode = kSSEStore;
opcode = kIA32Movsd;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = is_immediate ? kIA32StoreWord8I : kIA32StoreWord8;
opcode = kIA32Movb;
break;
case kRepWord16:
opcode = is_immediate ? kIA32StoreWord16I : kIA32StoreWord16;
opcode = kIA32Movw;
break;
case kRepTagged: // Fall through.
case kRepWord32:
opcode = is_immediate ? kIA32StoreWord32I : kIA32StoreWord32;
opcode = kIA32Movl;
break;
default:
UNREACHABLE();
@@ -405,12 +405,14 @@ OStream& operator<<(OStream& os, const PointerMap& pm);
class Instruction : public ZoneObject {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
InstructionOperand* Output() const { return OutputAt(0); }
InstructionOperand* OutputAt(size_t i) const {
DCHECK(i < OutputCount());
return operands_[i];
}
bool HasOutput() const { return OutputCount() == 1; }
InstructionOperand* Output() const { return OutputAt(0); }
size_t InputCount() const { return InputCountField::decode(bit_field_); }
InstructionOperand* InputAt(size_t i) const {
DCHECK(i < InputCount());
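The new Instruction::HasOutput() accessor added just above is what lets the renamed mov-style opcodes double as loads and stores: the code generator emits the load form when the instruction produces a result and the store form when it does not. A small self-contained sketch of that dispatch, with stand-in types instead of the real Instruction class:

#include <cstdio>

// Stand-in for the relevant slice of compiler::Instruction.
struct Instruction {
  int output_count;
  bool HasOutput() const { return output_count == 1; }
};

// One opcode, two forms: matches the kIA32Movl / kX64Movq style cases in the
// code generators after this change.
static void AssembleMov(const Instruction& instr) {
  if (instr.HasOutput()) {
    std::printf("mov dst, [base + index]   ; load\n");
  } else {
    std::printf("mov [base + index], src   ; store\n");
  }
}

int main() {
  AssembleMov(Instruction{1});  // load form
  AssembleMov(Instruction{0});  // store form
  return 0;
}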
@@ -11,11 +11,12 @@ namespace compiler {
#define PRINT(bit) \
if (type & bit) { \
if (before) os << "+"; \
if (before) os << "|"; \
os << #bit; \
before = true; \
}
OStream& operator<<(OStream& os, const MachineType& type) {
bool before = false;
PRINT(kRepBit);
@@ -35,6 +36,10 @@ OStream& operator<<(OStream& os, const MachineType& type) {
PRINT(kTypeAny);
return os;
}
}
}
} // namespace v8::internal::compiler
#undef PRINT
} // namespace compiler
} // namespace internal
} // namespace v8
@@ -70,6 +70,12 @@ const MachineType kMachPtr = kPointerSize == 4 ? kRepWord32 : kRepWord64;
const MachineType kMachAnyTagged =
static_cast<MachineType>(kRepTagged | kTypeAny);
// Gets only the type of the given type.
inline MachineType TypeOf(MachineType machine_type) {
int result = machine_type & kTypeMask;
return static_cast<MachineType>(result);
}
// Gets only the representation of the given type.
inline MachineType RepresentationOf(MachineType machine_type) {
int result = machine_type & kRepMask;
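For context, the new TypeOf() is the counterpart of the existing RepresentationOf(): a MachineType packs a representation half and a type half into one bitfield, and each helper masks out one half. The sketch below assumes hypothetical bit values (the real kRep*/kType* constants live in this header); only the masking idea is the point:

#include <cassert>

// Hypothetical encoding; the real constant values differ.
enum MachineType {
  kRepWord8 = 1 << 0,
  kTypeInt32 = 1 << 8,
  kTypeUint32 = 1 << 9,
  kRepMask = 0x00ff,
  kTypeMask = 0xff00,
  kMachUint8 = kRepWord8 | kTypeUint32,  // composite constant, as in machine-type.h
};

inline MachineType TypeOf(MachineType t) {
  return static_cast<MachineType>(t & kTypeMask);
}

inline MachineType RepresentationOf(MachineType t) {
  return static_cast<MachineType>(t & kRepMask);
}

int main() {
  assert(TypeOf(kMachUint8) == kTypeUint32);
  assert(RepresentationOf(kMachUint8) == kRepWord8);
  return 0;
}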
@@ -97,8 +103,9 @@ inline int ElementSizeOf(MachineType machine_type) {
return kPointerSize;
}
}
}
}
} // namespace v8::internal::compiler
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_MACHINE_TYPE_H_
@@ -391,24 +391,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64PushI:
__ pushq(i.InputImmediate(0));
break;
case kX64Movl: {
RegisterOrOperand input = i.InputRegisterOrOperand(0);
if (input.type == kRegister) {
__ movl(i.OutputRegister(), input.reg);
} else {
__ movl(i.OutputRegister(), input.operand);
}
break;
}
case kX64Movsxlq: {
RegisterOrOperand input = i.InputRegisterOrOperand(0);
if (input.type == kRegister) {
__ movsxlq(i.OutputRegister(), input.reg);
} else {
__ movsxlq(i.OutputRegister(), input.operand);
}
break;
}
case kX64CallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
@@ -532,75 +514,91 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kSSELoad:
__ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
case kX64Movsd:
if (instr->HasOutput()) {
__ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movsd(operand, i.InputDoubleRegister(index));
}
break;
case kSSEStore: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movsd(operand, i.InputDoubleRegister(index));
case kX64Movsxbl:
__ movsxbl(i.OutputRegister(), i.MemoryOperand());
break;
}
case kX64LoadWord8:
case kX64Movzxbl:
__ movzxbl(i.OutputRegister(), i.MemoryOperand());
break;
case kX64StoreWord8: {
case kX64Movb: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movb(operand, i.InputRegister(index));
if (HasImmediateInput(instr, index)) {
__ movb(operand, Immediate(i.InputInt8(index)));
} else {
__ movb(operand, i.InputRegister(index));
}
break;
}
case kX64StoreWord8I: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movb(operand, Immediate(i.InputInt8(index)));
case kX64Movsxwl:
__ movsxwl(i.OutputRegister(), i.MemoryOperand());
break;
}
case kX64LoadWord16:
case kX64Movzxwl:
__ movzxwl(i.OutputRegister(), i.MemoryOperand());
break;
case kX64StoreWord16: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movw(operand, i.InputRegister(index));
break;
}
case kX64StoreWord16I: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movw(operand, Immediate(i.InputInt16(index)));
break;
}
case kX64LoadWord32:
__ movl(i.OutputRegister(), i.MemoryOperand());
break;
case kX64StoreWord32: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movl(operand, i.InputRegister(index));
break;
}
case kX64StoreWord32I: {
case kX64Movw: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movl(operand, i.InputImmediate(index));
if (HasImmediateInput(instr, index)) {
__ movw(operand, Immediate(i.InputInt16(index)));
} else {
__ movw(operand, i.InputRegister(index));
}
break;
}
case kX64LoadWord64:
__ movq(i.OutputRegister(), i.MemoryOperand());
case kX64Movl:
if (instr->HasOutput()) {
if (instr->addressing_mode() == kMode_None) {
RegisterOrOperand input = i.InputRegisterOrOperand(0);
if (input.type == kRegister) {
__ movl(i.OutputRegister(), input.reg);
} else {
__ movl(i.OutputRegister(), input.operand);
}
} else {
__ movl(i.OutputRegister(), i.MemoryOperand());
}
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movl(operand, i.InputImmediate(index));
} else {
__ movl(operand, i.InputRegister(index));
}
}
break;
case kX64StoreWord64: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movq(operand, i.InputRegister(index));
case kX64Movsxlq: {
RegisterOrOperand input = i.InputRegisterOrOperand(0);
if (input.type == kRegister) {
__ movsxlq(i.OutputRegister(), input.reg);
} else {
__ movsxlq(i.OutputRegister(), input.operand);
}
break;
}
case kX64StoreWord64I: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
__ movq(operand, i.InputImmediate(index));
case kX64Movq:
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
int index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movq(operand, i.InputImmediate(index));
} else {
__ movq(operand, i.InputRegister(index));
}
}
break;
}
case kX64StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
@@ -46,8 +46,6 @@ namespace compiler {
V(X64Ror32) \
V(X64Push) \
V(X64PushI) \
V(X64Movsxlq) \
V(X64Movl) \
V(X64CallCodeObject) \
V(X64CallAddress) \
V(PopStack) \
@@ -62,20 +60,16 @@ namespace compiler {
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSELoad) \
V(SSEStore) \
V(X64LoadWord8) \
V(X64StoreWord8) \
V(X64StoreWord8I) \
V(X64LoadWord16) \
V(X64StoreWord16) \
V(X64StoreWord16I) \
V(X64LoadWord32) \
V(X64StoreWord32) \
V(X64StoreWord32I) \
V(X64LoadWord64) \
V(X64StoreWord64) \
V(X64StoreWord64I) \
V(X64Movsd) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movb) \
V(X64Movsxwl) \
V(X64Movzxwl) \
V(X64Movw) \
V(X64Movl) \
V(X64Movsxlq) \
V(X64Movq) \
V(X64StoreWriteBarrier)
@@ -57,6 +57,7 @@ class X64OperandGenerator V8_FINAL : public OperandGenerator {
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -68,21 +69,21 @@ void InstructionSelector::VisitLoad(Node* node) {
// TODO(titzer): signed/unsigned small loads
switch (rep) {
case kRepFloat64:
opcode = kSSELoad;
opcode = kX64Movsd;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = kX64LoadWord8;
opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
break;
case kRepWord16:
opcode = kX64LoadWord16;
opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
break;
case kRepWord32:
opcode = kX64LoadWord32;
opcode = kX64Movl;
break;
case kRepTagged: // Fall through.
case kRepWord64:
opcode = kX64LoadWord64;
opcode = kX64Movq;
break;
default:
UNREACHABLE();
@@ -123,13 +124,11 @@ void InstructionSelector::VisitStore(Node* node) {
return;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
bool is_immediate = false;
InstructionOperand* val;
if (rep == kRepFloat64) {
val = g.UseDoubleRegister(value);
} else {
is_immediate = g.CanBeImmediate(value);
if (is_immediate) {
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
} else if (rep == kRepWord8 || rep == kRepBit) {
val = g.UseByteRegister(value);
@@ -140,21 +139,21 @@ void InstructionSelector::VisitStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case kRepFloat64:
opcode = kSSEStore;
opcode = kX64Movsd;
break;
case kRepBit: // Fall through.
case kRepWord8:
opcode = is_immediate ? kX64StoreWord8I : kX64StoreWord8;
opcode = kX64Movb;
break;
case kRepWord16:
opcode = is_immediate ? kX64StoreWord16I : kX64StoreWord16;
opcode = kX64Movw;
break;
case kRepWord32:
opcode = is_immediate ? kX64StoreWord32I : kX64StoreWord32;
opcode = kX64Movl;
break;
case kRepTagged: // Fall through.
case kRepWord64:
opcode = is_immediate ? kX64StoreWord64I : kX64StoreWord64;
opcode = kX64Movq;
break;
default:
UNREACHABLE();
@@ -85,12 +85,24 @@ struct ReturnValueTraits<int16_t> {
static MachineType Representation() { return kMachInt16; }
};
template <>
struct ReturnValueTraits<uint16_t> {
static uint16_t Cast(uintptr_t r) { return static_cast<uint16_t>(r); }
static MachineType Representation() { return kMachUint16; }
};
template <>
struct ReturnValueTraits<int8_t> {
static int8_t Cast(uintptr_t r) { return static_cast<int8_t>(r); }
static MachineType Representation() { return kMachInt8; }
};
template <>
struct ReturnValueTraits<uint8_t> {
static uint8_t Cast(uintptr_t r) { return static_cast<uint8_t>(r); }
static MachineType Representation() { return kMachUint8; }
};
template <>
struct ReturnValueTraits<double> {
static double Cast(uintptr_t r) {
@@ -2672,10 +2672,10 @@ TEST(RunDeadInt32Binops) {
}
template <typename Type, typename CType>
template <typename Type>
static void RunLoadImmIndex(MachineType rep) {
const int kNumElems = 3;
CType buffer[kNumElems];
Type buffer[kNumElems];
// initialize the buffer with raw data.
byte* raw = reinterpret_cast<byte*>(buffer);
@@ -2692,19 +2692,21 @@ static void RunLoadImmIndex(MachineType rep) {
m.Return(m.Load(rep, base, index));
Type expected = buffer[i];
Type actual = static_cast<CType>(m.Call());
CHECK_EQ(expected, actual);
printf("XXX\n");
Type actual = m.Call();
CHECK(expected == actual);
}
}
}
TEST(RunLoadImmIndex) {
RunLoadImmIndex<int8_t, uint8_t>(kMachInt8);
RunLoadImmIndex<int16_t, uint16_t>(kMachInt16);
RunLoadImmIndex<int32_t, uint32_t>(kMachInt32);
RunLoadImmIndex<int32_t*, int32_t*>(kMachAnyTagged);
RunLoadImmIndex<int8_t>(kMachInt8);
RunLoadImmIndex<uint8_t>(kMachUint8);
RunLoadImmIndex<int16_t>(kMachInt16);
RunLoadImmIndex<uint16_t>(kMachUint16);
RunLoadImmIndex<int32_t>(kMachInt32);
RunLoadImmIndex<uint32_t>(kMachUint32);
RunLoadImmIndex<int32_t*>(kMachAnyTagged);
// TODO(titzer): test kRepBit loads
// TODO(titzer): test kMachFloat64 loads
@@ -2734,17 +2736,20 @@ static void RunLoadStore(MachineType rep) {
m.Store(rep, base, index1, load);
m.Return(m.Int32Constant(OK));
CHECK_NE(buffer[x], buffer[y]);
CHECK(buffer[x] != buffer[y]);
CHECK_EQ(OK, m.Call());
CHECK_EQ(buffer[x], buffer[y]);
CHECK(buffer[x] == buffer[y]);
}
}
TEST(RunLoadStore) {
RunLoadStore<int8_t>(kMachInt8);
RunLoadStore<uint8_t>(kMachUint8);
RunLoadStore<int16_t>(kMachInt16);
RunLoadStore<uint16_t>(kMachUint16);
RunLoadStore<int32_t>(kMachInt32);
RunLoadStore<uint32_t>(kMachUint32);
RunLoadStore<void*>(kMachAnyTagged);
RunLoadStore<double>(kMachFloat64);
}
@@ -3792,15 +3797,15 @@ static void LoadStoreTruncation() {
// Test lower bound.
input = min;
CHECK_EQ(max + 2, m.Call());
CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
CHECK_EQ(min + 1, input);
// Test all one byte values that are not one byte bounds.
for (int i = -127; i < 127; i++) {
input = i;
int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
CHECK_EQ(expected, m.Call());
CHECK_EQ(i + 1, input);
CHECK_EQ(static_cast<IntType>(expected), m.Call());
CHECK_EQ(static_cast<IntType>(i + 1), input);
}
}
@@ -1187,6 +1187,140 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
::testing::ValuesIn(kShifts));
// -----------------------------------------------------------------------------
// Memory access instructions.
namespace {
struct MemoryAccess {
MachineType type;
ArchOpcode ldr_opcode;
ArchOpcode str_opcode;
const int32_t immediates[40];
};
std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
OStringStream ost;
ost << memacc.type;
return os << ost.c_str();
}
static const MemoryAccess kMemoryAccesses[] = {
{kMachInt8,
kArmLdrsb,
kArmStrb,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachUint8,
kArmLdrb,
kArmStrb,
{-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
-127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
{kMachInt16,
kArmLdrsh,
kArmStrh,
{-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
-98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
{kMachUint16,
kArmLdrh,
kArmStrh,
{-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
-32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
{kMachInt32,
kArmLdr,
kArmStr,
{-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
-80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
{kMachFloat64,
kArmVldr64,
kArmVstr64,
{-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
-96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
} // namespace
typedef InstructionSelectorTestWithParam<MemoryAccess>
InstructionSelectorMemoryAccessTest;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, memacc.type, kMachPtr);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(0U, s[0]->OutputCount());
}
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
// -----------------------------------------------------------------------------
// Miscellaneous.
@@ -298,6 +298,76 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
EXPECT_EQ(kArm64Mov32, s[0]->arch_opcode());
}
// -----------------------------------------------------------------------------
// Memory access instructions.
namespace {
struct MemoryAccess {
MachineType type;
ArchOpcode ldr_opcode;
ArchOpcode str_opcode;
};
std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
OStringStream ost;
ost << memacc.type;
return os << ost.c_str();
}
} // namespace
static const MemoryAccess kMemoryAccesses[] = {
{kMachInt8, kArm64Ldrsb, kArm64Strb},
{kMachUint8, kArm64Ldrb, kArm64Strb},
{kMachInt16, kArm64Ldrsh, kArm64Strh},
{kMachUint16, kArm64Ldrh, kArm64Strh},
{kMachInt32, kArm64LdrW, kArm64StrW},
{kMachUint32, kArm64LdrW, kArm64StrW},
{kMachInt64, kArm64Ldr, kArm64Str},
{kMachUint64, kArm64Ldr, kArm64Str},
{kMachFloat64, kArm64LdrD, kArm64StrD}};
typedef InstructionSelectorTestWithParam<MemoryAccess>
InstructionSelectorMemoryAccessTest;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(0U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
} // namespace compiler
} // namespace internal
} // namespace v8
@@ -73,6 +73,138 @@ TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
}
}
// -----------------------------------------------------------------------------
// Loads and stores
namespace {
struct MemoryAccess {
MachineType type;
ArchOpcode load_opcode;
ArchOpcode store_opcode;
};
std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
OStringStream ost;
ost << memacc.type;
return os << ost.c_str();
}
static const MemoryAccess kMemoryAccesses[] = {
{kMachInt8, kIA32Movsxbl, kIA32Movb},
{kMachUint8, kIA32Movzxbl, kIA32Movb},
{kMachInt16, kIA32Movsxwl, kIA32Movw},
{kMachUint16, kIA32Movzxwl, kIA32Movw},
{kMachInt32, kIA32Movl, kIA32Movl},
{kMachUint32, kIA32Movl, kIA32Movl},
{kMachFloat64, kIA32Movsd, kIA32Movsd}};
} // namespace
typedef InstructionSelectorTestWithParam<MemoryAccess>
InstructionSelectorMemoryAccessTest;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, base, kImmediates) {
StreamBuilder m(this, memacc.type, kMachPtr);
m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, kImmediates) {
StreamBuilder m(this, memacc.type, kMachPtr);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(0U, s[0]->OutputCount());
}
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, base, kImmediates) {
StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, kImmediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
} // namespace compiler
} // namespace internal
} // namespace v8
@@ -38,6 +38,73 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
}
// -----------------------------------------------------------------------------
// Loads and stores
namespace {
struct MemoryAccess {
MachineType type;
ArchOpcode load_opcode;
ArchOpcode store_opcode;
};
std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
OStringStream ost;
ost << memacc.type;
return os << ost.c_str();
}
static const MemoryAccess kMemoryAccesses[] = {
{kMachInt8, kX64Movsxbl, kX64Movb},
{kMachUint8, kX64Movzxbl, kX64Movb},
{kMachInt16, kX64Movsxwl, kX64Movw},
{kMachUint16, kX64Movzxwl, kX64Movw},
{kMachInt32, kX64Movl, kX64Movl},
{kMachUint32, kX64Movl, kX64Movl},
{kMachInt64, kX64Movq, kX64Movq},
{kMachUint64, kX64Movq, kX64Movq},
{kMachFloat64, kX64Movsd, kX64Movsd}};
} // namespace
typedef InstructionSelectorTestWithParam<MemoryAccess>
InstructionSelectorMemoryAccessTest;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(0U, s[0]->OutputCount());
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
} // namespace compiler
} // namespace internal
} // namespace v8