Commit 38e764f7 authored by Weiliang Lin

[x86] Introduce vandps/vandpd/vxorps/vxorpd.

R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/1072343002

Cr-Commit-Position: refs/heads/master@{#27768}
parent eef2b9b0
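The AVX forms are VEX-encoded and take three operands, so the instruction selector can allocate the output independently of the input (g.DefineAsRegister) and only the SSE fallback keeps the same-as-first constraint. A minimal sketch of the resulting Float64Abs lowering, assuming the vandpd() helper added by this change and V8's ia32/x64 macro assembler; illustrative only, not code from the diff:

  void EmitFloat64Abs(MacroAssembler* masm, XMMRegister dst, XMMRegister src) {
    // Build the 0x7FFFFFFFFFFFFFFF sign-clearing mask in the scratch register.
    masm->pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
    masm->psrlq(kScratchDoubleReg, 1);
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope avx_scope(masm, AVX);
      // Three-operand AVX form: dst does not have to alias src.
      masm->vandpd(dst, kScratchDoubleReg, src);
    } else {
      // Two-operand SSE form: dst must already hold the input value.
      masm->andpd(dst, kScratchDoubleReg);
    }
  }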
@@ -480,7 +480,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
__ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
@@ -488,7 +487,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
__ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
@@ -542,7 +540,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kSSEFloat64Abs: {
// TODO(bmeurer): Use 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
__ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
@@ -550,7 +547,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kSSEFloat64Neg: {
// TODO(bmeurer): Use 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
__ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
@@ -683,6 +679,38 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputOperand(1));
break;
}
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
CpuFeatureScope avx_scope(masm(), AVX);
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
CpuFeatureScope avx_scope(masm(), AVX);
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
CpuFeatureScope avx_scope(masm(), AVX);
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
CpuFeatureScope avx_scope(masm(), AVX);
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
...
@@ -76,6 +76,10 @@ namespace compiler {
V(AVXFloat64Div) \
V(AVXFloat64Max) \
V(AVXFloat64Min) \
V(AVXFloat64Abs) \
V(AVXFloat64Neg) \
V(AVXFloat32Abs) \
V(AVXFloat32Neg) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
...
@@ -153,6 +153,18 @@ void VisitRROFloat(InstructionSelector* selector, Node* node,
}
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
} else {
selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
} // namespace
@@ -684,8 +696,8 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
IA32OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
- Emit(kSSEFloat32Neg, g.DefineSameAsFirst(node),
-      g.UseRegister(m.right().node()));
VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
               kSSEFloat32Neg);
return;
}
VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
@@ -708,8 +720,8 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
}
}
}
- Emit(kSSEFloat64Neg, g.DefineSameAsFirst(node),
-      g.UseRegister(m.right().node()));
VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
               kSSEFloat64Neg);
return;
}
VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
@@ -767,13 +779,13 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
void InstructionSelector::VisitFloat32Abs(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat32Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat64Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
...
@@ -729,7 +729,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
__ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
@@ -737,7 +736,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
__ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
@@ -810,7 +808,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kSSEFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
__ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
@@ -818,7 +815,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kSSEFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
__ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
@@ -957,6 +953,62 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kAVXFloat64Min:
ASSEMBLE_AVX_BINOP(vminsd);
break;
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
CpuFeatureScope avx_scope(masm(), AVX);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputOperand(0));
}
break;
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
CpuFeatureScope avx_scope(masm(), AVX);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputOperand(0));
}
break;
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
CpuFeatureScope avx_scope(masm(), AVX);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputOperand(0));
}
break;
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
CpuFeatureScope avx_scope(masm(), AVX);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputOperand(0));
}
break;
}
case kX64Movsxbl:
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
...
@@ -94,6 +94,10 @@ namespace compiler {
V(AVXFloat64Div) \
V(AVXFloat64Max) \
V(AVXFloat64Min) \
V(AVXFloat64Abs) \
V(AVXFloat64Neg) \
V(AVXFloat32Abs) \
V(AVXFloat32Neg) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movb) \
...
@@ -856,6 +856,18 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node,
}
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
X64OperandGenerator g(selector);
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
} else {
selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
} // namespace
@@ -868,8 +880,8 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
X64OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
- Emit(kSSEFloat32Neg, g.DefineSameAsFirst(node),
-      g.UseRegister(m.right().node()));
VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
               kSSEFloat32Neg);
return;
}
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
@@ -898,7 +910,7 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
void InstructionSelector::VisitFloat32Abs(Node* node) {
X64OperandGenerator g(this);
- Emit(kSSEFloat32Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
@@ -929,8 +941,8 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
}
}
}
- Emit(kSSEFloat64Neg, g.DefineSameAsFirst(node),
-      g.UseRegister(m.right().node()));
VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
               kSSEFloat64Neg);
return;
}
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
@@ -968,7 +980,7 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
void InstructionSelector::VisitFloat64Abs(Node* node) {
X64OperandGenerator g(this);
- Emit(kSSEFloat64Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
...
@@ -2665,6 +2665,26 @@ void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
}
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
const Operand& src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kL128, kNone, k0F, kWIG);
EMIT(op);
emit_sse_operand(dst, src2);
}
void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
const Operand& src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kL128, k66, k0F, kWIG);
EMIT(op);
emit_sse_operand(dst, src2);
}
void Assembler::bmi1(byte op, Register reg, Register vreg, const Operand& rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
...
@@ -1398,6 +1398,30 @@ class Assembler : public AssemblerBase {
}
void rorx(Register dst, const Operand& src, byte imm8);
#define PACKED_OP_LIST(V) \
V(and, 0x54) \
V(xor, 0x57)
#define AVX_PACKED_OP_DECLARE(name, opcode) \
void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vps(opcode, dst, src1, Operand(src2)); \
} \
void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
vps(opcode, dst, src1, src2); \
} \
void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vpd(opcode, dst, src1, Operand(src2)); \
} \
void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
vpd(opcode, dst, src1, src2); \
}
PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
...
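For reference, the PACKED_OP_LIST(AVX_PACKED_OP_DECLARE) expansion above boils down to the following member functions on the ia32 Assembler (register/register overloads shown; the Operand overloads forward src2 unchanged). This is just the macro expanded by hand, not extra code in the change:

  // Hand-expanded sketch of PACKED_OP_LIST(AVX_PACKED_OP_DECLARE) on ia32,
  // where a plain XMMRegister source is wrapped in an Operand:
  void vandps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
    vps(0x54, dst, src1, Operand(src2));
  }
  void vxorps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
    vps(0x57, dst, src1, Operand(src2));
  }
  void vandpd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
    vpd(0x54, dst, src1, Operand(src2));
  }
  void vxorpd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
    vpd(0x57, dst, src1, Operand(src2));
  }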
@@ -985,6 +985,40 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
default:
UnimplementedInstruction();
}
} else if (vex_none() && vex_0f()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x54:
AppendToBuffer("vandps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x57:
AppendToBuffer("vxorps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
default:
UnimplementedInstruction();
}
} else if (vex_66() && vex_0f()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x54:
AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x57:
AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
default:
UnimplementedInstruction();
}
} else {
UnimplementedInstruction();
}
...
@@ -3510,6 +3510,46 @@ void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
}
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
const Operand& src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
const Operand& src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
...
@@ -1582,6 +1582,30 @@ class Assembler : public AssemblerBase {
void rorxl(Register dst, Register src, byte imm8);
void rorxl(Register dst, const Operand& src, byte imm8);
#define PACKED_OP_LIST(V) \
V(and, 0x54) \
V(xor, 0x57)
#define AVX_PACKED_OP_DECLARE(name, opcode) \
void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vps(opcode, dst, src1, src2); \
} \
void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
vps(opcode, dst, src1, src2); \
} \
void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vpd(opcode, dst, src1, src2); \
} \
void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
vpd(opcode, dst, src1, src2); \
}
PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
// Debugging
void Print();
...
@@ -876,13 +876,7 @@ int DisassemblerX64::SetCC(byte* data) {
int DisassemblerX64::AVXInstruction(byte* data) {
byte opcode = *data;
byte* current = data + 1;
- if (vex_0f() && opcode == 0x2e) {
-   int mod, regop, rm;
-   get_modrm(*current, &mod, &regop, &rm);
-   AppendToBuffer("vucomis%c %s,", vex_66() ? 'd' : 's',
-                  NameOfXMMRegister(regop));
-   current += PrintRightXMMOperand(current);
- } else if (vex_66() && vex_0f38()) {
if (vex_66() && vex_0f38()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
@@ -1137,6 +1131,49 @@ int DisassemblerX64::AVXInstruction(byte* data) {
default:
UnimplementedInstruction();
}
} else if (vex_none() && vex_0f()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x2e:
AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
case 0x54:
AppendToBuffer("vandps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x57:
AppendToBuffer("vxorps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
default:
UnimplementedInstruction();
}
} else if (vex_66() && vex_0f()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x2e:
AppendToBuffer("vucomisd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
case 0x54:
AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x57:
AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
default:
UnimplementedInstruction();
}
} else {
UnimplementedInstruction();
}
...
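In both disassemblers the new branches key off the VEX prefix field and the opcode byte: no mandatory prefix (vex_none) selects the packed-single mnemonics, the 66 prefix (vex_66) the packed-double ones, and opcode 0x54/0x57 selects AND/XOR. A hypothetical helper summarizing that mapping (not part of the change):

  const char* PackedAvxMnemonic(bool has_66_prefix, byte opcode) {
    if (opcode == 0x54) return has_66_prefix ? "vandpd" : "vandps";
    if (opcode == 0x57) return has_66_prefix ? "vxorpd" : "vxorps";
    return nullptr;  // anything else falls through to UnimplementedInstruction()
  }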
@@ -525,6 +525,16 @@ TEST(DisasmIa320) {
__ vminss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vmaxss(xmm0, xmm1, xmm2);
__ vmaxss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vandps(xmm0, xmm1, xmm2);
__ vandps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vxorps(xmm0, xmm1, xmm2);
__ vxorps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vandpd(xmm0, xmm1, xmm2);
__ vandpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vxorpd(xmm0, xmm1, xmm2);
__ vxorpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
}
}
...
@@ -525,6 +525,16 @@ TEST(DisasmX64) {
__ vmaxsd(xmm9, xmm1, Operand(rbx, rcx, times_1, 10000));
__ vucomisd(xmm9, xmm1);
__ vucomisd(xmm8, Operand(rbx, rdx, times_2, 10981));
__ vandps(xmm0, xmm9, xmm2);
__ vandps(xmm9, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vxorps(xmm0, xmm1, xmm9);
__ vxorps(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vandpd(xmm0, xmm9, xmm2);
__ vandpd(xmm9, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vxorpd(xmm0, xmm1, xmm9);
__ vxorpd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
}
}
...
@@ -640,6 +640,7 @@ TEST_F(InstructionSelectorTest, Int32MulHigh) {
TEST_F(InstructionSelectorTest, Float32Abs) {
{
StreamBuilder m(this, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
@@ -653,10 +654,26 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64Abs) {
{
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
@@ -670,6 +687,21 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
@@ -706,6 +738,7 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
{
StreamBuilder m(this, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
@@ -718,10 +751,26 @@ TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
{
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
@@ -734,6 +783,21 @@ TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
...
@@ -997,6 +997,7 @@ TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
TEST_F(InstructionSelectorTest, Float32Abs) {
{
StreamBuilder m(this, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
@@ -1010,10 +1011,26 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64Abs) {
{
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
@@ -1027,6 +1044,21 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
@@ -1063,6 +1095,7 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
{
StreamBuilder m(this, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
@@ -1075,10 +1108,26 @@ TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, kMachFloat32, kMachFloat32);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
{
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
@@ -1091,6 +1140,21 @@ TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
...