Commit 50c4d882 authored by Weiliang Lin, committed by Benedikt Meurer

[x64] introduce vex prefix version of float64 arithmetic binop

BUG=
R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/764863002

Patch from Weiliang Lin <weiliang.lin@intel.com>.

Cr-Commit-Position: refs/heads/master@{#25582}
parent 45a36948
@@ -207,6 +207,19 @@ static bool HasImmediateInput(Instruction* instr, int index) {
   } while (0)
 
 
+#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
+  do {                                                                 \
+    CpuFeatureScope avx_scope(masm(), AVX);                            \
+    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
+      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                   i.InputDoubleRegister(1));                          \
+    } else {                                                           \
+      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                   i.InputOperand(1));                                 \
+    }                                                                  \
+  } while (0)
+
+
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   X64OperandConverter i(this, instr);
@@ -482,6 +495,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       }
       __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
       break;
+    case kAVXFloat64Add:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
+      break;
+    case kAVXFloat64Sub:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
+      break;
+    case kAVXFloat64Mul:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
+      break;
+    case kAVXFloat64Div:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
+      break;
     case kX64Movsxbl:
       if (instr->addressing_mode() != kMode_None) {
         __ movsxbl(i.OutputRegister(), i.MemoryOperand());
...
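Note: for the kAVXFloat64Add case above, ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd) expands to roughly the following (a sketch of the preprocessor output; instr and i are the locals of AssembleArchInstruction):

  do {
    CpuFeatureScope avx_scope(masm(), AVX);  // enables AVX encodings in scope
    if (instr->InputAt(1)->IsDoubleRegister()) {
      __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
    } else {
      __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));  // second input may live in memory
    }
  } while (0);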
@@ -62,6 +62,10 @@ namespace compiler {
   V(SSEFloat64ToUint32)          \
   V(SSEInt32ToFloat64)           \
   V(SSEUint32ToFloat64)          \
+  V(AVXFloat64Add)               \
+  V(AVXFloat64Sub)               \
+  V(AVXFloat64Mul)               \
+  V(AVXFloat64Div)               \
   V(X64Movsxbl)                  \
   V(X64Movzxbl)                  \
   V(X64Movb)                     \
...
@@ -705,29 +705,49 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Div(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
...
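Note: this selector change is where the VEX encoding pays off. SSE2 addsd is a destructive two-operand instruction, so the result had to be pinned to the first input's register (g.DefineSameAsFirst), which can force the register allocator to insert moves. AVX vaddsd is a non-destructive three-operand instruction, so the result may be defined as any register (g.DefineAsRegister). A minimal sketch at the assembler level, using calls this patch exercises (register choices are illustrative):

  __ addsd(xmm1, xmm2);         // SSE2: xmm1 = xmm1 + xmm2 (dst aliases src1)
  __ vaddsd(xmm0, xmm1, xmm2);  // AVX:  xmm0 = xmm1 + xmm2 (dst unconstrained)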
@@ -189,6 +189,65 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
 }
 
 
+// byte 1 of 3-byte VEX
+void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
+                                LeadingOpcode m) {
+  byte rxb = ~((reg.high_bit() << 2) | rm.high_bit()) << 5;
+  emit(rxb | m);
+}
+
+
+// byte 1 of 3-byte VEX
+void Assembler::emit_vex3_byte1(XMMRegister reg, const Operand& rm,
+                                LeadingOpcode m) {
+  byte rxb = ~((reg.high_bit() << 2) | rm.rex_) << 5;
+  emit(rxb | m);
+}
+
+
+// byte 1 of 2-byte VEX
+void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
+                                SIMDPrefix pp) {
+  byte rv = ~((reg.high_bit() << 4) | v.code()) << 3;
+  emit(rv | l | pp);
+}
+
+
+// byte 2 of 3-byte VEX
+void Assembler::emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
+                                SIMDPrefix pp) {
+  emit(w | ((~v.code() & 0xf) << 3) | l | pp);
+}
+
+
+void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
+                                XMMRegister rm, VectorLength l, SIMDPrefix pp,
+                                LeadingOpcode mm, VexW w) {
+  if (rm.high_bit() || mm != k0F || w != kW0) {
+    emit_vex3_byte0();
+    emit_vex3_byte1(reg, rm, mm);
+    emit_vex3_byte2(w, vreg, l, pp);
+  } else {
+    emit_vex2_byte0();
+    emit_vex2_byte1(reg, vreg, l, pp);
+  }
+}
+
+
+void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
+                                const Operand& rm, VectorLength l,
+                                SIMDPrefix pp, LeadingOpcode mm, VexW w) {
+  if (rm.rex_ || mm != k0F || w != kW0) {
+    emit_vex3_byte0();
+    emit_vex3_byte1(reg, rm, mm);
+    emit_vex3_byte2(w, vreg, l, pp);
+  } else {
+    emit_vex2_byte0();
+    emit_vex2_byte1(reg, vreg, l, pp);
+  }
+}
+
+
 Address Assembler::target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool) {
   return Memory::int32_at(pc) + pc + 4;
...
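Note: a self-contained sketch (not V8 code) of the bit arithmetic in the two-byte path of emit_vex_prefix above, printing the encoding of vaddsd xmm0, xmm1, xmm2; the constant values follow the enums added to assembler-x64.h:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // vaddsd xmm0, xmm1, xmm2: reg = dst = 0, vreg = src1 = 1, rm = src2 = 2.
    const unsigned reg = 0, vreg = 1, rm = 2;
    const unsigned kL128 = 0x0;  // VectorLength (L bit clear)
    const unsigned kF2 = 0x3;    // SIMDPrefix standing in for the F2 escape
    // rm fits in xmm0-xmm7, the opcode map is 0F and W is 0, so the short
    // two-byte prefix applies (emit_vex2_byte0/emit_vex2_byte1 above).
    uint8_t byte0 = 0xc5;
    uint8_t byte1 = (~(((reg >> 3) << 4) | vreg) << 3) | kL128 | kF2;
    uint8_t opcode = 0x58;                               // same byte as addsd
    uint8_t modrm = 0xc0 | ((reg & 7) << 3) | (rm & 7);  // register-direct
    printf("%02x %02x %02x %02x\n", byte0, byte1, opcode, modrm);
    return 0;  // prints "c5 f3 58 c2"
  }

With xmm8-xmm15 as the rm operand, a 0F38/0F3A opcode map, or VEX.W set, the first branch of emit_vex_prefix emits the longer three-byte 0xc4 form instead.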
@@ -3182,45 +3182,12 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
 }
 
 
-// byte 1 of 3-byte VEX
-void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm, byte m) {
-  DCHECK(1 <= m && m <= 3);
-  byte rxb = ~((reg.high_bit() << 2) | rm.high_bit()) << 5;
-  emit(rxb | m);
-}
-
-
-// byte 1 of 3-byte VEX
-void Assembler::emit_vex3_byte1(XMMRegister reg, const Operand& rm, byte m) {
-  DCHECK(1 <= m && m <= 3);
-  byte rxb = ~((reg.high_bit() << 2) | rm.rex_) << 5;
-  emit(rxb | m);
-}
-
-
-// byte 1 of 2-byte VEX
-void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, byte lpp) {
-  DCHECK(lpp <= 3);
-  byte rv = ~((reg.high_bit() << 4) | v.code()) << 3;
-  emit(rv | lpp);
-}
-
-
-// byte 2 of 3-byte VEX
-void Assembler::emit_vex3_byte2(byte w, XMMRegister v, byte lpp) {
-  DCHECK(w <= 1);
-  DCHECK(lpp <= 3);
-  emit((w << 7) | ((~v.code() & 0xf) << 3) | lpp);
-}
-
-
+// AVX instructions
 void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
                        XMMRegister src2) {
   DCHECK(IsEnabled(FMA3));
   EnsureSpace ensure_space(this);
-  emit_vex3_byte0();
-  emit_vex3_byte1(dst, src2, 0x02);
-  emit_vex3_byte2(0x1, src1, 0x01);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW1);
   emit(op);
   emit_sse_operand(dst, src2);
 }
@@ -3230,9 +3197,7 @@ void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
                        const Operand& src2) {
   DCHECK(IsEnabled(FMA3));
   EnsureSpace ensure_space(this);
-  emit_vex3_byte0();
-  emit_vex3_byte1(dst, src2, 0x02);
-  emit_vex3_byte2(0x1, src1, 0x01);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW1);
   emit(op);
   emit_sse_operand(dst, src2);
 }
@@ -3242,9 +3207,7 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
                        XMMRegister src2) {
   DCHECK(IsEnabled(FMA3));
   EnsureSpace ensure_space(this);
-  emit_vex3_byte0();
-  emit_vex3_byte1(dst, src2, 0x02);
-  emit_vex3_byte2(0x0, src1, 0x01);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW0);
   emit(op);
   emit_sse_operand(dst, src2);
 }
@@ -3254,9 +3217,27 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
                        const Operand& src2) {
   DCHECK(IsEnabled(FMA3));
   EnsureSpace ensure_space(this);
-  emit_vex3_byte0();
-  emit_vex3_byte1(dst, src2, 0x02);
-  emit_vex3_byte2(0x0, src1, 0x01);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW0);
   emit(op);
   emit_sse_operand(dst, src2);
 }
+
+
+void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
+                    XMMRegister src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
+  emit(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
+                    const Operand& src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
+  emit(op);
+  emit_sse_operand(dst, src2);
+}
...
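Note: the opcode bytes routed through vsd() are the classic SSE2 ones (0x58 addsd, 0x59 mulsd, 0x5c subsd, 0x5e divsd); what changes is that the F2 and 0F escape bytes of the legacy encoding are folded into the VEX prefix as kF2/k0F. A usage sketch (assumes an Assembler named assm and the usual '#define __ assm.' convention from the test below):

  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(&assm, AVX);  // else DCHECK(IsEnabled(AVX)) fires
    __ vaddsd(xmm0, xmm1, xmm2);        // vsd(0x58, ...): c5 f3 58 c2
    __ vsubsd(xmm0, xmm1, xmm2);        // vsd(0x5c, ...): c5 f3 5c c2
  }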
@@ -1285,6 +1285,33 @@ class Assembler : public AssemblerBase {
   void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
   void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
 
+  void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsd(0x58, dst, src1, src2);
+  }
+  void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x58, dst, src1, src2);
+  }
+  void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsd(0x5c, dst, src1, src2);
+  }
+  void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5c, dst, src1, src2);
+  }
+  void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsd(0x59, dst, src1, src2);
+  }
+  void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x59, dst, src1, src2);
+  }
+  void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsd(0x5e, dst, src1, src2);
+  }
+  void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5e, dst, src1, src2);
+  }
+  void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+  void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
   // Debugging
 
   void Print();
@@ -1479,12 +1506,26 @@ class Assembler : public AssemblerBase {
   }
 
   // Emit vex prefix
+  enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128 };
+  enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
+  enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
   void emit_vex2_byte0() { emit(0xc5); }
-  void emit_vex2_byte1(XMMRegister reg, XMMRegister v, byte lpp);
+  inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
+                              SIMDPrefix pp);
   void emit_vex3_byte0() { emit(0xc4); }
-  void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, byte m);
-  void emit_vex3_byte1(XMMRegister reg, const Operand& rm, byte m);
-  void emit_vex3_byte2(byte w, XMMRegister v, byte lpp);
+  inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
+  inline void emit_vex3_byte1(XMMRegister reg, const Operand& rm,
+                              LeadingOpcode m);
+  inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
+                              SIMDPrefix pp);
+  inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
+                              VectorLength l, SIMDPrefix pp, LeadingOpcode m,
+                              VexW w);
+  inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, const Operand& rm,
+                              VectorLength l, SIMDPrefix pp, LeadingOpcode m,
+                              VexW w);
 
   // Emit the ModR/M byte, and optionally the SIB byte and
   // 1- or 4-byte offset for a memory operand.  Also encodes
...
@@ -383,12 +383,12 @@ class DisassemblerX64 {
   }
 
   bool vex_0f38() {
-    DCHECK(vex_byte0_ == VEX3_PREFIX);
+    if (vex_byte0_ == VEX2_PREFIX) return false;
     return (vex_byte1_ & 3) == 2;
   }
 
   bool vex_0f3a() {
-    DCHECK(vex_byte0_ == VEX3_PREFIX);
+    if (vex_byte0_ == VEX2_PREFIX) return false;
    return (vex_byte1_ & 3) == 3;
   }
@@ -870,83 +870,102 @@ int DisassemblerX64::SetCC(byte* data) {
 
 int DisassemblerX64::AVXInstruction(byte* data) {
   byte opcode = *data;
   byte* current = data + 1;
-  if (vex_byte0_ == VEX3_PREFIX) {
-    if (vex_128()) {
-      if (vex_66() && vex_0f38()) {
-        int mod, regop, rm, vvvv = vex_vreg();
-        get_modrm(*current, &mod, &regop, &rm);
-        switch (opcode) {
+  if (vex_66() && vex_0f38()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
       case 0x99:
         AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0xa9:
         AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0xb9:
         AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0x9b:
         AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0xab:
         AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0xbb:
         AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0x9d:
         AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0xad:
         AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0xbd:
         AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0x9f:
         AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0xaf:
         AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       case 0xbf:
         AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
                        NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
         current += PrintRightXMMOperand(current);
         break;
       default:
         UnimplementedInstruction();
-        }
-      }
-    } else {
-      UnimplementedInstruction();
     }
-  } else if (vex_byte0_ == VEX2_PREFIX) {
-    UnimplementedInstruction();
+  } else if (vex_f2() && vex_0f()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x58:
+        AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x59:
+        AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5c:
+        AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5e:
+        AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
   } else {
-    UNREACHABLE();
+    UnimplementedInstruction();
   }
 
   return static_cast<int>(current - data);
...
@@ -475,6 +475,21 @@ TEST(DisasmX64) {
     }
   }
 
+  // AVX instruction
+  {
+    if (CpuFeatures::IsSupported(AVX)) {
+      CpuFeatureScope scope(&assm, AVX);
+      __ vaddsd(xmm0, xmm1, xmm2);
+      __ vaddsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vmulsd(xmm0, xmm1, xmm2);
+      __ vmulsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vsubsd(xmm0, xmm1, xmm2);
+      __ vsubsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vdivsd(xmm0, xmm1, xmm2);
+      __ vdivsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+    }
+  }
+
   // FMA3 instruction
   {
     if (CpuFeatures::IsSupported(FMA3)) {
...
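Note: given the format strings in AVXInstruction, the register-register cases above should disassemble along these lines (expected shapes, not captured output; the memory forms print their last operand via PrintRightXMMOperand instead of a register name):

  vaddsd xmm0,xmm1,xmm2
  vmulsd xmm0,xmm1,xmm2
  vsubsd xmm0,xmm1,xmm2
  vdivsd xmm0,xmm1,xmm2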
@@ -887,6 +887,38 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
   }
 }
 
+
+TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
+  {
+    StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+    Node* mul = m.Float64Mul(add, m.Parameter(1));
+    Node* sub = m.Float64Sub(mul, add);
+    Node* ret = m.Float64Div(mul, sub);
+    m.Return(ret);
+    Stream s = m.Build(AVX);
+    ASSERT_EQ(4U, s.size());
+    EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
+  }
+  {
+    StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+    Node* mul = m.Float64Mul(add, m.Parameter(1));
+    Node* sub = m.Float64Sub(mul, add);
+    Node* ret = m.Float64Div(mul, sub);
+    m.Return(ret);
+    Stream s = m.Build();
+    ASSERT_EQ(4U, s.size());
+    EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+  }
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8