Commit 50c4d882 authored by Weiliang Lin, committed by Benedikt Meurer

[x64] Introduce VEX prefix versions of the float64 arithmetic binops

BUG=
R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/764863002

Patch from Weiliang Lin <weiliang.lin@intel.com>.

Cr-Commit-Position: refs/heads/master@{#25582}
parent 45a36948
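The point of the VEX encoding here is the non-destructive three-operand form: the SSE float64 binops overwrite their first input, while the AVX versions take an explicit destination. A minimal sketch in terms of the assembler API this patch touches (register choices are illustrative):

  __ addsd(xmm0, xmm2);         // SSE: xmm0 = xmm0 + xmm2, dst doubles as src1
  __ vaddsd(xmm0, xmm1, xmm2);  // AVX: xmm0 = xmm1 + xmm2, both inputs preserved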
@@ -207,6 +207,19 @@ static bool HasImmediateInput(Instruction* instr, int index) {
} while (0)
#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr) \
do { \
CpuFeatureScope avx_scope(masm(), AVX); \
if (instr->InputAt(1)->IsDoubleRegister()) { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
} else { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputOperand(1)); \
} \
} while (0)
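Only input 1 gets the register-or-memory split; input 0 is always constrained to a register, matching the g.UseRegister()/g.Use() operand pair in the instruction selector further down.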
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X64OperandConverter i(this, instr);
@@ -482,6 +495,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
break;
case kAVXFloat64Add:
ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
break;
case kAVXFloat64Sub:
ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
break;
case kAVXFloat64Mul:
ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
break;
case kAVXFloat64Div:
ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
break;
case kX64Movsxbl:
if (instr->addressing_mode() != kMode_None) {
__ movsxbl(i.OutputRegister(), i.MemoryOperand());
......
@@ -62,6 +62,10 @@ namespace compiler {
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movb) \
......
@@ -705,29 +705,49 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
X64OperandGenerator g(this);
-  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
X64OperandGenerator g(this);
-  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
X64OperandGenerator g(this);
-  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
}
void InstructionSelector::VisitFloat64Div(Node* node) {
X64OperandGenerator g(this);
-  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
}
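The operand constraints carry the actual win: DefineSameAsFirst pins the output to input 0, so on the SSE path the register allocator has to copy that input first whenever it is still live, while DefineAsRegister on the AVX path lets the result land in any register. A sketch of the two sequences this can produce (registers and the extra move are illustrative):

  __ movaps(xmm3, xmm1);          // SSE path: copy, since xmm1 stays live
  __ addsd(xmm3, xmm2);
  __ vaddsd(xmm3, xmm1, xmm2);    // AVX path: one non-destructive instruction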
......
@@ -189,6 +189,65 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
}
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
LeadingOpcode m) {
byte rxb = ~((reg.high_bit() << 2) | rm.high_bit()) << 5;
emit(rxb | m);
}
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, const Operand& rm,
LeadingOpcode m) {
byte rxb = ~((reg.high_bit() << 2) | rm.rex_) << 5;
emit(rxb | m);
}
// byte 1 of 2-byte VEX
void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
SIMDPrefix pp) {
byte rv = ~((reg.high_bit() << 4) | v.code()) << 3;
emit(rv | l | pp);
}
// byte 2 of 3-byte VEX
void Assembler::emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
SIMDPrefix pp) {
emit(w | ((~v.code() & 0xf) << 3) | l | pp);
}
void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
XMMRegister rm, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
if (rm.high_bit() || mm != k0F || w != kW0) {
emit_vex3_byte0();
emit_vex3_byte1(reg, rm, mm);
emit_vex3_byte2(w, vreg, l, pp);
} else {
emit_vex2_byte0();
emit_vex2_byte1(reg, vreg, l, pp);
}
}
void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
const Operand& rm, VectorLength l,
SIMDPrefix pp, LeadingOpcode mm, VexW w) {
if (rm.rex_ || mm != k0F || w != kW0) {
emit_vex3_byte0();
emit_vex3_byte1(reg, rm, mm);
emit_vex3_byte2(w, vreg, l, pp);
} else {
emit_vex2_byte0();
emit_vex2_byte1(reg, vreg, l, pp);
}
}
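A worked example of the prefix selection (a sketch, assuming vaddsd xmm0, xmm1, xmm2): rm = xmm2 has no high bit, mm == k0F and w == kW0, so the cheaper 2-byte form is taken. emit_vex2_byte1 computes rv = ~((0 << 4) | 1) << 3 = 0xf0 and ORs in kLIG (0) and kF2 (3), giving 0xf3; with opcode 0x58 and ModR/M byte 0xc2 the full encoding is c5 f3 58 c2.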
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
return Memory::int32_at(pc) + pc + 4;
......
@@ -3182,45 +3182,12 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
}
-// byte 1 of 3-byte VEX
-void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm, byte m) {
-  DCHECK(1 <= m && m <= 3);
-  byte rxb = ~((reg.high_bit() << 2) | rm.high_bit()) << 5;
-  emit(rxb | m);
-}
-// byte 1 of 3-byte VEX
-void Assembler::emit_vex3_byte1(XMMRegister reg, const Operand& rm, byte m) {
-  DCHECK(1 <= m && m <= 3);
-  byte rxb = ~((reg.high_bit() << 2) | rm.rex_) << 5;
-  emit(rxb | m);
-}
-// byte 1 of 2-byte VEX
-void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, byte lpp) {
-  DCHECK(lpp <= 3);
-  byte rv = ~((reg.high_bit() << 4) | v.code()) << 3;
-  emit(rv | lpp);
-}
-// byte 2 of 3-byte VEX
-void Assembler::emit_vex3_byte2(byte w, XMMRegister v, byte lpp) {
-  DCHECK(w <= 1);
-  DCHECK(lpp <= 3);
-  emit((w << 7) | ((~v.code() & 0xf) << 3) | lpp);
-}
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
                       XMMRegister src2) {
  DCHECK(IsEnabled(FMA3));
  EnsureSpace ensure_space(this);
-  emit_vex3_byte0();
-  emit_vex3_byte1(dst, src2, 0x02);
-  emit_vex3_byte2(0x1, src1, 0x01);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW1);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3230,9 +3197,7 @@ void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
const Operand& src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
-  emit_vex3_byte0();
-  emit_vex3_byte1(dst, src2, 0x02);
-  emit_vex3_byte2(0x1, src1, 0x01);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW1);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3242,9 +3207,7 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
-  emit_vex3_byte0();
-  emit_vex3_byte1(dst, src2, 0x02);
-  emit_vex3_byte2(0x0, src1, 0x01);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW0);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3254,9 +3217,27 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
const Operand& src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
-  emit_vex3_byte0();
-  emit_vex3_byte1(dst, src2, 0x02);
-  emit_vex3_byte2(0x0, src1, 0x01);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW0);
emit(op);
emit_sse_operand(dst, src2);
}
void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
const Operand& src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
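vsd factors the scalar-double VEX pattern (kLIG, kF2, k0F, kWIG) into one place. The op bytes handed in by the header wrappers, 0x58, 0x59, 0x5c and 0x5e, are the same opcodes that SSE2 addsd, mulsd, subsd and divsd use after their F2 0F escape, so the AVX forms differ from the legacy ones only in the prefix.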
......
@@ -1285,6 +1285,33 @@ class Assembler : public AssemblerBase {
void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsd(0x58, dst, src1, src2);
}
void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x58, dst, src1, src2);
}
void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsd(0x5c, dst, src1, src2);
}
void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x5c, dst, src1, src2);
}
void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsd(0x59, dst, src1, src2);
}
void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x59, dst, src1, src2);
}
void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsd(0x5e, dst, src1, src2);
}
void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x5e, dst, src1, src2);
}
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
// Debugging
void Print();
@@ -1479,12 +1506,26 @@
}
// Emit vex prefix
+  enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128 };
+  enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
+  enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
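These enumerators hold the raw VEX field values: SIMDPrefix is the implied legacy prefix (none, 66, F3 or F2), LeadingOpcode is the implied escape sequence, numbered k0F = 1, k0F38 = 2, k0F3A = 3 as in the VEX mmmmm field, VectorLength is VEX.L, and kW1 is kept pre-shifted into bit 7 so emit_vex3_byte2 can OR it straight into its output byte.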
void emit_vex2_byte0() { emit(0xc5); }
-  void emit_vex2_byte1(XMMRegister reg, XMMRegister v, byte lpp);
+  inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
+                              SIMDPrefix pp);
   void emit_vex3_byte0() { emit(0xc4); }
-  void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, byte m);
-  void emit_vex3_byte1(XMMRegister reg, const Operand& rm, byte m);
-  void emit_vex3_byte2(byte w, XMMRegister v, byte lpp);
+  inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
+  inline void emit_vex3_byte1(XMMRegister reg, const Operand& rm,
+                              LeadingOpcode m);
+  inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
+                              SIMDPrefix pp);
+  inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
+                              VectorLength l, SIMDPrefix pp, LeadingOpcode m,
+                              VexW w);
+  inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, const Operand& rm,
+                              VectorLength l, SIMDPrefix pp, LeadingOpcode m,
+                              VexW w);
// Emit the ModR/M byte, and optionally the SIB byte and
// 1- or 4-byte offset for a memory operand. Also encodes
......
@@ -383,12 +383,12 @@ class DisassemblerX64 {
}
bool vex_0f38() {
-    DCHECK(vex_byte0_ == VEX3_PREFIX);
+    if (vex_byte0_ == VEX2_PREFIX) return false;
return (vex_byte1_ & 3) == 2;
}
bool vex_0f3a() {
-    DCHECK(vex_byte0_ == VEX3_PREFIX);
+    if (vex_byte0_ == VEX2_PREFIX) return false;
return (vex_byte1_ & 3) == 3;
}
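The early returns replace the old DCHECKs because these predicates are now consulted for 2-byte prefixes too: the c5 form implies the 0F map and has no map-select bits (its single payload byte holds R, vvvv, L and pp), so vex_0f38 and vex_0f3a can only hold for the 3-byte c4 form.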
@@ -870,83 +870,102 @@ int DisassemblerX64::SetCC(byte* data) {
int DisassemblerX64::AVXInstruction(byte* data) {
byte opcode = *data;
byte* current = data + 1;
-  if (vex_byte0_ == VEX3_PREFIX) {
-    if (vex_128()) {
-      if (vex_66() && vex_0f38()) {
-        int mod, regop, rm, vvvv = vex_vreg();
-        get_modrm(*current, &mod, &regop, &rm);
-        switch (opcode) {
-          case 0x99:
-            AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0xa9:
-            AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0xb9:
-            AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0x9b:
-            AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0xab:
-            AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0xbb:
-            AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0x9d:
-            AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0xad:
-            AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0xbd:
-            AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0x9f:
-            AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0xaf:
-            AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          case 0xbf:
-            AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
-                           NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
-            current += PrintRightXMMOperand(current);
-            break;
-          default:
-            UnimplementedInstruction();
-        }
-      }
-    } else {
-      UnimplementedInstruction();
+  if (vex_66() && vex_0f38()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x99:
+        AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xa9:
+        AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xb9:
+        AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9b:
+        AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xab:
+        AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbb:
+        AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9d:
+        AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xad:
+        AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbd:
+        AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9f:
+        AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xaf:
+        AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbf:
+        AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_f2() && vex_0f()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x58:
+        AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x59:
+        AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5c:
+        AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5e:
+        AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
-  } else if (vex_byte0_ == VEX2_PREFIX) {
-    UnimplementedInstruction();
  } else {
-    UNREACHABLE();
+    UnimplementedInstruction();
}
return static_cast<int>(current - data);
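Note the flattened structure: instead of dispatching on the prefix byte first, the rewrite classifies by SIMD prefix and opcode map (vex_66/vex_f2 and vex_0f/vex_0f38). Assuming the companion helpers not shown in this hunk treat the 2-byte form as carrying pp in its payload byte with an implied 0F map, this decodes vaddsd, vmulsd, vsubsd and vdivsd regardless of which prefix form the assembler chose.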
......
@@ -475,6 +475,21 @@ TEST(DisasmX64) {
}
}
// AVX instruction
{
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(&assm, AVX);
__ vaddsd(xmm0, xmm1, xmm2);
__ vaddsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vmulsd(xmm0, xmm1, xmm2);
__ vmulsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vsubsd(xmm0, xmm1, xmm2);
__ vsubsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vdivsd(xmm0, xmm1, xmm2);
__ vdivsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
}
}
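Given the disassembler branch above, the register forms should print as, e.g., vaddsd xmm0,xmm1,xmm2, and the memory forms with a [rbx+rcx*4+0x2710] operand (10000 = 0x2710); the exact operand spelling follows PrintRightXMMOperand.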
// FMA3 instruction
{
if (CpuFeatures::IsSupported(FMA3)) {
......
@@ -887,6 +887,38 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
}
}
TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
{
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
Node* ret = m.Float64Div(mul, sub);
m.Return(ret);
Stream s = m.Build(AVX);
ASSERT_EQ(4U, s.size());
EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
}
{
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
Node* ret = m.Float64Div(mul, sub);
m.Return(ret);
Stream s = m.Build();
ASSERT_EQ(4U, s.size());
EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
}
}
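The two blocks differ only in the Build call: Build(AVX) runs selection with AVX added to the enabled feature set, while plain Build() uses the default set, so the same graph exercises both the kAVXFloat64* and the kSSEFloat64* selections.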
} // namespace compiler
} // namespace internal
} // namespace v8