Commit 2ad1c224 authored by weiliang.lin, committed by Commit bot

[ia32] Introduce vex prefix version of float64 arithmetic binop

port 50c4d882

BUG=

Review URL: https://codereview.chromium.org/770183002

Cr-Commit-Position: refs/heads/master@{#25595}
parent 34874b98
@@ -359,7 +359,7 @@ CPU::CPU()
  has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
  has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
  has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
-  has_avx_ = (cpu_info[2] & 0x18000000) != 0;
+  has_avx_ = (cpu_info[2] & 0x10000000) != 0;
  if (has_avx_) has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
}
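
Note on the one-bit change above: the old mask 0x18000000 tested CPUID.1:ECX bits 27 (OSXSAVE) and 28 (AVX) as a pair, so has_avx_ could be reported on an AVX-less CPU whenever the OS had enabled XSAVE; the new mask 0x10000000 tests the AVX bit alone. A fully defensive probe would additionally ask the OS, via XGETBV, whether it actually saves the XMM/YMM state. A minimal sketch of that fuller check, with a hypothetical helper name (not part of this patch; _xgetbv comes from <immintrin.h>, and GCC/Clang need -mxsave):

static bool OsPreservesAvxState(uint32_t cpuid_ecx) {
  bool has_avx = (cpuid_ecx & 0x10000000) != 0;      // CPUID.1:ECX bit 28: AVX
  bool has_osxsave = (cpuid_ecx & 0x08000000) != 0;  // CPUID.1:ECX bit 27: OSXSAVE
  if (!has_avx || !has_osxsave) return false;
  uint64_t xcr0 = _xgetbv(0);  // XCR0: state components the OS has enabled
  return (xcr0 & 0x6) == 0x6;  // bits 1 and 2: XMM and YMM state are saved
}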
@@ -492,6 +492,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
    case kSSEUint32ToFloat64:
      __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
+    case kAVXFloat64Add: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Sub: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Mul: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Div: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
    case kIA32Movsxbl:
      __ movsx_b(i.OutputRegister(), i.MemoryOperand());
      break;
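
A note on the new cases: CpuFeatureScope performs no runtime detection; it temporarily marks AVX as enabled on this assembler so the DCHECK(IsEnabled(AVX)) inside vsd() (added further down) passes, and instruction selection has already guaranteed hardware support by choosing the kAVXFloat64* opcodes. The guard pattern as a standalone sketch (hypothetical helper; assumes the two-operand SSE movsd/addsd forms that the existing kSSEFloat64Add case relies on):

void EmitFloat64Add(MacroAssembler* masm, XMMRegister dst, XMMRegister lhs,
                    const Operand& rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope avx_scope(masm, AVX);
    masm->vaddsd(dst, lhs, rhs);  // Three-operand VEX form: dst = lhs + rhs.
  } else {
    masm->movsd(dst, lhs);  // SSE form is destructive, so copy first,
    masm->addsd(dst, rhs);  // then dst += rhs.
  }
}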
@@ -46,6 +46,10 @@ namespace compiler {
  V(SSEFloat64ToUint32)  \
  V(SSEInt32ToFloat64)   \
  V(SSEUint32ToFloat64)  \
+  V(AVXFloat64Add)       \
+  V(AVXFloat64Sub)       \
+  V(AVXFloat64Mul)       \
+  V(AVXFloat64Div)       \
  V(IA32Movsxbl)         \
  V(IA32Movzxbl)         \
  V(IA32Movb)            \
@@ -873,29 +873,49 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
  IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
}
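
The AVX payoff here is in the operand constraints rather than raw speed: the SSE encodings are destructive, so the SSE path must tie the output to the first input (DefineSameAsFirst), while the VEX encodings take a separate destination, letting DefineAsRegister hand the register allocator a free choice and skip a copy. Schematically (illustrative allocation, not output from the compiler):

// SSE, output tied to first input; a move is needed when xmm1 stays live:
//   movaps xmm3, xmm1
//   addsd  xmm3, xmm2         ; xmm3 = xmm1 + xmm2
// AVX, unconstrained destination:
//   vaddsd xmm3, xmm1, xmm2   ; no copy needed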
@@ -60,11 +60,17 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
  if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
  if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
  if (cpu.has_avx() && FLAG_enable_avx) supported_ |= 1u << AVX;
+  if (cpu.has_fma3() && FLAG_enable_fma3) supported_ |= 1u << FMA3;
}

void CpuFeatures::PrintTarget() { }
-void CpuFeatures::PrintFeatures() { }
+void CpuFeatures::PrintFeatures() {
+  printf("SSE3=%d SSE4_1=%d AVX=%d FMA3=%d\n", CpuFeatures::IsSupported(SSE3),
+         CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(AVX),
+         CpuFeatures::IsSupported(FMA3));
+}
// -----------------------------------------------------------------------------
@@ -2437,6 +2443,16 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
}

+void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
+                    const Operand& src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(src1, kLIG, kF2, k0F, kWIG);
+  EMIT(op);
+  emit_sse_operand(dst, src2);
+}

void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
  Register ireg = { reg.code() };
  emit_operand(ireg, adr);
@@ -2458,6 +2474,19 @@ void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
}

+void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
+                                LeadingOpcode mm, VexW w) {
+  if (mm != k0F || w != kW0) {
+    EMIT(0xc4);
+    // The inverted R/X/B bits should read "111" on ia32, where no extended
+    // registers exist; 0xc0 | mm would leave the B extension bit set, which
+    // 32-bit hardware ignores but disassemblers such as gdb do not.
+    EMIT(0xe0 | mm);
+    EMIT(w | ((~vreg.code() & 0xf) << 3) | l | pp);
+  } else {
+    EMIT(0xc5);
+    EMIT(((~vreg.code()) << 3) | l | pp);
+  }
+}

void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  EnsureSpace ensure_space(this);
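
A worked encoding for the vsd()/emit_vex_prefix() pair above, derived by hand from the VEX layout rather than taken from the patch. vsd() always passes k0F and kWIG, so the scalar-double ops take the two-byte branch:

// vaddsd xmm0, xmm1, xmm2  ==  vsd(0x58, xmm0, xmm1, Operand(xmm2))
//   0xc5                  two-byte VEX escape (mm == k0F, w == kW0)
//   0xf3 = 1 1110 0 11    inverted R, vvvv = ~0001 (xmm1), L = 0, pp = kF2
//   0x58                  addsd opcode byte
//   0xc2 = 11 000 010     ModRM: mod = 11, reg = xmm0, rm = xmm2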
@@ -1051,6 +1051,34 @@ class Assembler : public AssemblerBase {
  // Parallel XMM operations.
  void movntdqa(XMMRegister dst, const Operand& src);
  void movntdq(const Operand& dst, XMMRegister src);

+  // AVX instructions.
+  void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vaddsd(dst, src1, Operand(src2));
+  }
+  void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x58, dst, src1, src2);
+  }
+  void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsubsd(dst, src1, Operand(src2));
+  }
+  void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5c, dst, src1, src2);
+  }
+  void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vmulsd(dst, src1, Operand(src2));
+  }
+  void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x59, dst, src1, src2);
+  }
+  void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vdivsd(dst, src1, Operand(src2));
+  }
+  void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5e, dst, src1, src2);
+  }
+  void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);

  // Prefetch src position into cache level.
  // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
  // non-temporal
@@ -1154,6 +1182,14 @@
  void emit_farith(int b1, int b2, int i);

+  // Emit vex prefix.
+  enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128 };
+  enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
+  enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
+  inline void emit_vex_prefix(XMMRegister v, VectorLength l, SIMDPrefix pp,
+                              LeadingOpcode m, VexW w);

  // labels
  void print(Label* L);
  void bind_to(Label* L, int pos);
@@ -246,6 +246,9 @@ class DisassemblerIA32 {
  DisassemblerIA32(const NameConverter& converter,
                   bool abort_on_unimplemented = true)
      : converter_(converter),
+        vex_byte0_(0),
+        vex_byte1_(0),
+        vex_byte2_(0),
        instruction_table_(InstructionTable::get_instance()),
        tmp_buffer_pos_(0),
        abort_on_unimplemented_(abort_on_unimplemented) {
@@ -260,6 +263,9 @@ class DisassemblerIA32 {
 private:
  const NameConverter& converter_;
+  byte vex_byte0_;  // 0xc4 or 0xc5.
+  byte vex_byte1_;
+  byte vex_byte2_;  // Only used by the 3-byte VEX prefix.
  InstructionTable* instruction_table_;
  v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
  unsigned int tmp_buffer_pos_;
@@ -287,6 +293,57 @@ class DisassemblerIA32 {
    kSAR = 7
  };

+  bool vex_128() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 4) == 0;
+  }
+
+  bool vex_66() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 1;
+  }
+
+  bool vex_f3() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 2;
+  }
+
+  bool vex_f2() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 3;
+  }
+
+  bool vex_w() {
+    if (vex_byte0_ == 0xc5) return false;
+    return (vex_byte2_ & 0x80) != 0;
+  }
+
+  bool vex_0f() {
+    if (vex_byte0_ == 0xc5) return true;
+    return (vex_byte1_ & 3) == 1;
+  }
+
+  bool vex_0f38() {
+    if (vex_byte0_ == 0xc5) return false;
+    return (vex_byte1_ & 3) == 2;
+  }
+
+  bool vex_0f3a() {
+    if (vex_byte0_ == 0xc5) return false;
+    return (vex_byte1_ & 3) == 3;
+  }
+
+  int vex_vreg() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return ~(checked >> 3) & 0xf;
+  }
+
+  char float_size_code() { return "sd"[vex_w()]; }

  const char* NameOfCPURegister(int reg) const {
    return converter_.NameOfCPURegister(reg);
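
Running the encoder's example bytes back through these helpers (the same vaddsd xmm0, xmm1, xmm2 worked through in the assembler hunk above):

// Prefix bytes 0xc5 0xf3:
//   vex_128()  -> (0xf3 & 4) == 0          -> true   (L = 0: scalar/128-bit)
//   vex_f2()   -> (0xf3 & 3) == 3          -> true   (pp = F2)
//   vex_0f()   -> two-byte prefix          -> true   (mm implied to be 0F)
//   vex_w()    -> two-byte prefix          -> false
//   vex_vreg() -> ~(0xf3 >> 3) & 0xf == 1  -> xmm1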
@@ -340,6 +397,7 @@ class DisassemblerIA32 {
  int FPUInstruction(byte* data);
  int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
  int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
+  int AVXInstruction(byte* data);

  void AppendToBuffer(const char* format, ...);
@@ -679,6 +737,44 @@ int DisassemblerIA32::CMov(byte* data) {
}

+int DisassemblerIA32::AVXInstruction(byte* data) {
+  byte opcode = *data;
+  byte* current = data + 1;
+  if (vex_f2() && vex_0f()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x58:
+        AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x59:
+        AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5c:
+        AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5e:
+        AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else {
+    UnimplementedInstruction();
+  }
+  return static_cast<int>(current - data);
+}

// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
  byte escape_opcode = *data;
@@ -903,65 +999,81 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
  } else if (*data == 0x2E /*cs*/) {
    branch_hint = "predicted not taken";
    data++;
+  } else if (*data == 0xC4 && *(data + 1) >= 0xc0) {
+    vex_byte0_ = *data;
+    vex_byte1_ = *(data + 1);
+    vex_byte2_ = *(data + 2);
+    data += 3;
+  } else if (*data == 0xC5 && *(data + 1) >= 0xc0) {
+    vex_byte0_ = *data;
+    vex_byte1_ = *(data + 1);
+    data += 2;
  }
  bool processed = true;  // Will be set to false if the current instruction
                          // is not in 'instructions' table.
-  const InstructionDesc& idesc = instruction_table_->Get(*data);
-  switch (idesc.type) {
-    case ZERO_OPERANDS_INSTR:
-      AppendToBuffer(idesc.mnem);
-      data++;
-      break;
+  // Decode AVX instructions.
+  if (vex_byte0_ != 0) {
+    data += AVXInstruction(data);
+  } else {
+    const InstructionDesc& idesc = instruction_table_->Get(*data);
+    switch (idesc.type) {
+      case ZERO_OPERANDS_INSTR:
+        AppendToBuffer(idesc.mnem);
+        data++;
+        break;
-    case TWO_OPERANDS_INSTR:
-      data++;
-      data += PrintOperands(idesc.mnem, idesc.op_order_, data);
-      break;
+      case TWO_OPERANDS_INSTR:
+        data++;
+        data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+        break;
-    case JUMP_CONDITIONAL_SHORT_INSTR:
-      data += JumpConditionalShort(data, branch_hint);
-      break;
+      case JUMP_CONDITIONAL_SHORT_INSTR:
+        data += JumpConditionalShort(data, branch_hint);
+        break;
-    case REGISTER_INSTR:
-      AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
-      data++;
-      break;
+      case REGISTER_INSTR:
+        AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
+        data++;
+        break;
-    case MOVE_REG_INSTR: {
-      byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
-      AppendToBuffer("mov %s,%s",
-                     NameOfCPURegister(*data & 0x07),
-                     NameOfAddress(addr));
-      data += 5;
-      break;
-    }
+      case MOVE_REG_INSTR: {
+        byte* addr =
+            reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+        AppendToBuffer("mov %s,%s", NameOfCPURegister(*data & 0x07),
+                       NameOfAddress(addr));
+        data += 5;
+        break;
+      }
-    case CALL_JUMP_INSTR: {
-      byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
-      AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
-      data += 5;
-      break;
-    }
+      case CALL_JUMP_INSTR: {
+        byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+        AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+        data += 5;
+        break;
+      }
-    case SHORT_IMMEDIATE_INSTR: {
-      byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
-      AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
-      data += 5;
-      break;
-    }
+      case SHORT_IMMEDIATE_INSTR: {
+        byte* addr =
+            reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+        AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
+        data += 5;
+        break;
+      }
-    case BYTE_IMMEDIATE_INSTR: {
-      AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
-      data += 2;
-      break;
-    }
+      case BYTE_IMMEDIATE_INSTR: {
+        AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
+        data += 2;
+        break;
+      }
-    case NO_INSTR:
-      processed = false;
-      break;
+      case NO_INSTR:
+        processed = false;
+        break;
-    default:
-      UNIMPLEMENTED();  // This type is not implemented.
+      default:
+        UNIMPLEMENTED();  // This type is not implemented.
-  }
+    }
+  }
  //----------------------------
  if (!processed) {
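
Why the *(data + 1) >= 0xc0 guards above: in 32-bit mode 0xc4 and 0xc5 are also the legacy LES/LDS opcodes, whose ModRM byte never uses the register form (mod == 11), so a following byte with both top bits set is unambiguously a VEX prefix. The encoder cooperates by keeping the inverted R/X (three-byte form) and R/vvvv (two-byte form) bits at 1:

// Disambiguation rule (32-bit mode only; in 64-bit mode 0xc4/0xc5 are
// always VEX):
//   0xc4 or 0xc5, next byte <  0xc0  -> legacy LES/LDS (memory ModRM)
//   0xc4 or 0xc5, next byte >= 0xc0  -> VEX prefix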
@@ -471,6 +471,21 @@ TEST(DisasmIa320) {
    }
  }

+  // AVX instruction
+  {
+    if (CpuFeatures::IsSupported(AVX)) {
+      CpuFeatureScope scope(&assm, AVX);
+      __ vaddsd(xmm0, xmm1, xmm2);
+      __ vaddsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vmulsd(xmm0, xmm1, xmm2);
+      __ vmulsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vsubsd(xmm0, xmm1, xmm2);
+      __ vsubsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vdivsd(xmm0, xmm1, xmm2);
+      __ vdivsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+    }
+  }

  // xchg.
  {
    __ xchg(eax, eax);
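
For reference, the disassembler added in this commit should render the AVX forms exercised in this test roughly as follows (a hand-derived expectation, not captured test output; 10000 = 0x2710):

// vaddsd xmm0,xmm1,xmm2
// vaddsd xmm0,xmm1,[ebx+ecx*4+0x2710]
// vmulsd xmm0,xmm1,xmm2
// vsubsd xmm0,xmm1,xmm2
// vdivsd xmm0,xmm1,xmm2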
@@ -601,6 +601,38 @@ TEST_F(InstructionSelectorTest, Int32MulHigh) {
  EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), edx));
}

+TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
+  {
+    StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+    Node* mul = m.Float64Mul(add, m.Parameter(1));
+    Node* sub = m.Float64Sub(mul, add);
+    Node* ret = m.Float64Div(mul, sub);
+    m.Return(ret);
+    Stream s = m.Build(AVX);
+    ASSERT_EQ(4U, s.size());
+    EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
+  }
+  {
+    StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+    Node* mul = m.Float64Mul(add, m.Parameter(1));
+    Node* sub = m.Float64Sub(mul, add);
+    Node* ret = m.Float64Div(mul, sub);
+    m.Return(ret);
+    Stream s = m.Build();
+    ASSERT_EQ(4U, s.size());
+    EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+  }
+}

}  // namespace compiler
}  // namespace internal
}  // namespace v8