Commit 39515a68 authored by bmeurer's avatar bmeurer Committed by Commit bot

[x64] Recognize zero extension of 8-bit and 16-bit values.

R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/938513003

Cr-Commit-Position: refs/heads/master@{#26712}
parent 52a23441
...@@ -248,6 +248,18 @@ class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
} while (0)
// Emits one extending move (movsx*/movzx* family) for the current
// instruction, selecting the operand form from the instruction itself:
// a memory operand when an addressing mode is attached, otherwise the
// register form of input 0, otherwise its non-register operand form
// (presumably a stack slot — confirm against X64OperandConverter).
// NOTE: comments cannot live inside the macro body; a '//' there would
// swallow the line-continuation backslash.
#define ASSEMBLE_MOVX(asm_instr) \
do { \
if (instr->addressing_mode() != kMode_None) { \
__ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
} else if (instr->InputAt(0)->IsRegister()) { \
__ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
} else { \
__ asm_instr(i.OutputRegister(), i.InputOperand(0)); \
} \
} while (0)
#define ASSEMBLE_DOUBLE_BINOP(asm_instr) \ #define ASSEMBLE_DOUBLE_BINOP(asm_instr) \
do { \ do { \
if (instr->InputAt(1)->IsDoubleRegister()) { \ if (instr->InputAt(1)->IsDoubleRegister()) { \
...@@ -801,17 +813,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { ...@@ -801,17 +813,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd); ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
break; break;
case kX64Movsxbl: case kX64Movsxbl:
if (instr->addressing_mode() != kMode_None) { ASSEMBLE_MOVX(movsxbl);
__ movsxbl(i.OutputRegister(), i.MemoryOperand());
} else if (instr->InputAt(0)->IsRegister()) {
__ movsxbl(i.OutputRegister(), i.InputRegister(0));
} else {
__ movsxbl(i.OutputRegister(), i.InputOperand(0));
}
__ AssertZeroExtended(i.OutputRegister()); __ AssertZeroExtended(i.OutputRegister());
break; break;
case kX64Movzxbl: case kX64Movzxbl:
__ movzxbl(i.OutputRegister(), i.MemoryOperand()); ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
break; break;
case kX64Movb: { case kX64Movb: {
int index = 0; int index = 0;
...@@ -824,17 +831,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { ...@@ -824,17 +831,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break; break;
} }
case kX64Movsxwl: case kX64Movsxwl:
if (instr->addressing_mode() != kMode_None) { ASSEMBLE_MOVX(movsxwl);
__ movsxwl(i.OutputRegister(), i.MemoryOperand());
} else if (instr->InputAt(0)->IsRegister()) {
__ movsxwl(i.OutputRegister(), i.InputRegister(0));
} else {
__ movsxwl(i.OutputRegister(), i.InputOperand(0));
}
__ AssertZeroExtended(i.OutputRegister()); __ AssertZeroExtended(i.OutputRegister());
break; break;
case kX64Movzxwl: case kX64Movzxwl:
__ movzxwl(i.OutputRegister(), i.MemoryOperand()); ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister()); __ AssertZeroExtended(i.OutputRegister());
break; break;
case kX64Movw: { case kX64Movw: {
...@@ -869,14 +870,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { ...@@ -869,14 +870,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} }
} }
break; break;
case kX64Movsxlq: { case kX64Movsxlq:
if (instr->InputAt(0)->IsRegister()) { ASSEMBLE_MOVX(movsxlq);
__ movsxlq(i.OutputRegister(), i.InputRegister(0));
} else {
__ movsxlq(i.OutputRegister(), i.InputOperand(0));
}
break; break;
}
case kX64Movq: case kX64Movq:
if (instr->HasOutput()) { if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand()); __ movq(i.OutputRegister(), i.MemoryOperand());
......
...@@ -366,7 +366,15 @@ static void VisitBinop(InstructionSelector* selector, Node* node, ...@@ -366,7 +366,15 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
// Selects instructions for a 32-bit bitwise AND.
//
// Masking with 0xff or 0xffff is strength-reduced to a zero-extending
// move (movzxbl / movzxwl) instead of a generic and32; on x64 writing a
// 32-bit register also clears the upper 32 bits, so the result is already
// zero-extended. The matcher canonicalizes a constant operand to the
// right, so both operand orders are recognized (exercised by the
// Word32AndWith0xff / Word32AndWith0xffff selector tests).
//
// (Reconstructed new-side version: the scraped diff had fused the old and
// new columns of these lines into one.)
void InstructionSelector::VisitWord32And(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(0xff)) {
    // x & 0xff -> zero extension of the low 8 bits.
    Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else if (m.right().Is(0xffff)) {
    // x & 0xffff -> zero extension of the low 16 bits.
    Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64And32);
  }
}
......
...@@ -991,7 +991,43 @@ TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) { ...@@ -991,7 +991,43 @@ TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Word64Shl. // Floating point operations.
// Checks instruction selection for a chain of float64 arithmetic
// (add -> mul -> sub -> div): with the AVX feature enabled the four
// kAVXFloat64* opcodes must be emitted; without it, the kSSEFloat64*
// fallbacks. Both scopes build the identical graph; only the Build()
// feature set differs.
TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
{
// AVX enabled: expect the AVX opcodes, in emission order.
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
Node* ret = m.Float64Div(mul, sub);
m.Return(ret);
Stream s = m.Build(AVX);
ASSERT_EQ(4U, s.size());
EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
}
{
// No AVX: the same graph must select the SSE opcodes.
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
Node* ret = m.Float64Div(mul, sub);
m.Return(ret);
Stream s = m.Build();
ASSERT_EQ(4U, s.size());
EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
}
}
// -----------------------------------------------------------------------------
// Miscellaneous.
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) { TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
...@@ -1032,34 +1068,62 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) { ...@@ -1032,34 +1068,62 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
} }
// Word32And with a 0xff mask, in either operand order, must be selected
// as a single zero-extending byte move (kX64Movzxbl) taking the
// non-constant operand as its only input — not as a generic and32.
//
// (Reconstructed new-side version: the scraped diff had fused these lines
// with the deleted old-location Float64BinopArithmetic test.)
TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
  {
    // Constant on the right: p0 & 0xff.
    StreamBuilder m(this, kMachInt32, kMachInt32);
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Word32And(p0, m.Int32Constant(0xff));
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
  }
  {
    // Constant on the left: 0xff & p0 (commuted operands).
    StreamBuilder m(this, kMachInt32, kMachInt32);
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Word32And(m.Int32Constant(0xff), p0);
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
  }
}
// Word32And with a 0xffff mask, in either operand order, must be selected
// as a single zero-extending word move (kX64Movzxwl) taking the
// non-constant operand as its only input — not as a generic and32.
//
// (The scraped diff had duplicated the two closing braces, "} }"; this is
// the repaired block.)
TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
  {
    // Constant on the right: p0 & 0xffff.
    StreamBuilder m(this, kMachInt32, kMachInt32);
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Word32And(p0, m.Int32Constant(0xffff));
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
  }
  {
    // Constant on the left: 0xffff & p0 (commuted operands).
    StreamBuilder m(this, kMachInt32, kMachInt32);
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Word32And(m.Int32Constant(0xffff), p0);
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
  }
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment