Commit 87ae05c7 authored by danno's avatar danno Committed by Commit bot

[turbofan]: Micro optimizations to lea[l/q] on ia32/x64

Utilize all opportunities to turn leas into adds.

Review-Url: https://codereview.chromium.org/2418803002
Cr-Commit-Position: refs/heads/master@{#40341}
parent 8bb1e6d0
......@@ -1461,7 +1461,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (i.InputRegister(1).is(i.OutputRegister())) {
__ shl(i.OutputRegister(), 1);
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
__ add(i.OutputRegister(), i.InputRegister(1));
}
} else if (mode == kMode_M2) {
__ shl(i.OutputRegister(), 1);
......@@ -1472,6 +1472,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_MR1 &&
i.InputRegister(1).is(i.OutputRegister())) {
__ add(i.OutputRegister(), i.InputRegister(0));
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
......
......@@ -1997,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (i.InputRegister(1).is(i.OutputRegister())) {
__ shll(i.OutputRegister(), Immediate(1));
} else {
__ leal(i.OutputRegister(), i.MemoryOperand());
__ addl(i.OutputRegister(), i.InputRegister(1));
}
} else if (mode == kMode_M2) {
__ shll(i.OutputRegister(), Immediate(1));
......@@ -2008,15 +2008,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ leal(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_MR1 &&
i.InputRegister(1).is(i.OutputRegister())) {
__ addl(i.OutputRegister(), i.InputRegister(0));
} else {
__ leal(i.OutputRegister(), i.MemoryOperand());
}
__ AssertZeroExtended(i.OutputRegister());
break;
}
case kX64Lea:
__ leaq(i.OutputRegister(), i.MemoryOperand());
case kX64Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leaq" to "addq", "subq" or "shlq" if the register allocation
// and addressing mode just happens to work out. The "addq"/"subq" forms
// in these cases are faster based on measurements.
if (i.InputRegister(0).is(i.OutputRegister())) {
// Base/index register already lives in the output register, so the
// lea can be folded into an in-place arithmetic op.
if (mode == kMode_MRI) {
// [reg + imm]: turn into addq/subq on the register itself.
int32_t constant_summand = i.InputInt32(1);
if (constant_summand > 0) {
__ addq(i.OutputRegister(), Immediate(constant_summand));
} else if (constant_summand < 0) {
__ subq(i.OutputRegister(), Immediate(-constant_summand));
}
// Note: a zero displacement emits no instruction at all — the lea
// would have been an identity move onto itself.
} else if (mode == kMode_MR1) {
if (i.InputRegister(1).is(i.OutputRegister())) {
// [out + out] == out * 2: a shift-left by one.
__ shlq(i.OutputRegister(), Immediate(1));
} else {
// [out + other]: plain register add.
__ addq(i.OutputRegister(), i.InputRegister(1));
}
} else if (mode == kMode_M2) {
// Scaled index with no base, output holds the index:
// [out*2], [out*4], [out*8] are shifts by 1, 2, 3.
__ shlq(i.OutputRegister(), Immediate(1));
} else if (mode == kMode_M4) {
__ shlq(i.OutputRegister(), Immediate(2));
} else if (mode == kMode_M8) {
__ shlq(i.OutputRegister(), Immediate(3));
} else {
// No profitable shortening for this addressing mode.
__ leaq(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_MR1 &&
i.InputRegister(1).is(i.OutputRegister())) {
// The *index* register is the output: addition commutes, so
// [base + out] can still be an in-place add of the base register.
__ addq(i.OutputRegister(), i.InputRegister(0));
} else {
// General case: materialize the effective address with leaq.
__ leaq(i.OutputRegister(), i.MemoryOperand());
}
break;
}
case kX64Dec32:
// In-place 32-bit decrement of the output register.
__ decl(i.OutputRegister());
break;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment