Commit 3709b925 authored by bmeurer, committed by Commit bot

Revert of [x86] Use better left operand heuristic for Float64Add and Float64Mul. (patchset #1 id:1 of https://codereview.chromium.org/958583003/)

Reason for revert:
Tanks on Atom and Haswell

Original issue's description:
> [x86] Use better left operand heuristic for Float64Add and Float64Mul.
>
> R=dcarney@chromium.org
>
> Committed: https://crrev.com/9da259fb1f4ecfefeb2cf7efbe449d8aa1904032
> Cr-Commit-Position: refs/heads/master@{#26849}

TBR=dcarney@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/972243002

Cr-Commit-Position: refs/heads/master@{#26973}
parent 1604bd46
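
For context on what is being removed: the reverted heuristic swapped the two inputs of the commutative Float64Add/Float64Mul when the right input looked like a better candidate for the clobbered left slot. The sketch below only illustrates that idea, with a toy Node type and a made-up liveness test based on a use count; it is not V8's actual CanBeBetterLeftOperand implementation, which lives in the operand generator and is not part of this diff.

// Toy illustration of the operand-swap idea behind the reverted heuristic.
// The Node type and the use-count test are stand-ins, not V8 data structures.
#include <algorithm>
#include <cstdio>

struct Node {
  const char* name;
  int remaining_uses;  // uses of this value after the current operation
};

// Made-up predicate (NOT V8's CanBeBetterLeftOperand): a value that is dead
// after this operation is a better choice for the slot that gets clobbered.
static bool CanBeBetterLeftOperand(const Node* node) {
  return node->remaining_uses == 0;
}

// In SSE two-address form the left input is overwritten by the result, so
// for a commutative op prefer to clobber the value nobody needs afterwards.
static void SelectFloat64Add(Node* left, Node* right) {
  if (CanBeBetterLeftOperand(right)) std::swap(left, right);
  std::printf("addsd %s, %s   ; %s is overwritten by the result\n",
              left->name, right->name, left->name);
}

int main() {
  Node a{"xmm0", 2};  // still live after the add
  Node b{"xmm1", 0};  // dies at the add
  SelectFloat64Add(&a, &b);  // prints: addsd xmm1, xmm0
  return 0;
}

The actual change is mechanical: the hunks below drop the left/right swap and go back to passing node->InputAt(0) and node->InputAt(1) to Emit() directly, in both the ia32 and the x64 instruction selectors.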
@@ -634,15 +634,12 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
 void InstructionSelector::VisitFloat64Add(Node* node) {
   IA32OperandGenerator g(this);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  if (g.CanBeBetterLeftOperand(right)) std::swap(left, right);
   if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Add, g.DefineAsRegister(node), g.UseRegister(left),
-         g.Use(right));
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
   } else {
-    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node), g.UseRegister(left),
-         g.Use(right));
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
   }
 }
@@ -661,15 +658,12 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   IA32OperandGenerator g(this);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  if (g.CanBeBetterLeftOperand(right)) std::swap(left, right);
   if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Mul, g.DefineAsRegister(node), g.UseRegister(left),
-         g.Use(right));
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
   } else {
-    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node), g.UseRegister(left),
-         g.Use(right));
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
   }
 }
@@ -832,15 +832,12 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
 void InstructionSelector::VisitFloat64Add(Node* node) {
   X64OperandGenerator g(this);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  if (g.CanBeBetterLeftOperand(right)) std::swap(left, right);
   if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Add, g.DefineAsRegister(node), g.UseRegister(left),
-         g.Use(right));
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
   } else {
-    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node), g.UseRegister(left),
-         g.Use(right));
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
   }
 }
@@ -859,15 +856,12 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   X64OperandGenerator g(this);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  if (g.CanBeBetterLeftOperand(right)) std::swap(left, right);
   if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Mul, g.DefineAsRegister(node), g.UseRegister(left),
-         g.Use(right));
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
   } else {
-    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node), g.UseRegister(left),
-         g.Use(right));
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
   }
 }
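
Both sides of the revert keep the same output constraints: the kSSE* path uses DefineSameAsFirst because scalar SSE arithmetic is two-address (the destination must be the first source), while the kAVX* path uses DefineAsRegister because the VEX encoding is three-address. A minimal sketch of that difference, with placeholder register names rather than register-allocator output:

// Sketch of the two-address (SSE) vs. three-address (AVX) constraint that the
// DefineSameAsFirst / DefineAsRegister calls above express. The registers are
// arbitrary placeholders.
#include <cstdio>

static void ShowFloat64AddShapes(bool avx_supported) {
  if (avx_supported) {
    // Three-address: the result can go to a fresh register, neither input is
    // clobbered, so the choice of left operand matters less.
    std::printf("vaddsd xmm2, xmm0, xmm1  ; xmm2 = xmm0 + xmm1\n");
  } else {
    // Two-address: the destination must be the first input, so the left
    // operand is the value that gets overwritten.
    std::printf("addsd xmm0, xmm1         ; xmm0 = xmm0 + xmm1\n");
  }
}

int main() {
  ShowFloat64AddShapes(false);  // SSE shape
  ShowFloat64AddShapes(true);   // AVX shape
  return 0;
}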