Commit 136db7b8 authored by whesse@chromium.org

Port new version of ParallelMove's LGapResolver to X64.

Review URL: http://codereview.chromium.org/6366003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6452 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent cadb9cb6
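Background for the change: a "parallel move" (the moves in a Lithium gap) is a set of operand assignments that must behave as if executed simultaneously, so a resolver has to order the emitted machine moves and break cycles such as a plain register swap. The following minimal C++ sketch shows that core problem; ResolveParallelMove and the "scratch" slot are illustrative names, not V8 API, and a real resolver distinguishes operand kinds (register, stack slot, constant) that are collapsed to strings here.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// (src, dst) pairs that must behave as if executed simultaneously.
// Destinations are unique in a well-formed parallel move.
using Moves = std::vector<std::pair<std::string, std::string>>;

void ResolveParallelMove(const Moves& parallel) {
  std::map<std::string, std::string> pending;  // dst -> src, not yet emitted
  for (const auto& m : parallel) pending[m.second] = m.first;
  while (!pending.empty()) {
    bool progress = false;
    // Emit any move whose destination is no longer needed as a source.
    for (auto it = pending.begin(); it != pending.end(); ++it) {
      bool blocked = false;
      for (const auto& p : pending) {
        if (p.second == it->first) { blocked = true; break; }
      }
      if (!blocked) {
        std::printf("mov %s <- %s\n", it->first.c_str(), it->second.c_str());
        pending.erase(it);
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Only cycles remain: break one by saving a source to a scratch
      // location and redirecting every reader of that source.
      const std::string source = pending.begin()->second;
      std::printf("mov scratch <- %s\n", source.c_str());
      for (auto& p : pending) {
        if (p.second == source) p.second = "scratch";
      }
    }
  }
}

int main() {
  // Swap rax and rbx in parallel with an independent move rcx <- rdx:
  // this emits rcx <- rdx first, then the swap via the scratch slot.
  ResolveParallelMove({{"rax", "rbx"}, {"rbx", "rax"}, {"rdx", "rcx"}});
  return 0;
}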
SConstruct
@@ -216,8 +216,9 @@ SOURCES = {
     x64/full-codegen-x64.cc
     x64/ic-x64.cc
     x64/jump-target-x64.cc
-    x64/lithium-x64.cc
     x64/lithium-codegen-x64.cc
+    x64/lithium-gap-resolver-x64.cc
+    x64/lithium-x64.cc
     x64/macro-assembler-x64.cc
     x64/regexp-macro-assembler-x64.cc
     x64/register-allocator-x64.cc
...
src/ia32/lithium-gap-resolver-ia32.cc
@@ -32,12 +32,11 @@ namespace v8 {
 namespace internal {
 
 LGapResolver::LGapResolver(LCodeGen* owner)
-    : cgen_(owner), moves_(32), spilled_register_(-1) {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
-    source_uses_[i] = 0;
-    destination_uses_[i] = 0;
-  }
-}
+    : cgen_(owner),
+      moves_(32),
+      source_uses_(),
+      destination_uses_(),
+      spilled_register_(-1) {}
 
 
 void LGapResolver::Resolve(LParallelMove* parallel_move) {
...
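The deleted loop is redundant because the new initializer list value-initializes the count arrays: in C++, writing `source_uses_()` in a mem-initializer list zero-initializes every element of a member array. A minimal sketch of that language rule, with hypothetical names:

#include <cassert>

struct UseCounts {
  UseCounts() : uses_() {}  // value-initialization zeroes the whole array
  int uses_[8];
};

int main() {
  UseCounts counts;
  for (int i = 0; i < 8; ++i) assert(counts.uses_[i] == 0);
  return 0;
}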
src/x64/lithium-codegen-x64.cc
@@ -37,157 +37,6 @@ namespace v8 {
 namespace internal {
 
-class LGapNode: public ZoneObject {
- public:
-  explicit LGapNode(LOperand* operand)
-      : operand_(operand), resolved_(false), visited_id_(-1) { }
-
-  LOperand* operand() const { return operand_; }
-  bool IsResolved() const { return !IsAssigned() || resolved_; }
-  void MarkResolved() {
-    ASSERT(!IsResolved());
-    resolved_ = true;
-  }
-  int visited_id() const { return visited_id_; }
-  void set_visited_id(int id) {
-    ASSERT(id > visited_id_);
-    visited_id_ = id;
-  }
-
-  bool IsAssigned() const { return assigned_from_.is_set(); }
-  LGapNode* assigned_from() const { return assigned_from_.get(); }
-  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
-  LOperand* operand_;
-  SetOncePointer<LGapNode> assigned_from_;
-  bool resolved_;
-  int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
-    : nodes_(32),
-      identified_cycles_(4),
-      result_(16),
-      next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
-    const ZoneList<LMoveOperands>* moves,
-    LOperand* marker_operand) {
-  nodes_.Rewind(0);
-  identified_cycles_.Rewind(0);
-  result_.Rewind(0);
-  next_visited_id_ = 0;
-
-  for (int i = 0; i < moves->length(); ++i) {
-    LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) RegisterMove(move);
-  }
-
-  for (int i = 0; i < identified_cycles_.length(); ++i) {
-    ResolveCycle(identified_cycles_[i], marker_operand);
-  }
-
-  int unresolved_nodes;
-  do {
-    unresolved_nodes = 0;
-    for (int j = 0; j < nodes_.length(); j++) {
-      LGapNode* node = nodes_[j];
-      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
-        AddResultMove(node->assigned_from(), node);
-        node->MarkResolved();
-      }
-      if (!node->IsResolved()) ++unresolved_nodes;
-    }
-  } while (unresolved_nodes > 0);
-  return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
-  AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
-  result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
-  ZoneList<LOperand*> cycle_operands(8);
-  cycle_operands.Add(marker_operand);
-  LGapNode* cur = start;
-  do {
-    cur->MarkResolved();
-    cycle_operands.Add(cur->operand());
-    cur = cur->assigned_from();
-  } while (cur != start);
-  cycle_operands.Add(marker_operand);
-
-  for (int i = cycle_operands.length() - 1; i > 0; --i) {
-    LOperand* from = cycle_operands[i];
-    LOperand* to = cycle_operands[i - 1];
-    AddResultMove(from, to);
-  }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
-  ASSERT(a != b);
-  LGapNode* cur = a;
-  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
-    cur->set_visited_id(visited_id);
-    cur = cur->assigned_from();
-  }
-  return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
-  ASSERT(a != b);
-  return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
-  if (move.source()->IsConstantOperand()) {
-    // Constant moves should be last in the machine code. Therefore add them
-    // first to the result set.
-    AddResultMove(move.source(), move.destination());
-  } else {
-    LGapNode* from = LookupNode(move.source());
-    LGapNode* to = LookupNode(move.destination());
-    if (to->IsAssigned() && to->assigned_from() == from) {
-      move.Eliminate();
-      return;
-    }
-    ASSERT(!to->IsAssigned());
-    if (CanReach(from, to)) {
-      // This introduces a cycle. Save.
-      identified_cycles_.Add(from);
-    }
-    to->set_assigned_from(from);
-  }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
-  for (int i = 0; i < nodes_.length(); ++i) {
-    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
-  }
-
-  // No node found => create a new one.
-  LGapNode* result = new LGapNode(operand);
-  nodes_.Add(result);
-  return result;
-}
-
-
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
@@ -696,86 +545,7 @@ void LCodeGen::DoLabel(LLabel* label) {
 
 void LCodeGen::DoParallelMove(LParallelMove* move) {
-  // xmm0 must always be a scratch register.
-  XMMRegister xmm_scratch = xmm0;
-  LUnallocated marker_operand(LUnallocated::NONE);
-
-  Register cpu_scratch = kScratchRegister;
-
-  const ZoneList<LMoveOperands>* moves =
-      resolver_.Resolve(move->move_operands(), &marker_operand);
-  for (int i = moves->length() - 1; i >= 0; --i) {
-    LMoveOperands move = moves->at(i);
-    LOperand* from = move.source();
-    LOperand* to = move.destination();
-    ASSERT(!from->IsDoubleRegister() ||
-           !ToDoubleRegister(from).is(xmm_scratch));
-    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
-    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
-    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
-    if (from->IsConstantOperand()) {
-      LConstantOperand* constant_from = LConstantOperand::cast(from);
-      if (to->IsRegister()) {
-        if (IsInteger32Constant(constant_from)) {
-          __ movl(ToRegister(to), Immediate(ToInteger32(constant_from)));
-        } else {
-          __ Move(ToRegister(to), ToHandle(constant_from));
-        }
-      } else {
-        if (IsInteger32Constant(constant_from)) {
-          __ movl(ToOperand(to), Immediate(ToInteger32(constant_from)));
-        } else {
-          __ Move(ToOperand(to), ToHandle(constant_from));
-        }
-      }
-    } else if (from == &marker_operand) {
-      if (to->IsRegister()) {
-        __ movq(ToRegister(to), cpu_scratch);
-      } else if (to->IsStackSlot()) {
-        __ movq(ToOperand(to), cpu_scratch);
-      } else if (to->IsDoubleRegister()) {
-        __ movsd(ToDoubleRegister(to), xmm_scratch);
-      } else {
-        ASSERT(to->IsDoubleStackSlot());
-        __ movsd(ToOperand(to), xmm_scratch);
-      }
-    } else if (to == &marker_operand) {
-      if (from->IsRegister()) {
-        __ movq(cpu_scratch, ToRegister(from));
-      } else if (from->IsStackSlot()) {
-        __ movq(cpu_scratch, ToOperand(from));
-      } else if (from->IsDoubleRegister()) {
-        __ movsd(xmm_scratch, ToDoubleRegister(from));
-      } else {
-        ASSERT(from->IsDoubleStackSlot());
-        __ movsd(xmm_scratch, ToOperand(from));
-      }
-    } else if (from->IsRegister()) {
-      if (to->IsRegister()) {
-        __ movq(ToRegister(to), ToRegister(from));
-      } else {
-        __ movq(ToOperand(to), ToRegister(from));
-      }
-    } else if (to->IsRegister()) {
-      __ movq(ToRegister(to), ToOperand(from));
-    } else if (from->IsStackSlot()) {
-      ASSERT(to->IsStackSlot());
-      __ push(rax);
-      __ movq(rax, ToOperand(from));
-      __ movq(ToOperand(to), rax);
-      __ pop(rax);
-    } else if (from->IsDoubleRegister()) {
-      ASSERT(to->IsDoubleStackSlot());
-      __ movsd(ToOperand(to), ToDoubleRegister(from));
-    } else if (to->IsDoubleRegister()) {
-      ASSERT(from->IsDoubleStackSlot());
-      __ movsd(ToDoubleRegister(to), ToOperand(from));
-    } else {
-      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
-      __ movsd(xmm_scratch, ToOperand(from));
-      __ movsd(ToOperand(to), xmm_scratch);
-    }
-  }
+  resolver_.Resolve(move);
 }
...
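For reference, this is how the deleted resolver handled the canonical hard case, a register swap: RegisterMove() linked operand nodes through assigned_from, CanReach() detected the cycle, ResolveCycle() flattened it through the marker operand, and DoParallelMove() walked the result list in reverse. A hedged trace for the two moves {rbx <- rax, rax <- rbx}, with illustrative register names:

// RegisterMove(rbx <- rax): node(rbx).assigned_from = node(rax).
// RegisterMove(rax <- rbx): CanReach(node(rbx), node(rax)) is true, so the
// cycle is recorded and node(rax).assigned_from = node(rbx).
// ResolveCycle(node(rbx), marker): collects [marker, rbx, rax, marker] and
// appends (marker -> rax), (rax -> rbx), (rbx -> marker) to result_.
// DoParallelMove then consumed result_ in reverse order, emitting:
//   movq kScratchRegister, rbx   // rbx -> marker
//   movq rbx, rax                // rax -> rbx
//   movq rax, kScratchRegister   // marker -> rax
// The reverse iteration is also why RegisterMove added constant moves to
// result_ first: they ended up last in the machine code, after the old
// values of their destination registers had been read.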
src/x64/lithium-codegen-x64.h
@@ -34,37 +34,15 @@
 #include "deoptimizer.h"
 #include "safepoint-table.h"
 #include "scopes.h"
+#include "x64/lithium-gap-resolver-x64.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LDeferredCode;
-class LGapNode;
 class SafepointGenerator;
 
-class LGapResolver BASE_EMBEDDED {
- public:
-  LGapResolver();
-
-  const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
-                                         LOperand* marker_operand);
-
- private:
-  LGapNode* LookupNode(LOperand* operand);
-  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
-  bool CanReach(LGapNode* a, LGapNode* b);
-  void RegisterMove(LMoveOperands move);
-  void AddResultMove(LOperand* from, LOperand* to);
-  void AddResultMove(LGapNode* from, LGapNode* to);
-  void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
-  ZoneList<LGapNode*> nodes_;
-  ZoneList<LGapNode*> identified_cycles_;
-  ZoneList<LMoveOperands> result_;
-  int next_visited_id_;
-};
-
 
 class LCodeGen BASE_EMBEDDED {
  public:
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -80,10 +58,24 @@ class LCodeGen BASE_EMBEDDED {
         scope_(chunk->graph()->info()->scope()),
         status_(UNUSED),
         deferred_(8),
-        osr_pc_offset_(-1) {
+        osr_pc_offset_(-1),
+        resolver_(this) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
+  // Simple accessors.
+  MacroAssembler* masm() const { return masm_; }
+
+  // Support for converting LOperands to assembler types.
+  Register ToRegister(LOperand* op) const;
+  XMMRegister ToDoubleRegister(LOperand* op) const;
+  bool IsInteger32Constant(LConstantOperand* op) const;
+  int ToInteger32(LConstantOperand* op) const;
+  bool IsTaggedConstant(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op) const;
+
   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
   // code generation attempt succeeded.
@@ -129,7 +121,6 @@ class LCodeGen BASE_EMBEDDED {
   LChunk* chunk() const { return chunk_; }
   Scope* scope() const { return scope_; }
   HGraph* graph() const { return chunk_->graph(); }
-  MacroAssembler* masm() const { return masm_; }
 
   int GetNextEmittedBlock(int block);
   LInstruction* GetNextInstruction();
@@ -190,13 +181,6 @@ class LCodeGen BASE_EMBEDDED {
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
 
-  Register ToRegister(LOperand* op) const;
-  XMMRegister ToDoubleRegister(LOperand* op) const;
-  bool IsInteger32Constant(LConstantOperand* op) const;
-  int ToInteger32(LConstantOperand* op) const;
-  bool IsTaggedConstant(LConstantOperand* op) const;
-  Handle<Object> ToHandle(LConstantOperand* op) const;
-  Operand ToOperand(LOperand* op) const;
-
   // Specific math operations - used from DoUnaryMathOperation.
   void DoMathAbs(LUnaryMathOperation* instr);
...
src/x64/lithium-gap-resolver-x64.cc: new file (diff collapsed in this view).

src/x64/lithium-gap-resolver-x64.h: new file, shown in full below.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_

#include "v8.h"

#include "lithium.h"

namespace v8 {
namespace internal {

class LCodeGen;
class LGapResolver;

class LGapResolver BASE_EMBEDDED {
 public:
  explicit LGapResolver(LCodeGen* owner);

  // Resolve a set of parallel moves, emitting assembler instructions.
  void Resolve(LParallelMove* parallel_move);

 private:
  // Build the initial list of moves.
  void BuildInitialMoveList(LParallelMove* parallel_move);

  // Perform the move at the moves_ index in question (possibly requiring
  // other moves to satisfy dependencies).
  void PerformMove(int index);

  // Emit a move and remove it from the move graph.
  void EmitMove(int index);

  // Execute a move by emitting a swap of two operands. The move from
  // source to destination is removed from the move graph.
  void EmitSwap(int index);

  // Verify the move list before performing moves.
  void Verify();

  LCodeGen* cgen_;

  // List of moves not yet resolved.
  ZoneList<LMoveOperands> moves_;
};

} }  // namespace v8::internal

#endif  // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
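The new resolver emits code while it resolves, rather than returning a reordered move list. Below is a sketch of the PerformMove() strategy the comments above describe, modeled on the ia32 resolver this commit ports; it is an assumption about the collapsed x64 .cc file, not its verbatim text, and it leans on the LMoveOperands helpers Blocks(), IsPending(), Eliminate() and set_destination():

void LGapResolver::PerformMove(int index) {
  // Mark this move as pending by clearing its destination (kept in a
  // local), so a recursive call that reaches it again sees a cycle.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Depth-first, perform every move that still needs to read
  // `destination` before this move is allowed to clobber it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }
  moves_[index].set_destination(destination);

  // A swap performed during the recursion may have made this move
  // trivial; if so, it is done.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // If the destination is still blocked, the blocker must itself be
  // pending: a cycle. Break it with a swap, which also performs the
  // blocked move.
  for (int i = 0; i < moves_.length(); ++i) {
    if (moves_[i].Blocks(destination)) {
      EmitSwap(index);
      return;
    }
  }

  // Not blocked: emit the move and remove it from the move graph.
  EmitMove(index);
}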
tools/gyp/v8.gyp
@@ -691,6 +691,8 @@
             '../../src/x64/jump-target-x64.cc',
             '../../src/x64/lithium-codegen-x64.cc',
             '../../src/x64/lithium-codegen-x64.h',
+            '../../src/x64/lithium-gap-resolver-x64.cc',
+            '../../src/x64/lithium-gap-resolver-x64.h',
             '../../src/x64/lithium-x64.cc',
             '../../src/x64/lithium-x64.h',
             '../../src/x64/macro-assembler-x64.cc',
...