Change the algorithm and generated code for parallel moves on IA32.

Instead of spilling and then immediately restoring eax to resolve
memory-to-memory moves, the gap move resolver now tracks registers
that are known to be free and uses one if available.  If none is
available it spills, but restores lazily, either when the spilled
value is needed or at the end of the algorithm.
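
For illustration, a simplified sketch of the free-register bookkeeping
this implies, using the counters and helpers declared in the new
ia32/lithium-gap-resolver-ia32.h below; the bodies shown here are a
rough sketch, not the code from this patch.

// A register is known to be free when no unperformed move reads or writes it.
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}

// Prefer a known-free register; only spill (push) on demand, and remember
// the spilled register so it can be restored lazily or in Finish().
Register LGapResolver::EnsureTempRegister() {
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;
  if (spilled_register_ < 0) {
    // No free register: spill one on demand (illustratively, index 0).
    cgen_->masm()->push(Register::FromAllocationIndex(0));
    spilled_register_ = 0;
  }
  return Register::FromAllocationIndex(spilled_register_);
}

// At the end of the gap move, restore whatever was spilled on demand.
void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    cgen_->masm()->pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}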

Instead of using esi to resolve cycles, on the assumption that it is
free to overwrite because it can be rematerialized, the gap move
resolver now resolves cycles with swaps, possibly relying on a free
register as above.

The algorithm is also changed to be simpler: a recursive depth-first
traversal of the move dependence graph.  It uses a list of moves to be
performed (because it mutates the moves themselves), but does not use
any auxiliary structure other than the control stack.  It does not
build up a separate list of scheduled moves to be interpreted by the
code generator, but emits code on the fly.
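
For illustration, here is a sketch of that traversal in terms of the
PerformMove, EmitMove and EmitSwap operations declared in the new
resolver header below; it shows the general scheme only, and omits
refinements of the actual implementation (register bookkeeping, and
moves that become redundant after a swap performed inside the
recursion).

void LGapResolver::PerformMove(int index) {
  // Clear the destination to mark this move as pending; if the traversal
  // reaches this move again through its destination, we are in a cycle.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // First perform, depth-first, every move that reads from our destination,
  // so the old value is consumed before we overwrite it.
  for (int i = 0; i < moves_.length(); ++i) {
    if (moves_[i].Blocks(destination) && !moves_[i].IsPending()) {
      PerformMove(i);
    }
  }
  moves_[index].set_destination(destination);

  // If a pending move still reads our destination, we have closed a cycle:
  // break it with a swap instead of a plain move.
  for (int i = 0; i < moves_.length(); ++i) {
    if (moves_[i].Blocks(destination)) {
      EmitSwap(index);
      return;
    }
  }
  EmitMove(index);
}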

Review URL: http://codereview.chromium.org/6263005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6344 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
......@@ -190,6 +190,7 @@ SOURCES = {
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/lithium-codegen-ia32.cc
ia32/lithium-gap-resolver-ia32.cc
ia32/lithium-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc
......
......@@ -172,13 +172,13 @@ bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.from()->IsConstantOperand()) {
if (move.source()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.from(), move.to());
AddResultMove(move.source(), move.destination());
} else {
LGapNode* from = LookupNode(move.from());
LGapNode* to = LookupNode(move.to());
LGapNode* from = LookupNode(move.source());
LGapNode* to = LookupNode(move.destination());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
......@@ -816,8 +816,8 @@ void LCodeGen::DoParallelMove(LParallelMove* move) {
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.from();
LOperand* to = move.to();
LOperand* from = move.source();
LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(dbl_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
......
......@@ -58,157 +58,6 @@ class SafepointGenerator : public PostCallGenerator {
};
class LGapNode: public ZoneObject {
public:
explicit LGapNode(LOperand* operand)
: operand_(operand), resolved_(false), visited_id_(-1) { }
LOperand* operand() const { return operand_; }
bool IsResolved() const { return !IsAssigned() || resolved_; }
void MarkResolved() {
ASSERT(!IsResolved());
resolved_ = true;
}
int visited_id() const { return visited_id_; }
void set_visited_id(int id) {
ASSERT(id > visited_id_);
visited_id_ = id;
}
bool IsAssigned() const { return assigned_from_.is_set(); }
LGapNode* assigned_from() const { return assigned_from_.get(); }
void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
private:
LOperand* operand_;
SetOncePointer<LGapNode> assigned_from_;
bool resolved_;
int visited_id_;
};
LGapResolver::LGapResolver()
: nodes_(32),
identified_cycles_(4),
result_(16),
next_visited_id_(0) {
}
const ZoneList<LMoveOperands>* LGapResolver::Resolve(
const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand) {
nodes_.Rewind(0);
identified_cycles_.Rewind(0);
result_.Rewind(0);
next_visited_id_ = 0;
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) RegisterMove(move);
}
for (int i = 0; i < identified_cycles_.length(); ++i) {
ResolveCycle(identified_cycles_[i], marker_operand);
}
int unresolved_nodes;
do {
unresolved_nodes = 0;
for (int j = 0; j < nodes_.length(); j++) {
LGapNode* node = nodes_[j];
if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
AddResultMove(node->assigned_from(), node);
node->MarkResolved();
}
if (!node->IsResolved()) ++unresolved_nodes;
}
} while (unresolved_nodes > 0);
return &result_;
}
void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
AddResultMove(from->operand(), to->operand());
}
void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
result_.Add(LMoveOperands(from, to));
}
void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
ZoneList<LOperand*> cycle_operands(8);
cycle_operands.Add(marker_operand);
LGapNode* cur = start;
do {
cur->MarkResolved();
cycle_operands.Add(cur->operand());
cur = cur->assigned_from();
} while (cur != start);
cycle_operands.Add(marker_operand);
for (int i = cycle_operands.length() - 1; i > 0; --i) {
LOperand* from = cycle_operands[i];
LOperand* to = cycle_operands[i - 1];
AddResultMove(from, to);
}
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
ASSERT(a != b);
LGapNode* cur = a;
while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
cur->set_visited_id(visited_id);
cur = cur->assigned_from();
}
return cur == b;
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
ASSERT(a != b);
return CanReach(a, b, next_visited_id_++);
}
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.from()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.from(), move.to());
} else {
LGapNode* from = LookupNode(move.from());
LGapNode* to = LookupNode(move.to());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
}
ASSERT(!to->IsAssigned());
if (CanReach(from, to)) {
// This introduces a cycle. Save.
identified_cycles_.Add(from);
}
to->set_assigned_from(from);
}
}
LGapNode* LGapResolver::LookupNode(LOperand* operand) {
for (int i = 0; i < nodes_.length(); ++i) {
if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
}
// No node found => create a new one.
LGapNode* result = new LGapNode(operand);
nodes_.Add(result);
return result;
}
#define __ masm()->
bool LCodeGen::GenerateCode() {
......@@ -427,6 +276,14 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
}
Operand LCodeGen::HighOperand(LOperand* op) {
ASSERT(op->IsDoubleStackSlot());
int index = op->index();
int offset = (index >= 0) ? index + 3 : index - 1;
return Operand(ebp, -offset * kPointerSize);
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation) {
if (environment == NULL) return;
......@@ -762,66 +619,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
// xmm0 must always be a scratch register.
XMMRegister xmm_scratch = xmm0;
LUnallocated marker_operand(LUnallocated::NONE);
Register cpu_scratch = esi;
bool destroys_cpu_scratch = false;
const ZoneList<LMoveOperands>* moves =
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.from();
LOperand* to = move.to();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(xmm_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
if (from->IsConstantOperand()) {
__ mov(ToOperand(to), ToImmediate(from));
} else if (from == &marker_operand) {
if (to->IsRegister() || to->IsStackSlot()) {
__ mov(ToOperand(to), cpu_scratch);
ASSERT(destroys_cpu_scratch);
} else {
ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
__ movdbl(ToOperand(to), xmm_scratch);
}
} else if (to == &marker_operand) {
if (from->IsRegister() || from->IsStackSlot()) {
__ mov(cpu_scratch, ToOperand(from));
destroys_cpu_scratch = true;
} else {
ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
__ movdbl(xmm_scratch, ToOperand(from));
}
} else if (from->IsRegister()) {
__ mov(ToOperand(to), ToRegister(from));
} else if (to->IsRegister()) {
__ mov(ToRegister(to), ToOperand(from));
} else if (from->IsStackSlot()) {
ASSERT(to->IsStackSlot());
__ push(eax);
__ mov(eax, ToOperand(from));
__ mov(ToOperand(to), eax);
__ pop(eax);
} else if (from->IsDoubleRegister()) {
__ movdbl(ToOperand(to), ToDoubleRegister(from));
} else if (to->IsDoubleRegister()) {
__ movdbl(ToDoubleRegister(to), ToOperand(from));
} else {
ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
__ movdbl(xmm_scratch, ToOperand(from));
__ movdbl(ToOperand(to), xmm_scratch);
}
}
if (destroys_cpu_scratch) {
__ mov(cpu_scratch, Operand(ebp, -kPointerSize));
}
resolver_.Resolve(move);
}
......
......@@ -34,6 +34,7 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "ia32/lithium-gap-resolver-ia32.h"
namespace v8 {
namespace internal {
......@@ -43,28 +44,6 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
class LGapResolver BASE_EMBEDDED {
public:
LGapResolver();
const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand);
private:
LGapNode* LookupNode(LOperand* operand);
bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
bool CanReach(LGapNode* a, LGapNode* b);
void RegisterMove(LMoveOperands move);
void AddResultMove(LOperand* from, LOperand* to);
void AddResultMove(LGapNode* from, LGapNode* to);
void ResolveCycle(LGapNode* start, LOperand* marker_operand);
ZoneList<LGapNode*> nodes_;
ZoneList<LGapNode*> identified_cycles_;
ZoneList<LMoveOperands> result_;
int next_visited_id_;
};
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
......@@ -80,10 +59,24 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1) {
osr_pc_offset_(-1),
resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
Immediate ToImmediate(LOperand* op);
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
Operand HighOperand(LOperand* op);
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
......@@ -129,7 +122,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
......@@ -191,11 +183,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
Immediate ToImmediate(LOperand* op);
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
......
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
#include "v8.h"
#include "lithium-allocator.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// Emit any code necessary at the end of a gap move.
void Finish();
// Add or delete a move from the move graph without emitting any code.
// Used to build up the graph and remove trivial moves.
void AddMove(LMoveOperands move);
void RemoveMove(int index);
// Report the count of uses of operand as a source in a not-yet-performed
// move. Used to rebuild use counts.
int CountSourceUses(LOperand* operand);
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Execute a move by emitting a swap of two operands. The move from
// source to destination is removed from the move graph.
void EmitSwap(int index);
// Ensure that the given operand is not spilled.
void EnsureRestored(LOperand* operand);
// Return a register that can be used as a temp register, spilling
// something if necessary.
Register EnsureTempRegister();
// Return a known free register different from the given one (which could
// be no_reg---returning any free register), or no_reg if there is no such
// register.
Register GetFreeRegisterNot(Register reg);
// Verify that the state is the initial one, ready to resolve a single
// parallel move.
bool HasBeenReset();
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
int source_uses_[Register::kNumAllocatableRegisters];
int destination_uses_[Register::kNumAllocatableRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
int spilled_register_;
};
} } // namespace v8::internal
#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
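
For illustration, a rough sketch, not the code from this patch, of how
EmitSwap could lower two of the operand combinations, using only the
mov/push/pop macro-assembler helpers already seen above and the
ToRegister/ToOperand conversions on LCodeGen; a complete version also
retargets remaining moves that read the swapped operands and handles
the register/memory and double cases.

void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  MacroAssembler* masm = cgen_->masm();

  if (source->IsRegister() && destination->IsRegister()) {
    // Register <-> register: route one value through the stack so no
    // scratch register is needed.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    masm->push(src);      // Save the old source value.
    masm->mov(src, dst);  // Source gets the old destination value.
    masm->pop(dst);       // Destination gets the old source value.
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory <-> memory: one value goes through a temp register (possibly
    // spilled on demand), the other through the stack.
    Register tmp = EnsureTempRegister();
    masm->push(cgen_->ToOperand(source));      // Save the old source value.
    masm->mov(tmp, cgen_->ToOperand(destination));
    masm->pop(cgen_->ToOperand(destination));  // Old source -> destination.
    masm->mov(cgen_->ToOperand(source), tmp);  // Old destination -> source.
  }
  // (Other combinations omitted.)
  RemoveMove(index);
}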
......@@ -316,7 +316,7 @@ int LChunk::GetNextSpillIndex(bool is_double) {
}
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index);
......
......@@ -745,10 +745,10 @@ void LAllocator::AddConstraintsGapMove(int index,
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands cur = move_operands->at(i);
LOperand* cur_to = cur.to();
LOperand* cur_to = cur.destination();
if (cur_to->IsUnallocated()) {
if (cur_to->VirtualRegister() == from->VirtualRegister()) {
move->AddMove(cur.from(), to);
move->AddMove(cur.source(), to);
return;
}
}
......@@ -896,8 +896,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands* cur = &move_operands->at(i);
if (cur->IsIgnored()) continue;
LOperand* from = cur->from();
LOperand* to = cur->to();
LOperand* from = cur->source();
LOperand* to = cur->destination();
HPhi* phi = LookupPhi(to);
LOperand* hint = to;
if (phi != NULL) {
......@@ -1217,9 +1217,9 @@ void LAllocator::BuildLiveRanges() {
LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
for (int j = 0; j < move->move_operands()->length(); ++j) {
LOperand* to = move->move_operands()->at(j).to();
LOperand* to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
hint = move->move_operands()->at(j).from();
hint = move->move_operands()->at(j).source();
phi_operand = to;
break;
}
......
......@@ -321,27 +321,49 @@ class LUnallocated: public LOperand {
class LMoveOperands BASE_EMBEDDED {
public:
LMoveOperands(LOperand* from, LOperand* to) : from_(from), to_(to) { }
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
}
LOperand* source() const { return source_; }
void set_source(LOperand* operand) { source_ = operand; }
LOperand* destination() const { return destination_; }
void set_destination(LOperand* operand) { destination_ = operand; }
// The gap resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
bool IsPending() const {
return destination_ == NULL && source_ != NULL;
}
LOperand* from() const { return from_; }
LOperand* to() const { return to_; }
// True if this move blocks a move into the given destination operand,
// i.e. this move still reads that operand as its source.
bool Blocks(LOperand* operand) const {
return !IsEliminated() && source()->Equals(operand);
}
// A move is redundant if it's been eliminated, if its source and
// destination are the same, or if its destination is unneeded.
bool IsRedundant() const {
return IsEliminated() || from_->Equals(to_) || IsIgnored();
return IsEliminated() || source_->Equals(destination_) || IsIgnored();
}
bool IsEliminated() const { return from_ == NULL; }
bool IsIgnored() const {
if (to_ != NULL && to_->IsUnallocated() &&
LUnallocated::cast(to_)->HasIgnorePolicy()) {
return true;
}
return false;
return destination_ != NULL &&
destination_->IsUnallocated() &&
LUnallocated::cast(destination_)->HasIgnorePolicy();
}
void Eliminate() { from_ = to_ = NULL; }
// We clear both operands to indicate move that's been eliminated.
void Eliminate() { source_ = destination_ = NULL; }
bool IsEliminated() const {
ASSERT(source_ != NULL || destination_ == NULL);
return source_ == NULL;
}
private:
LOperand* from_;
LOperand* to_;
LOperand* source_;
LOperand* destination_;
};
......
......@@ -39,18 +39,21 @@ bool LParallelMove::IsRedundant() const {
void LParallelMove::PrintDataTo(StringStream* stream) const {
for (int i = move_operands_.length() - 1; i >= 0; --i) {
bool first = true;
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsEliminated()) {
LOperand* from = move_operands_[i].from();
LOperand* to = move_operands_[i].to();
if (from->Equals(to)) {
to->PrintTo(stream);
LOperand* source = move_operands_[i].source();
LOperand* destination = move_operands_[i].destination();
if (!first) stream->Add(" ");
first = false;
if (source->Equals(destination)) {
destination->PrintTo(stream);
} else {
to->PrintTo(stream);
destination->PrintTo(stream);
stream->Add(" = ");
from->PrintTo(stream);
source->PrintTo(stream);
}
stream->Add("; ");
stream->Add(";");
}
}
}
......
......@@ -35,9 +35,6 @@
namespace v8 {
namespace internal {
class LCodeGen;
class Translation;
class LParallelMove : public ZoneObject {
public:
LParallelMove() : move_operands_(4) { }
......
......@@ -155,13 +155,13 @@ bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.from()->IsConstantOperand()) {
if (move.source()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.from(), move.to());
AddResultMove(move.source(), move.destination());
} else {
LGapNode* from = LookupNode(move.from());
LGapNode* to = LookupNode(move.to());
LGapNode* from = LookupNode(move.source());
LGapNode* to = LookupNode(move.destination());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
......@@ -651,8 +651,8 @@ void LCodeGen::DoParallelMove(LParallelMove* move) {
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.from();
LOperand* to = move.to();
LOperand* from = move.source();
LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(xmm_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
......
......@@ -581,10 +581,10 @@
'../../src/arm/full-codegen-arm.cc',
'../../src/arm/ic-arm.cc',
'../../src/arm/jump-target-arm.cc',
'../../src/arm/lithium-codegen-arm.cc',
'../../src/arm/lithium-codegen-arm.h',
'../../src/arm/lithium-arm.cc',
'../../src/arm/lithium-arm.h',
'../../src/arm/lithium-codegen-arm.cc',
'../../src/arm/lithium-codegen-arm.h',
'../../src/arm/macro-assembler-arm.cc',
'../../src/arm/macro-assembler-arm.h',
'../../src/arm/regexp-macro-assembler-arm.cc',
......@@ -634,6 +634,8 @@
'../../src/ia32/jump-target-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.h',
'../../src/ia32/lithium-gap-resolver-ia32.cc',
'../../src/ia32/lithium-gap-resolver-ia32.h',
'../../src/ia32/lithium-ia32.cc',
'../../src/ia32/lithium-ia32.h',
'../../src/ia32/macro-assembler-ia32.cc',
......