Commit 65f98b1e authored by vegorov@chromium.org

Fix issue 962.

SplitBetween (formerly known as Split with 3 arguments) should select the split position from [start, end] instead of [start, end[. This should also improve allocation quality by removing certain redundant move patterns.

Also some minor renaming and refactoring to make the register allocator code more readable.
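
For reference, the semantic change boils down to the upper bound handed to FindOptimalSplitPos inside SplitBetween, condensed here from the diff below (illustrative excerpt, not the complete function):

// Before: the last legal split candidate was forced strictly before `end`,
// so candidates were effectively drawn from [start, end[.
LifetimePosition split_pos = FindOptimalSplitPos(
    start, end.PrevInstruction().InstructionEnd());

// After: `end` itself is a legal candidate, i.e. candidates come from
// [start, end]. Splitting exactly at `end` avoids the redundant move that
// the artificially shortened interval could force.
LifetimePosition split_pos = FindOptimalSplitPos(start, end);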

BUG=v8:962
TEST=test/mjsunit/regress/regress-962.js

Review URL: http://codereview.chromium.org/5720001

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5969 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 38343f79
@@ -247,7 +247,7 @@ LOperand* LiveRange::CreateAssignedOperand() {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
if (assigned_double_) {
if (IsDouble()) {
op = LDoubleRegister::Create(assigned_register());
} else {
op = LRegister::Create(assigned_register());
@@ -290,7 +290,7 @@ void LiveRange::AdvanceLastProcessedMarker(
void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
ASSERT(Start().Value() <= position.Value());
ASSERT(Start().Value() < position.Value());
ASSERT(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
@@ -625,7 +625,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
if (result == NULL) {
result = new LiveRange(FixedLiveRangeID(index));
ASSERT(result->IsFixed());
result->set_assigned_register(index, false);
result->set_assigned_register(index, GENERAL_REGISTERS);
fixed_live_ranges_[index] = result;
}
return result;
@@ -642,7 +642,7 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
if (result == NULL) {
result = new LiveRange(FixedDoubleLiveRangeID(index));
ASSERT(result->IsFixed());
result->set_assigned_register(index, true);
result->set_assigned_register(index, DOUBLE_REGISTERS);
fixed_double_live_ranges_[index] = result;
}
return result;
@@ -1258,14 +1258,6 @@ void LAllocator::BuildLiveRanges() {
}
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("Allocate general registers", this);
num_registers_ = Register::kNumAllocatableRegisters;
mode_ = CPU_REGISTERS;
AllocateRegisters();
}
bool LAllocator::SafePointsAreInOrder() const {
const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
int safe_point = 0;
@@ -1397,10 +1389,18 @@ void LAllocator::ProcessOsrEntry() {
}
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("Allocate general registers", this);
num_registers_ = Register::kNumAllocatableRegisters;
mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("Allocate double registers", this);
num_registers_ = DoubleRegister::kNumAllocatableRegisters;
mode_ = XMM_REGISTERS;
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1411,7 +1411,7 @@ void LAllocator::AllocateRegisters() {
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
if (HasDoubleValue(live_ranges_[i]->id()) == (mode_ == XMM_REGISTERS)) {
if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
}
}
@@ -1422,7 +1422,7 @@ void LAllocator::AllocateRegisters() {
ASSERT(active_live_ranges_.is_empty());
ASSERT(inactive_live_ranges_.is_empty());
if (mode_ == XMM_REGISTERS) {
if (mode_ == DOUBLE_REGISTERS) {
for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
@@ -1463,11 +1463,7 @@ void LAllocator::AllocateRegisters() {
current->Start().NextInstruction().Value()) {
// Do not spill the live range eagerly if the use position that can benefit
// from the register is too close to the start of the live range.
LiveRange* part = Split(current,
current->Start().NextInstruction(),
pos->pos());
Spill(current);
AddToUnhandledSorted(part);
SpillBetween(current, current->Start(), pos->pos());
ASSERT(UnhandledIsSorted());
continue;
}
@@ -1521,6 +1517,16 @@ void LAllocator::Setup() {
}
const char* LAllocator::RegisterName(int allocation_index) {
ASSERT(mode_ != NONE);
if (mode_ == GENERAL_REGISTERS) {
return Register::AllocationIndexToString(allocation_index);
} else {
return DoubleRegister::AllocationIndexToString(allocation_index);
}
}
void LAllocator::TraceAlloc(const char* msg, ...) {
if (FLAG_trace_alloc) {
va_list arguments;
@@ -1544,10 +1550,12 @@ bool LAllocator::HasTaggedValue(int virtual_register) const {
}
bool LAllocator::HasDoubleValue(int virtual_register) const {
RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
HValue* value = graph()->LookupValue(virtual_register);
if (value == NULL) return false;
return value->representation().IsDouble();
if (value != NULL && value->representation().IsDouble()) {
return DOUBLE_REGISTERS;
}
return GENERAL_REGISTERS;
}
@@ -1728,16 +1736,22 @@ void LAllocator::InactiveToActive(LiveRange* range) {
}
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
LifetimePosition max_pos = LifetimePosition::FromInstructionIndex(
chunk_->instructions()->length() + 1);
ASSERT(DoubleRegister::kNumAllocatableRegisters >=
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
Register::kNumAllocatableRegisters);
EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
free_pos(max_pos);
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* cur_active = active_live_ranges_.at(i);
free_pos[cur_active->assigned_register()] =
free_until_pos[cur_active->assigned_register()] =
LifetimePosition::FromInstructionIndex(0);
}
@@ -1748,65 +1762,83 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
free_pos[cur_reg] = Min(free_pos[cur_reg], next_intersection);
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
}
UsePosition* pos = current->FirstPosWithHint();
if (pos != NULL) {
LOperand* hint = pos->hint();
UsePosition* hinted_use = current->FirstPosWithHint();
if (hinted_use != NULL) {
LOperand* hint = hinted_use->hint();
if (hint->IsRegister() || hint->IsDoubleRegister()) {
int register_index = hint->index();
TraceAlloc("Found reg hint %d for live range %d (free [%d, end %d[)\n",
register_index,
TraceAlloc(
"Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
RegisterName(register_index),
free_until_pos[register_index].Value(),
current->id(),
free_pos[register_index].Value(),
current->End().Value());
if (free_pos[register_index].Value() >= current->End().Value()) {
TraceAlloc("Assigning preferred reg %d to live range %d\n",
register_index,
// The desired register is free until the end of the current live range.
if (free_until_pos[register_index].Value() >= current->End().Value()) {
TraceAlloc("Assigning preferred reg %s to live range %d\n",
RegisterName(register_index),
current->id());
current->set_assigned_register(register_index, mode_ == XMM_REGISTERS);
current->set_assigned_register(register_index, mode_);
return true;
}
}
}
int max_reg = 0;
// Find the register which stays free for the longest time.
int reg = 0;
for (int i = 1; i < RegisterCount(); ++i) {
if (free_pos[i].Value() > free_pos[max_reg].Value()) {
max_reg = i;
if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
reg = i;
}
}
if (free_pos[max_reg].InstructionIndex() == 0) {
LifetimePosition pos = free_until_pos[reg];
if (pos.Value() <= current->Start().Value()) {
// All registers are blocked.
return false;
} else if (free_pos[max_reg].Value() >= current->End().Value()) {
TraceAlloc("Assigning reg %d to live range %d\n", max_reg, current->id());
current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
} else {
// Split the interval before the first use position of max_reg and never
// split the interval at its start position.
LifetimePosition pos = free_pos[max_reg];
if (pos.Value() <= current->Start().Value()) return false;
LiveRange* second_range = Split(current, pos);
AddToUnhandledSorted(second_range);
current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
}
if (pos.Value() < current->End().Value()) {
// Register reg is available at the range start but becomes blocked before
// the range end. Split current at position where it becomes blocked.
LiveRange* tail = SplitAt(current, pos);
AddToUnhandledSorted(tail);
}
// Register reg is available at the range start and is free until
// the range end.
ASSERT(pos.Value() >= current->End().Value());
TraceAlloc("Assigning reg %s to live range %d\n",
RegisterName(reg),
current->id());
current->set_assigned_register(reg, mode_);
return true;
}
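A small worked example of the selection above, with register names and all positions invented for illustration:
// Suppose current covers [4, 30[ and the loops above leave
// free_until_pos = { r0: 6, r1: 18, r2: 2 }. The scan picks reg = r1, the
// register that stays free the longest. 18 > 4, so allocation is not given
// up, but 18 < 30, so current is split at position 18, the tail [18, 30[
// re-enters the unhandled list, and r1 is assigned to the remaining head
// [4, 18[. Had free_until_pos[r1] been >= 30, r1 would have covered the
// whole range and no split would have been necessary.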
void LAllocator::AllocateBlockedReg(LiveRange* current) {
LifetimePosition max_pos =
LifetimePosition::FromInstructionIndex(
chunk_->instructions()->length() + 1);
ASSERT(DoubleRegister::kNumAllocatableRegisters >=
Register::kNumAllocatableRegisters);
EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
use_pos(max_pos);
EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
block_pos(max_pos);
UsePosition* register_use = current->NextRegisterPosition(current->Start());
if (register_use == NULL) {
// There is no use in the current live range that requires a register.
// We can just spill it.
Spill(current);
return;
}
LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* range = active_live_ranges_[i];
@@ -1839,30 +1871,48 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
int max_reg = 0;
int reg = 0;
for (int i = 1; i < RegisterCount(); ++i) {
if (use_pos[i].Value() > use_pos[max_reg].Value()) {
max_reg = i;
if (use_pos[i].Value() > use_pos[reg].Value()) {
reg = i;
}
}
UsePosition* first_usage = current->NextRegisterPosition(current->Start());
if (first_usage == NULL) {
Spill(current);
} else if (use_pos[max_reg].Value() < first_usage->pos().Value()) {
SplitAndSpill(current, current->Start(), first_usage->pos());
} else {
if (block_pos[max_reg].Value() < current->End().Value()) {
// Split current before blocked position.
LiveRange* second_range = Split(current,
LifetimePosition pos = use_pos[reg];
if (pos.Value() < register_use->pos().Value()) {
// All registers are blocked before the first use that requires a register.
// Spill the starting part of the live range up to that use.
//
// Corner case: the first use position is equal to the start of the range.
// In this case there is nothing to spill and SpillBetween would just return
// this range to the list of unhandled ones, leading to an infinite loop.
ASSERT(current->Start().Value() < register_use->pos().Value());
SpillBetween(current, current->Start(), register_use->pos());
return;
}
if (block_pos[reg].Value() < current->End().Value()) {
// Register becomes blocked before the current range end. Split before that
// position.
LiveRange* tail = SplitBetween(current,
current->Start(),
block_pos[max_reg]);
AddToUnhandledSorted(second_range);
block_pos[reg].InstructionStart());
AddToUnhandledSorted(tail);
}
current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
// Register reg is not blocked for the whole range.
ASSERT(block_pos[reg].Value() >= current->End().Value());
TraceAlloc("Assigning reg %s to live range %d\n",
RegisterName(reg),
current->id());
current->set_assigned_register(reg, mode_);
// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register
// at the same lifetime positions as current.
SplitAndSpillIntersecting(current);
}
}
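For contrast, a hypothetical AllocateBlockedReg decision, again with invented positions:
// current covers [10, 50[ and its first use that actually needs a register
// sits at 14. Say use_pos = { r0: 12, r1: 36 } and block_pos = { r0: 12, r1: 28 }.
// reg = r1 (largest use_pos). Since 36 >= 14, current is kept in a register
// rather than spilled up front. block_pos[r1] = 28 < 50, so current is split
// somewhere in [10, 28] (SplitBetween picks the exact spot, preferring a
// loop boundary) and the tail re-enters the unhandled list. r1 is then
// assigned to the head and SplitAndSpillIntersecting evicts the pieces of
// other live ranges that hold r1 over the same positions. Had use_pos[r1]
// been below 14, SpillBetween would instead have spilled current up to that
// first register use.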
@@ -1875,9 +1925,9 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
if (range->assigned_register() == reg) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == NULL) {
SplitAndSpill(range, split_pos);
SpillAfter(range, split_pos);
} else {
SplitAndSpill(range, split_pos, next_pos->pos());
SpillBetween(range, split_pos, next_pos->pos());
}
ActiveToHandled(range);
--i;
@@ -1892,10 +1942,10 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == NULL) {
SplitAndSpill(range, split_pos);
SpillAfter(range, split_pos);
} else {
next_intersection = Min(next_intersection, next_pos->pos());
SplitAndSpill(range, split_pos, next_intersection);
SpillBetween(range, split_pos, next_intersection);
}
InactiveToHandled(range);
--i;
@@ -1905,19 +1955,50 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
}
LiveRange* LAllocator::Split(LiveRange* range,
bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
}
void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
UsePosition* prev_pos = prev->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
UsePosition* next_pos = next->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
LOperand* prev_operand = prev_pos->operand();
LOperand* next_operand = next_pos->operand();
LGap* gap = chunk_->GetGapAt(pos);
gap->GetOrCreateParallelMove(LGap::START)->
AddMove(prev_operand, next_operand);
next_pos->set_hint(prev_operand);
}
LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
if (pos.Value() <= range->Start().Value()) return range;
LiveRange* result = LiveRangeFor(next_virtual_register_++);
range->SplitAt(pos, result);
return result;
}
LiveRange* LAllocator::SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d in position between [%d, %d[\n",
TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
range->id(),
start.Value(),
end.Value());
LifetimePosition split_pos = FindOptimalSplitPos(
start, end.PrevInstruction().InstructionEnd());
LifetimePosition split_pos = FindOptimalSplitPos(start, end);
ASSERT(split_pos.Value() >= start.Value());
return Split(range, split_pos);
return SplitAt(range, split_pos);
}
@@ -1940,76 +2021,49 @@ LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
}
HBasicBlock* block = end_block;
// Move to the most outside loop header.
// Find header of outermost loop.
while (block->parent_loop_header() != NULL &&
block->parent_loop_header()->block_id() > start_block->block_id()) {
block = block->parent_loop_header();
}
if (block == end_block) {
return end;
}
if (block == end_block) return end;
return LifetimePosition::FromInstructionIndex(
block->first_instruction_index());
}
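A hypothetical illustration of the loop-header preference implemented above (block ids invented):
// Suppose start falls in block 3 and end in block 9, and block 9 is nested
// in loops headed by blocks 7 and 5, both of which start after block 3.
// The parent_loop_header() walk climbs 9 -> 7 -> 5 and stops, so the chosen
// split position is the first instruction of block 5. Splitting at the
// outermost loop header keeps the resulting spill moves out of the loop
// bodies; when no enclosing loop starts after start_block, block stays equal
// to end_block and end is returned unchanged.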
bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
}
void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
UsePosition* prev_pos = prev->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
UsePosition* next_pos = next->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
LOperand* prev_operand = prev_pos->operand();
LOperand* next_operand = next_pos->operand();
LGap* gap = chunk_->GetGapAt(pos);
gap->GetOrCreateParallelMove(LGap::START)->
AddMove(prev_operand, next_operand);
next_pos->set_hint(prev_operand);
}
LiveRange* LAllocator::Split(LiveRange* range, LifetimePosition pos) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
if (pos.Value() <= range->Start().Value()) {
return range;
}
LiveRange* result = LiveRangeFor(next_virtual_register_++);
range->SplitAt(pos, result);
return result;
void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
LiveRange* second_part = SplitAt(range, pos);
Spill(second_part);
}
void LAllocator::SplitAndSpill(LiveRange* range,
void LAllocator::SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
// We have an interval range and want to make sure that it is
// spilled at start and at most spilled until end.
ASSERT(start.Value() < end.Value());
LiveRange* tail_part = Split(range, start);
if (tail_part->Start().Value() < end.Value()) {
LiveRange* third_part = Split(tail_part,
tail_part->Start().NextInstruction(),
end);
Spill(tail_part);
ASSERT(third_part != tail_part);
AddToUnhandledSorted(third_part);
} else {
AddToUnhandledSorted(tail_part);
}
}
LiveRange* second_part = SplitAt(range, start);
if (second_part->Start().Value() < end.Value()) {
// The split result intersects with [start, end[.
// Split it at position between ]start+1, end[, spill the middle part
// and put the rest to unhandled.
LiveRange* third_part = SplitBetween(
second_part,
second_part->Start().InstructionEnd(),
end.PrevInstruction().InstructionEnd());
ASSERT(third_part != second_part);
void LAllocator::SplitAndSpill(LiveRange* range, LifetimePosition at) {
LiveRange* second_part = Split(range, at);
Spill(second_part);
AddToUnhandledSorted(third_part);
} else {
// The split result does not intersect with [start, end[.
// Nothing to spill. Just put it back on the unhandled list as a whole.
AddToUnhandledSorted(second_part);
}
}
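To make the three-way split concrete, a hypothetical SpillBetween call (positions invented):
// range covers [2, 40[ and is asked to be spilled between start = 2 and
// end = 20, the next use that needs a register. SplitAt(range, 2) returns
// range itself because the position equals the range start, so second_part
// still covers [2, 40[. Since 2 < 20, the range is split again at a position
// strictly between those bounds; the piece [2, split[ is spilled, and the
// piece [split, 40[ goes back onto the unhandled list so it can still
// receive a register later.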
@@ -2020,7 +2074,7 @@ void LAllocator::Spill(LiveRange* range) {
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == XMM_REGISTERS);
if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
first->SetSpillOperand(op);
}
range->MakeSpilled();
@@ -55,6 +55,7 @@ class LPointerMap;
class LStackSlot;
class LRegister;
// This class represents a single point of a LOperand's lifetime.
// For each lithium instruction there are exactly two lifetime positions:
// the beginning and the end of the instruction. Lifetime positions for
@@ -121,7 +122,13 @@ class LifetimePosition {
// instruction.
bool IsValid() const { return value_ != -1; }
static LifetimePosition Invalid() { return LifetimePosition(); }
static inline LifetimePosition Invalid() { return LifetimePosition(); }
static inline LifetimePosition MaxPosition() {
// We have to use this kind of getter instead of a static member due to
// a crash bug in GDB.
return LifetimePosition(kMaxInt);
}
private:
static const int kStep = 2;
@@ -135,6 +142,13 @@ class LifetimePosition {
};
enum RegisterKind {
NONE,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
class LOperand: public ZoneObject {
public:
enum Kind {
@@ -594,8 +608,8 @@ class LiveRange: public ZoneObject {
explicit LiveRange(int id)
: id_(id),
spilled_(false),
assigned_double_(false),
assigned_register_(kInvalidAssignment),
assigned_register_kind_(NONE),
last_interval_(NULL),
first_interval_(NULL),
first_pos_(NULL),
@@ -620,10 +634,10 @@ class LiveRange: public ZoneObject {
LOperand* CreateAssignedOperand();
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
void set_assigned_register(int reg, bool double_reg) {
void set_assigned_register(int reg, RegisterKind register_kind) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
assigned_double_ = double_reg;
assigned_register_kind_ = register_kind;
ConvertOperands();
}
void MakeSpilled() {
@@ -652,9 +666,13 @@ class LiveRange: public ZoneObject {
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos);
// Split this live range at the given position which must follow the start of
// the range.
// All uses following the given position will be moved from this
// live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result);
bool IsDouble() const { return assigned_double_; }
bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
@@ -721,8 +739,8 @@ class LiveRange: public ZoneObject {
int id_;
bool spilled_;
bool assigned_double_;
int assigned_register_;
RegisterKind assigned_register_kind_;
UseInterval* last_interval_;
UseInterval* first_interval_;
UsePosition* first_pos_;
@@ -774,8 +792,8 @@ class LAllocator BASE_EMBEDDED {
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
// Checks whether the value of a given virtual register is a double.
bool HasDoubleValue(int virtual_register) const;
// Returns the register kind required by the given virtual register.
RegisterKind RequiredRegisterKind(int virtual_register) const;
// Begin a new instruction.
void BeginInstruction();
@@ -814,12 +832,6 @@ class LAllocator BASE_EMBEDDED {
#endif
private:
enum OperationMode {
NONE,
CPU_REGISTERS,
XMM_REGISTERS
};
void MeetRegisterConstraints();
void ResolvePhis();
void BuildLiveRanges();
@@ -871,17 +883,38 @@ class LAllocator BASE_EMBEDDED {
// Helper methods for allocating registers.
bool TryAllocateFreeReg(LiveRange* range);
void AllocateBlockedReg(LiveRange* range);
void SplitAndSpillIntersecting(LiveRange* range);
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
LiveRange* Split(LiveRange* range,
// Live range splitting helpers.
// Split the given range at the given position.
// If range starts at or after the given position then the
// original range is returned.
// Otherwise returns the live range that starts at pos and contains
// all uses from the original range that follow pos. Uses at pos will
// still be owned by the original range after splitting.
LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
// Split the given range at a position from the interval [start, end].
LiveRange* SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end);
LiveRange* Split(LiveRange* range, LifetimePosition split_pos);
void SplitAndSpill(LiveRange* range,
// Find a lifetime position in the interval [start, end] which
// is optimal for splitting: it is either the header of the outermost
// loop covered by this interval or the latest possible position.
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
// Spill the given live range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos);
// Spill the given live range after position start and up to position end.
void SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end);
void SplitAndSpill(LiveRange* range, LifetimePosition at);
void SplitAndSpillIntersecting(LiveRange* range);
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
@@ -914,6 +947,8 @@ class LAllocator BASE_EMBEDDED {
HPhi* LookupPhi(LOperand* operand) const;
LGap* GetLastGap(HBasicBlock* block) const;
const char* RegisterName(int allocation_index);
LChunk* chunk_;
ZoneList<InstructionSummary*> summaries_;
InstructionSummary* next_summary_;
@@ -938,7 +973,7 @@ class LAllocator BASE_EMBEDDED {
// Next virtual register number to be assigned to temporaries.
int next_virtual_register_;
OperationMode mode_;
RegisterKind mode_;
int num_registers_;
HGraph* graph_;
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
function L(scope) { this.s = new Object(); }
L.prototype.c = function() { return true; }
function F() {
this.l = [new L, new L];
}
F.prototype.foo = function () {
var f, d = arguments,
e, b = this.l,
g;
for (e = 0; e < b.length; e++) {
g = b[e];
f = g.c.apply(g.s, d);
if (f === false) {
break
}
}
return f
}
var ctx = new F;
for (var i = 0; i < 10000000; i++) ctx.foo();