Commit a933b704 authored by bbudge, committed by Commit bot

[Turbofan] Add the concept of aliasing to RegisterConfiguration.

- Adds the concept of FP register aliasing to RegisterConfiguration.
- Changes RegisterAllocator to distinguish between FP representations
when allocating.
- Changes LinearScanAllocator to detect interference when the FP register
aliasing kind is COMBINE, as on ARM (see the aliasing sketch below).
- Changes ARM code generation to allow all single-precision registers
s0 - s31 to be accessed.
- Adds unit tests for RegisterConfiguration, mostly to test aliasing
calculations.

LOG=N
BUG=v8:4124

Review-Url: https://codereview.chromium.org/2086653003
Cr-Commit-Position: refs/heads/master@{#37251}
parent f0a03f0b
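
For context, ARM's VFP register file combines: each double register d(i) occupies the pair of single registers s(2i) and s(2i+1), so a kFloat64 value in d1 aliases kFloat32 values in s2 and s3. A minimal standalone sketch of that arithmetic (illustrative helpers only, not code from this change):

#include <utility>

// Under COMBINE aliasing, double register d aliases floats 2d and 2d+1,
// and float register f aliases double f/2.
std::pair<int, int> FloatAliasesOfDouble(int double_code) {
  return {double_code * 2, double_code * 2 + 1};
}
int DoubleAliasOfFloat(int float_code) { return float_code / 2; }

// Example: FloatAliasesOfDouble(1) == {2, 3} (d1 combines s2 and s3);
// DoubleAliasOfFloat(3) == 1 (s3 is the high half of d1).
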
......@@ -36,7 +36,9 @@ class ArmOperandConverter final : public InstructionOperandConverter {
}
SwVfpRegister ToFloat32Register(InstructionOperand* op) {
return ToFloat64Register(op).low();
DCHECK(LocationOperand::cast(op)->representation() ==
MachineRepresentation::kFloat32);
return LocationOperand::cast(op)->GetFloatRegister();
}
LowDwVfpRegister OutputFloat64Register(size_t index = 0) {
......
......@@ -61,8 +61,16 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
bool InstructionOperand::InterferesWith(const InstructionOperand& that) const {
if (!IsFPRegister() || !that.IsFPRegister()) return EqualsCanonicalized(that);
return LocationOperand::cast(this)->register_code() ==
LocationOperand::cast(that).register_code();
const LocationOperand& loc1 = *LocationOperand::cast(this);
const LocationOperand& loc2 = LocationOperand::cast(that);
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
if (config->fp_aliasing_kind() != RegisterConfiguration::COMBINE)
return loc1.register_code() == loc2.register_code();
return config->AreAliases(loc1.representation(), loc1.register_code(),
loc2.representation(), loc2.register_code());
}
void InstructionOperand::Print(const RegisterConfiguration* config) const {
......
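
The new InterferesWith() above reduces FP interference to an aliasing query: under COMBINE (ARM) an operand in s3 interferes with an operand in d1 even though their register codes differ, while under OVERLAP only equal codes interfere. Illustrative call, assuming a const RegisterConfiguration* config with COMBINE aliasing:

// true: float register 3 (s3) is the high half of double register 1 (d1).
bool interferes = config->AreAliases(MachineRepresentation::kFloat32, 3,
                                     MachineRepresentation::kFloat64, 1);
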
......@@ -28,20 +28,34 @@ typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
bool Blocks(const OperandSet& set, const InstructionOperand& operand) {
if (!operand.IsFPRegister()) return set.find(operand) != set.end();
if (set.find(operand) != set.end()) return true;
// Only FP registers alias.
if (!operand.IsFPRegister()) return false;
const LocationOperand& loc = LocationOperand::cast(operand);
if (loc.representation() == MachineRepresentation::kFloat64) {
return set.find(operand) != set.end() ||
set.find(LocationOperand(loc.kind(), loc.location_kind(),
MachineRepresentation::kFloat32,
loc.register_code())) != set.end();
}
DCHECK_EQ(MachineRepresentation::kFloat32, loc.representation());
return set.find(operand) != set.end() ||
set.find(LocationOperand(loc.kind(), loc.location_kind(),
MachineRepresentation::kFloat64,
loc.register_code())) != set.end();
MachineRepresentation rep = loc.representation();
MachineRepresentation other_fp_rep = rep == MachineRepresentation::kFloat64
? MachineRepresentation::kFloat32
: MachineRepresentation::kFloat64;
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
if (config->fp_aliasing_kind() != RegisterConfiguration::COMBINE) {
// Overlap aliasing case.
return set.find(LocationOperand(loc.kind(), loc.location_kind(),
other_fp_rep, loc.register_code())) !=
set.end();
}
// Combine aliasing case.
int alias_base_index = -1;
int aliases = config->GetAliases(rep, loc.register_code(), other_fp_rep,
&alias_base_index);
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
if (set.find(LocationOperand(loc.kind(), loc.location_kind(), other_fp_rep,
aliased_reg)) != set.end())
return true;
}
return false;
}
int FindFirstNonEmptySlot(const Instruction* instr) {
......
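
The Blocks() change above probes the operand set for every register in the other FP representation that aliases the operand's register. A simplified standalone sketch of that enumeration pattern (hypothetical helper, assuming it lives in namespace v8::internal with src/register-configuration.h available, and using a plain set of float register codes in place of OperandSet):

#include <set>

// Returns true if any kFloat32 register code aliasing the given kFloat64
// register (under COMBINE aliasing) is present in the set.
bool AnyFloatAliasInSet(const RegisterConfiguration* config,
                        const std::set<int>& float_codes, int double_code) {
  int alias_base_index = -1;
  int aliases = config->GetAliases(MachineRepresentation::kFloat64,
                                   double_code,
                                   MachineRepresentation::kFloat32,
                                   &alias_base_index);
  while (aliases--) {
    if (float_codes.count(alias_base_index + aliases) != 0) return true;
  }
  return false;
}
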
......@@ -89,7 +89,7 @@ class PendingAssessment final : public Assessment {
DISALLOW_COPY_AND_ASSIGN(PendingAssessment);
};
// FinalAssessmens are associated to operands that we know to be a certain
// FinalAssessments are associated to operands that we know to be a certain
// virtual register.
class FinalAssessment final : public Assessment {
public:
......
......@@ -33,7 +33,7 @@ int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
RegisterKind kind) {
return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
: cfg->num_allocatable_general_registers();
}
......@@ -64,25 +64,33 @@ Instruction* GetLastInstruction(InstructionSequence* code,
return code->InstructionAt(block->last_instruction_index());
}
bool IsOutputRegisterOf(Instruction* instr, Register reg) {
bool IsOutputRegisterOf(Instruction* instr, int code) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
InstructionOperand* output = instr->OutputAt(i);
if (output->IsRegister() &&
LocationOperand::cast(output)->GetRegister().is(reg)) {
LocationOperand::cast(output)->register_code() == code) {
return true;
}
}
return false;
}
bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
bool IsOutputFPRegisterOf(Instruction* instr, MachineRepresentation rep,
int code) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
InstructionOperand* output = instr->OutputAt(i);
if (output->IsFPRegister() &&
LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
return true;
if (output->IsFPRegister()) {
const LocationOperand* op = LocationOperand::cast(output);
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
if (config->fp_aliasing_kind() != RegisterConfiguration::COMBINE) {
if (op->register_code() == code) return true;
} else {
if (config->AreAliases(op->representation(), op->register_code(), rep,
code)) {
return true;
}
}
}
}
return false;
......@@ -319,11 +327,7 @@ bool UsePosition::HintRegister(int* register_code) const {
case UsePositionHintType::kOperand: {
InstructionOperand* operand =
reinterpret_cast<InstructionOperand*>(hint_);
int assigned_register =
operand->IsRegister()
? LocationOperand::cast(operand)->GetRegister().code()
: LocationOperand::cast(operand)->GetDoubleRegister().code();
*register_code = assigned_register;
*register_code = LocationOperand::cast(operand)->register_code();
return true;
}
case UsePositionHintType::kPhi: {
......@@ -1254,12 +1258,6 @@ SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
parent->SetSpillRange(this);
}
int SpillRange::ByteWidth() const {
return GetByteWidth(live_ranges_[0]->representation());
}
bool SpillRange::IsIntersectingWith(SpillRange* other) const {
if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
this->End() <= other->use_interval_->start() ||
......@@ -1362,7 +1360,6 @@ void RegisterAllocationData::PhiMapValue::CommitAssignment(
}
}
RegisterAllocationData::RegisterAllocationData(
const RegisterConfiguration* config, Zone* zone, Frame* frame,
InstructionSequence* code, const char* debug_name)
......@@ -1378,6 +1375,8 @@ RegisterAllocationData::RegisterAllocationData(
allocation_zone()),
fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
allocation_zone()),
fixed_float_live_ranges_(this->config()->num_float_registers(), nullptr,
allocation_zone()),
fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
allocation_zone()),
spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
......@@ -1553,17 +1552,30 @@ SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
return spill_range;
}
void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
if (kind == FP_REGISTERS) {
assigned_double_registers_->Add(index);
} else {
DCHECK(kind == GENERAL_REGISTERS);
assigned_registers_->Add(index);
void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
if (config()->fp_aliasing_kind() == RegisterConfiguration::COMBINE) {
int alias_base_index = -1;
int aliases = config()->GetAliases(
rep, index, MachineRepresentation::kFloat64, &alias_base_index);
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
assigned_double_registers_->Add(aliased_reg);
}
}
break;
case MachineRepresentation::kFloat64:
assigned_double_registers_->Add(index);
break;
default:
DCHECK(!IsFloatingPoint(rep));
assigned_registers_->Add(index);
break;
}
}
bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
return pos.IsFullStart() &&
code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
......@@ -1877,42 +1889,62 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
}
}
int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
return -index - 1 - config()->num_general_registers();
int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kFloat32:
return -index - 1 - config()->num_general_registers();
case MachineRepresentation::kFloat64:
return -index - 1 - config()->num_general_registers() -
config()->num_float_registers();
default:
break;
}
UNREACHABLE();
return 0;
}
TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
DCHECK(index < config()->num_general_registers());
TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedLiveRangeID(index),
InstructionSequence::DefaultRepresentation());
MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(GENERAL_REGISTERS, index);
data()->MarkAllocated(rep, index);
data()->fixed_live_ranges()[index] = result;
}
return result;
}
TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
DCHECK(index < config()->num_double_registers());
TopLevelLiveRange* result = data()->fixed_double_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedDoubleLiveRangeID(index),
MachineRepresentation::kFloat64);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(FP_REGISTERS, index);
data()->fixed_double_live_ranges()[index] = result;
TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
int index, MachineRepresentation rep) {
TopLevelLiveRange* result = nullptr;
if (rep == MachineRepresentation::kFloat64) {
DCHECK(index < config()->num_double_registers());
result = data()->fixed_double_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(rep, index);
data()->fixed_double_live_ranges()[index] = result;
}
} else {
DCHECK(rep == MachineRepresentation::kFloat32);
DCHECK(index < config()->num_float_registers());
result = data()->fixed_float_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(rep, index);
data()->fixed_float_live_ranges()[index] = result;
}
}
return result;
}
TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
if (operand->IsUnallocated()) {
return data()->GetOrCreateLiveRangeFor(
......@@ -1924,8 +1956,8 @@ TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
return FixedLiveRangeFor(
LocationOperand::cast(operand)->GetRegister().code());
} else if (operand->IsFPRegister()) {
return FixedDoubleLiveRangeFor(
LocationOperand::cast(operand)->GetDoubleRegister().code());
LocationOperand* op = LocationOperand::cast(operand);
return FixedFPLiveRangeFor(op->register_code(), op->representation());
} else {
return nullptr;
}
......@@ -2021,7 +2053,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
if (instr->ClobbersRegisters()) {
for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
int code = config()->GetAllocatableGeneralCode(i);
if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
if (!IsOutputRegisterOf(instr, code)) {
TopLevelLiveRange* range = FixedLiveRangeFor(code);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
......@@ -2030,11 +2062,22 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
if (instr->ClobbersDoubleRegisters()) {
for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
++i) {
for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
int code = config()->GetAllocatableDoubleCode(i);
if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
TopLevelLiveRange* range = FixedDoubleLiveRangeFor(code);
if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat64,
code)) {
TopLevelLiveRange* range =
FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
}
for (int i = 0; i < config()->num_allocatable_float_registers(); ++i) {
int code = config()->GetAllocatableFloatCode(i);
if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat32,
code)) {
TopLevelLiveRange* range =
FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
......@@ -2381,8 +2424,10 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
num_allocatable_registers_(
GetAllocatableRegisterCount(data->config(), kind)),
allocatable_register_codes_(
GetAllocatableRegisterCodes(data->config(), kind)) {}
GetAllocatableRegisterCodes(data->config(), kind)),
no_combining_(kind != FP_REGISTERS ||
data->config()->fp_aliasing_kind() !=
RegisterConfiguration::COMBINE) {}
LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
const LiveRange* range, int instruction_index) {
......@@ -2552,14 +2597,6 @@ void RegisterAllocator::Spill(LiveRange* range) {
range->Spill();
}
const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
const {
return mode() == FP_REGISTERS ? data()->fixed_double_live_ranges()
: data()->fixed_live_ranges();
}
const char* RegisterAllocator::RegisterName(int register_code) const {
if (mode() == GENERAL_REGISTERS) {
return data()->config()->GetGeneralRegisterName(register_code);
......@@ -2606,11 +2643,16 @@ void LinearScanAllocator::AllocateRegisters() {
SortUnhandled();
DCHECK(UnhandledIsSorted());
auto& fixed_ranges = GetFixedRegisters();
for (TopLevelLiveRange* current : fixed_ranges) {
if (current != nullptr) {
DCHECK_EQ(mode(), current->kind());
AddToInactive(current);
if (mode() == GENERAL_REGISTERS) {
for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
if (current != nullptr) AddToInactive(current);
}
} else {
for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
if (current != nullptr) AddToInactive(current);
}
for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
if (current != nullptr) AddToInactive(current);
}
}
......@@ -2664,7 +2706,7 @@ void LinearScanAllocator::AllocateRegisters() {
void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
int reg) {
data()->MarkAllocated(range->kind(), reg);
data()->MarkAllocated(range->representation(), reg);
range->set_assigned_register(reg);
range->SetUseHints(reg);
if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
......@@ -2778,18 +2820,37 @@ void LinearScanAllocator::InactiveToActive(LiveRange* range) {
bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
int num_regs = num_registers();
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
if (!no_combining() &&
(current->representation() == MachineRepresentation::kFloat32)) {
num_regs = data()->config()->num_float_registers();
num_codes = data()->config()->num_allocatable_float_registers();
codes = data()->config()->allocatable_float_codes();
}
LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
for (int i = 0; i < num_registers(); i++) {
for (int i = 0; i < num_regs; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
for (LiveRange* cur_active : active_live_ranges()) {
free_until_pos[cur_active->assigned_register()] =
LifetimePosition::GapFromInstructionIndex(0);
TRACE("Register %s is free until pos %d (1)\n",
RegisterName(cur_active->assigned_register()),
LifetimePosition::GapFromInstructionIndex(0).value());
int cur_reg = cur_active->assigned_register();
if (no_combining()) {
free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
LifetimePosition::GapFromInstructionIndex(0).value());
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
cur_active->representation(), cur_reg, current->representation(),
&alias_base_index);
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
free_until_pos[aliased_reg] =
LifetimePosition::GapFromInstructionIndex(0);
}
}
}
for (LiveRange* cur_inactive : inactive_live_ranges()) {
......@@ -2798,9 +2859,21 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
Min(free_until_pos[cur_reg], next_intersection).value());
if (no_combining()) {
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
Min(free_until_pos[cur_reg], next_intersection).value());
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
cur_inactive->representation(), cur_reg, current->representation(),
&alias_base_index);
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
free_until_pos[aliased_reg] =
Min(free_until_pos[aliased_reg], next_intersection);
}
}
}
int hint_register;
......@@ -2822,9 +2895,9 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
}
// Find the register which stays free for the longest time.
int reg = allocatable_register_code(0);
for (int i = 1; i < num_allocatable_registers(); ++i) {
int code = allocatable_register_code(i);
int reg = codes[0];
for (int i = 1; i < num_codes; ++i) {
int code = codes[i];
if (free_until_pos[code] > free_until_pos[reg]) {
reg = code;
}
......@@ -2844,8 +2917,8 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
AddToUnhandledSorted(tail);
}
// Register reg is available at the range start and is free until
// the range end.
// Register reg is available at the range start and is free until the range
// end.
DCHECK(pos >= current->End());
TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
current->TopLevel()->vreg(), current->relative_id());
......@@ -2864,26 +2937,58 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
return;
}
int num_regs = num_registers();
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
if (!no_combining() &&
(current->representation() == MachineRepresentation::kFloat32)) {
num_regs = data()->config()->num_float_registers();
num_codes = data()->config()->num_allocatable_float_registers();
codes = data()->config()->allocatable_float_codes();
}
LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
for (int i = 0; i < num_registers(); i++) {
for (int i = 0; i < num_regs; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
for (LiveRange* range : active_live_ranges()) {
int cur_reg = range->assigned_register();
if (range->TopLevel()->IsFixed() ||
!range->CanBeSpilled(current->Start())) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
UsePosition* next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
if (next_use == nullptr) {
use_pos[cur_reg] = range->End();
bool is_fixed_or_cant_spill =
range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
if (no_combining()) {
if (is_fixed_or_cant_spill) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
use_pos[cur_reg] = next_use->pos();
UsePosition* next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
if (next_use == nullptr) {
use_pos[cur_reg] = range->End();
} else {
use_pos[cur_reg] = next_use->pos();
}
}
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
range->representation(), cur_reg, current->representation(),
&alias_base_index);
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
if (is_fixed_or_cant_spill) {
block_pos[aliased_reg] = use_pos[aliased_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
UsePosition* next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
if (next_use == nullptr) {
use_pos[aliased_reg] = range->End();
} else {
use_pos[aliased_reg] = next_use->pos();
}
}
}
}
}
......@@ -2893,17 +2998,36 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
LifetimePosition next_intersection = range->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
if (range->TopLevel()->IsFixed()) {
block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
bool is_fixed = range->TopLevel()->IsFixed();
if (no_combining()) {
if (is_fixed) {
block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
}
} else {
use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
range->representation(), cur_reg, current->representation(),
&alias_base_index);
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
if (is_fixed) {
block_pos[aliased_reg] =
Min(block_pos[aliased_reg], next_intersection);
use_pos[aliased_reg] =
Min(block_pos[aliased_reg], use_pos[aliased_reg]);
} else {
use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
}
}
}
}
int reg = allocatable_register_code(0);
for (int i = 1; i < num_allocatable_registers(); ++i) {
int code = allocatable_register_code(i);
int reg = codes[0];
for (int i = 1; i < num_codes; ++i) {
int code = codes[i];
if (use_pos[code] > use_pos[reg]) {
reg = code;
}
......@@ -2949,45 +3073,61 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
LifetimePosition split_pos = current->Start();
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
LiveRange* range = active_live_ranges()[i];
if (range->assigned_register() == reg) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
if (next_pos == nullptr) {
SpillAfter(range, spill_pos);
} else {
// When spilling between spill_pos and next_pos ensure that the range
// remains spilled at least until the start of the current live range.
// This guarantees that we will not introduce new unhandled ranges that
// start before the current range as this violates allocation invariant
// and will lead to an inconsistent state of active and inactive
// live-ranges: ranges are allocated in order of their start positions,
// ranges are retired from active/inactive when the start of the
// current live-range is larger than their end.
DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
next_pos->pos()));
SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
if (no_combining()) {
if (range->assigned_register() != reg) continue;
} else {
if (!data()->config()->AreAliases(current->representation(), reg,
range->representation(),
range->assigned_register())) {
continue;
}
ActiveToHandled(range);
--i;
}
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
if (next_pos == nullptr) {
SpillAfter(range, spill_pos);
} else {
// When spilling between spill_pos and next_pos ensure that the range
// remains spilled at least until the start of the current live range.
// This guarantees that we will not introduce new unhandled ranges that
// start before the current range as this violates allocation invariants
// and will lead to an inconsistent state of active and inactive
// live-ranges: ranges are allocated in order of their start positions,
// ranges are retired from active/inactive when the start of the
// current live-range is larger than their end.
DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
next_pos->pos()));
SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
}
ActiveToHandled(range);
--i;
}
for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
LiveRange* range = inactive_live_ranges()[i];
DCHECK(range->End() > current->Start());
if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
LifetimePosition next_intersection = range->FirstIntersection(current);
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == nullptr) {
SpillAfter(range, split_pos);
} else {
next_intersection = Min(next_intersection, next_pos->pos());
SpillBetween(range, split_pos, next_intersection);
}
InactiveToHandled(range);
--i;
if (range->TopLevel()->IsFixed()) continue;
if (no_combining()) {
if (range->assigned_register() != reg) continue;
} else {
if (!data()->config()->AreAliases(current->representation(), reg,
range->representation(),
range->assigned_register()))
continue;
}
LifetimePosition next_intersection = range->FirstIntersection(current);
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == nullptr) {
SpillAfter(range, split_pos);
} else {
next_intersection = Min(next_intersection, next_pos->pos());
SpillBetween(range, split_pos, next_intersection);
}
InactiveToHandled(range);
--i;
}
}
}
......@@ -3167,8 +3307,7 @@ void OperandAssigner::AssignSpillSlots() {
if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
if (!range->HasSlot()) {
int byte_width = range->ByteWidth();
int index = data()->frame()->AllocateSpillSlot(byte_width);
int index = data()->frame()->AllocateSpillSlot(range->byte_width());
range->set_assigned_slot(index);
}
}
......
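
The alias-aware loops added to TryAllocateFreeReg, AllocateBlockedReg and SplitAndSpillIntersecting above all follow one pattern: a position update for a register assigned in one FP representation must be applied to every register that aliases it in the representation currently being allocated. A simplified standalone sketch of that clamp (hypothetical helper, assuming namespace v8::internal; plain ints stand in for LifetimePosition):

#include <algorithm>

// Clamp free_until_pos for every register in current_rep that aliases
// assigned_code, which holds a value of assigned_rep (COMBINE aliasing).
void ClampAliasedFreePositions(const RegisterConfiguration* config,
                               MachineRepresentation assigned_rep,
                               int assigned_code,
                               MachineRepresentation current_rep, int new_pos,
                               int* free_until_pos) {
  int alias_base_index = -1;
  int aliases = config->GetAliases(assigned_rep, assigned_code, current_rep,
                                   &alias_base_index);
  while (aliases--) {
    int aliased_reg = alias_base_index + aliases;
    free_until_pos[aliased_reg] =
        std::min(free_until_pos[aliased_reg], new_pos);
  }
}
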
......@@ -678,8 +678,7 @@ class SpillRange final : public ZoneObject {
SpillRange(TopLevelLiveRange* range, Zone* zone);
UseInterval* interval() const { return use_interval_; }
// Currently, only 4 or 8 byte slots are supported.
int ByteWidth() const;
bool IsEmpty() const { return live_ranges_.empty(); }
bool TryMerge(SpillRange* other);
bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
......@@ -768,6 +767,12 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
return fixed_live_ranges_;
}
ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() {
return fixed_float_live_ranges_;
}
const ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() const {
return fixed_float_live_ranges_;
}
ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
return fixed_double_live_ranges_;
}
......@@ -779,7 +784,7 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
DelayedReferences& delayed_references() { return delayed_references_; }
InstructionSequence* code() const { return code_; }
// This zone is for datastructures only needed during register allocation
// This zone is for data structures only needed during register allocation
// phases.
Zone* allocation_zone() const { return allocation_zone_; }
// This zone is for InstructionOperands and moves that live beyond register
......@@ -810,7 +815,7 @@ class RegisterAllocationData final : public ZoneObject {
bool ExistsUseWithoutDefinition();
bool RangesDefinedInDeferredStayInDeferred();
void MarkAllocated(RegisterKind kind, int index);
void MarkAllocated(MachineRepresentation rep, int index);
PhiMapValue* InitializePhiMap(const InstructionBlock* block,
PhiInstruction* phi);
......@@ -835,6 +840,7 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_float_live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
ZoneVector<SpillRange*> spill_ranges_;
DelayedReferences delayed_references_;
......@@ -911,9 +917,9 @@ class LiveRangeBuilder final : public ZoneObject {
void ProcessLoopHeader(const InstructionBlock* block, BitVector* live);
static int FixedLiveRangeID(int index) { return -index - 1; }
int FixedDoubleLiveRangeID(int index);
int FixedFPLiveRangeID(int index, MachineRepresentation rep);
TopLevelLiveRange* FixedLiveRangeFor(int index);
TopLevelLiveRange* FixedDoubleLiveRangeFor(int index);
TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep);
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
......@@ -947,7 +953,7 @@ class LiveRangeBuilder final : public ZoneObject {
class RegisterAllocator : public ZoneObject {
public:
explicit RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
protected:
RegisterAllocationData* data() const { return data_; }
......@@ -955,9 +961,14 @@ class RegisterAllocator : public ZoneObject {
RegisterKind mode() const { return mode_; }
int num_registers() const { return num_registers_; }
int num_allocatable_registers() const { return num_allocatable_registers_; }
int allocatable_register_code(int allocatable_index) const {
return allocatable_register_codes_[allocatable_index];
const int* allocatable_register_codes() const {
return allocatable_register_codes_;
}
// Returns true if registers do not combine to form larger registers, i.e.
// no complex aliasing detection is required. This is always true for the
// general register pass, and true for the FP register pass except for arm
// and mips archs.
bool no_combining() const { return no_combining_; }
// TODO(mtrofin): explain why splitting in gap START is always OK.
LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
......@@ -1009,6 +1020,9 @@ class RegisterAllocator : public ZoneObject {
int num_allocatable_registers_;
const int* allocatable_register_codes_;
private:
bool no_combining_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
......
......@@ -4,6 +4,7 @@
#include "src/assembler.h"
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/wasm/wasm-module.h"
......@@ -178,7 +179,18 @@ struct Allocator {
if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
return regloc(fp_regs[fp_offset++]);
DoubleRegister reg = fp_regs[fp_offset++];
#if V8_TARGET_ARCH_ARM
// Allocate floats using a double register, but modify the code to
// reflect how ARM FP registers alias.
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
if (type == kAstF32) {
int float_reg_code = reg.code() * 2;
DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
return regloc(DoubleRegister::from_code(float_reg_code));
}
#endif
return regloc(reg);
} else {
int offset = -1 - stack_offset;
stack_offset += Words(type);
......
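
A quick worked instance of the #if V8_TARGET_ARCH_ARM branch above: a kAstF32 parameter that was handed the double register d2 is re-coded to its low single-precision half (illustrative values only):

// Under ARM's COMBINE aliasing, the low half of d(n) is s(2n).
int float_reg_code = 2 /* d2 */ * 2;  // == 4, i.e. s4

Only even-numbered s registers can be produced this way, which is the limitation the TODO about using all float regs in wasm linkage refers to.
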
......@@ -57,16 +57,14 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
Register::kNumRegisters, DoubleRegister::kMaxNumRegisters,
#if V8_TARGET_ARCH_IA32
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount, AliasingKind::OVERLAP,
#elif V8_TARGET_ARCH_X87
kMaxAllocatableGeneralRegisterCount,
compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
AliasingKind::OVERLAP,
#elif V8_TARGET_ARCH_X64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount, AliasingKind::OVERLAP,
#elif V8_TARGET_ARCH_ARM
FLAG_enable_embedded_constant_pool
? (kMaxAllocatableGeneralRegisterCount - 1)
......@@ -74,27 +72,22 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
CpuFeatures::IsSupported(VFP32DREGS)
? kMaxAllocatableDoubleRegisterCount
: (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0),
ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0,
AliasingKind::COMBINE,
#elif V8_TARGET_ARCH_ARM64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount, AliasingKind::OVERLAP,
#elif V8_TARGET_ARCH_MIPS
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount, AliasingKind::OVERLAP,
#elif V8_TARGET_ARCH_MIPS64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount, AliasingKind::OVERLAP,
#elif V8_TARGET_ARCH_PPC
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount, AliasingKind::OVERLAP,
#elif V8_TARGET_ARCH_S390
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount, AliasingKind::OVERLAP,
#else
#error Unsupported target architecture.
#endif
......@@ -135,17 +128,18 @@ const RegisterConfiguration* RegisterConfiguration::ArchDefault(
RegisterConfiguration::RegisterConfiguration(
int num_general_registers, int num_double_registers,
int num_allocatable_general_registers, int num_allocatable_double_registers,
int num_allocatable_aliased_double_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes,
AliasingKind fp_aliasing_kind, const int* allocatable_general_codes,
const int* allocatable_double_codes,
const char* const* general_register_names,
const char* const* float_register_names,
const char* const* double_register_names)
: num_general_registers_(num_general_registers),
num_float_registers_(0),
num_double_registers_(num_double_registers),
num_allocatable_general_registers_(num_allocatable_general_registers),
num_allocatable_double_registers_(num_allocatable_double_registers),
num_allocatable_aliased_double_registers_(
num_allocatable_aliased_double_registers),
num_allocatable_float_registers_(0),
fp_aliasing_kind_(fp_aliasing_kind),
allocatable_general_codes_mask_(0),
allocatable_double_codes_mask_(0),
allocatable_general_codes_(allocatable_general_codes),
......@@ -161,6 +155,79 @@ RegisterConfiguration::RegisterConfiguration(
for (int i = 0; i < num_allocatable_double_registers_; ++i) {
allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
}
if (fp_aliasing_kind_ == COMBINE) {
num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
? num_double_registers_ * 2
: kMaxFPRegisters;
num_allocatable_float_registers_ = 0;
for (int i = 0; i < num_allocatable_double_registers_; i++) {
int base_code = allocatable_double_codes_[i] * 2;
if (base_code >= kMaxFPRegisters) continue;
allocatable_float_codes_[num_allocatable_float_registers_++] = base_code;
allocatable_float_codes_[num_allocatable_float_registers_++] =
base_code + 1;
}
} else {
DCHECK(fp_aliasing_kind_ == OVERLAP);
num_float_registers_ = num_double_registers_;
num_allocatable_float_registers_ = num_allocatable_double_registers_;
for (int i = 0; i < num_allocatable_float_registers_; ++i) {
allocatable_float_codes_[i] = allocatable_double_codes_[i];
}
}
}
int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep,
int* alias_base_index) const {
DCHECK(fp_aliasing_kind_ == COMBINE);
DCHECK(rep == MachineRepresentation::kFloat32 ||
rep == MachineRepresentation::kFloat64);
DCHECK(other_rep == MachineRepresentation::kFloat32 ||
other_rep == MachineRepresentation::kFloat64);
if (rep == other_rep) {
*alias_base_index = index;
return 1;
}
if (rep == MachineRepresentation::kFloat32) {
DCHECK(other_rep == MachineRepresentation::kFloat64);
DCHECK(index < num_allocatable_float_registers_);
*alias_base_index = index / 2;
return 1;
}
DCHECK(rep == MachineRepresentation::kFloat64);
DCHECK(other_rep == MachineRepresentation::kFloat32);
if (index * 2 >= kMaxFPRegisters) {
// Alias indices are out of float register range.
return 0;
}
*alias_base_index = index * 2;
return 2;
}
bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep,
int other_index) const {
DCHECK(fp_aliasing_kind_ == COMBINE);
DCHECK(rep == MachineRepresentation::kFloat32 ||
rep == MachineRepresentation::kFloat64);
DCHECK(other_rep == MachineRepresentation::kFloat32 ||
other_rep == MachineRepresentation::kFloat64);
if (rep == other_rep) {
return index == other_index;
}
if (rep == MachineRepresentation::kFloat32) {
DCHECK(other_rep == MachineRepresentation::kFloat64);
return index / 2 == other_index;
}
DCHECK(rep == MachineRepresentation::kFloat64);
DCHECK(other_rep == MachineRepresentation::kFloat32);
if (index * 2 >= kMaxFPRegisters) {
// Alias indices are out of float register range.
return false;
}
return index == other_index / 2;
}
#undef REGISTER_COUNT
......
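
To make GetAliases() and AreAliases() concrete, a small usage sketch (assumes namespace v8::internal; the configuration mirrors the one in the new unit test below):

void AliasingExample() {
  const int general_codes[] = {1, 2};
  const int double_codes[] = {2, 3, 16};
  RegisterConfiguration config(3, 4, 2, 3, RegisterConfiguration::COMBINE,
                               general_codes, double_codes, nullptr, nullptr,
                               nullptr);
  int base = -1;
  // d1 combines s2 and s3: two kFloat32 aliases starting at code 2.
  int n = config.GetAliases(MachineRepresentation::kFloat64, 1,
                            MachineRepresentation::kFloat32, &base);
  // n == 2, base == 2.
  // s3 has exactly one kFloat64 alias, d1.
  n = config.GetAliases(MachineRepresentation::kFloat32, 3,
                        MachineRepresentation::kFloat64, &base);
  // n == 1, base == 1.
  // d16 would alias s32/s33, which are beyond kMaxFPRegisters, so no aliases.
  n = config.GetAliases(MachineRepresentation::kFloat64, 16,
                        MachineRepresentation::kFloat32, &base);
  // n == 0.
  (void)n;
}
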
......@@ -6,6 +6,7 @@
#define V8_COMPILER_REGISTER_CONFIGURATION_H_
#include "src/base/macros.h"
#include "src/machine-type.h"
namespace v8 {
namespace internal {
......@@ -21,6 +22,13 @@ class RegisterConfiguration {
// until x87 TF supports all of the registers that Crankshaft does.
enum CompilerSelector { CRANKSHAFT, TURBOFAN };
enum AliasingKind {
// Registers alias a single register of every other size (e.g. Intel).
OVERLAP,
// Registers alias two registers of the next smaller size (e.g. ARM).
COMBINE
};
// Architecture independent maxes.
static const int kMaxGeneralRegisters = 32;
static const int kMaxFPRegisters = 32;
......@@ -30,7 +38,7 @@ class RegisterConfiguration {
RegisterConfiguration(int num_general_registers, int num_double_registers,
int num_allocatable_general_registers,
int num_allocatable_double_registers,
int num_allocatable_aliased_double_registers,
AliasingKind fp_aliasing_kind,
const int* allocatable_general_codes,
const int* allocatable_double_codes,
char const* const* general_names,
......@@ -38,6 +46,7 @@ class RegisterConfiguration {
char const* const* double_names);
int num_general_registers() const { return num_general_registers_; }
int num_float_registers() const { return num_float_registers_; }
int num_double_registers() const { return num_double_registers_; }
int num_allocatable_general_registers() const {
return num_allocatable_general_registers_;
......@@ -45,12 +54,10 @@ class RegisterConfiguration {
int num_allocatable_double_registers() const {
return num_allocatable_double_registers_;
}
// TODO(turbofan): This is a temporary work-around required because our
// register allocator does not yet support the aliasing of single/double
// registers on ARM.
int num_allocatable_aliased_double_registers() const {
return num_allocatable_aliased_double_registers_;
int num_allocatable_float_registers() const {
return num_allocatable_float_registers_;
}
AliasingKind fp_aliasing_kind() const { return fp_aliasing_kind_; }
int32_t allocatable_general_codes_mask() const {
return allocatable_general_codes_mask_;
}
......@@ -63,6 +70,9 @@ class RegisterConfiguration {
int GetAllocatableDoubleCode(int index) const {
return allocatable_double_codes_[index];
}
int GetAllocatableFloatCode(int index) const {
return allocatable_float_codes_[index];
}
const char* GetGeneralRegisterName(int code) const {
return general_register_names_[code];
}
......@@ -78,17 +88,35 @@ class RegisterConfiguration {
const int* allocatable_double_codes() const {
return allocatable_double_codes_;
}
const int* allocatable_float_codes() const {
return allocatable_float_codes_;
}
// Aliasing calculations for floating point registers, when fp_aliasing_kind()
// is COMBINE. Currently only implemented for kFloat32, or kFloat64 reps.
// Returns the number of aliases, and if > 0, alias_base_index is set to the
// index of the first alias.
int GetAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep, int* alias_base_index) const;
// Returns a value indicating whether two registers alias each other, when
// fp_aliasing_kind() is COMBINE. Currently only implemented for kFloat32, or
// kFloat64 reps.
bool AreAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep, int other_index) const;
private:
const int num_general_registers_;
int num_float_registers_;
const int num_double_registers_;
int num_allocatable_general_registers_;
int num_allocatable_double_registers_;
int num_allocatable_aliased_double_registers_;
int num_allocatable_float_registers_;
AliasingKind fp_aliasing_kind_;
int32_t allocatable_general_codes_mask_;
int32_t allocatable_double_codes_mask_;
const int* allocatable_general_codes_;
const int* allocatable_double_codes_;
int allocatable_float_codes_[kMaxFPRegisters];
char const* const* general_register_names_;
char const* const* float_register_names_;
char const* const* double_register_names_;
......
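
The float-register accessors declared above are derived by the constructor in register-configuration.cc: under OVERLAP the float set is simply the double set, while under COMBINE each allocatable double register contributes its two single-precision halves, capped at kMaxFPRegisters. A short sketch of the difference (assumes namespace v8::internal; a hypothetical standalone configuration, not an arch default):

void FloatSetExample() {
  const int general_codes[] = {1, 2};
  const int double_codes[] = {2, 3};
  RegisterConfiguration overlap(3, 4, 2, 2, RegisterConfiguration::OVERLAP,
                                general_codes, double_codes, nullptr, nullptr,
                                nullptr);
  // overlap.num_allocatable_float_registers() == 2; float codes are {2, 3}.
  RegisterConfiguration combine(3, 4, 2, 2, RegisterConfiguration::COMBINE,
                                general_codes, double_codes, nullptr, nullptr,
                                nullptr);
  // combine.num_allocatable_float_registers() == 4; float codes are
  // {4, 5, 6, 7} (d2 -> s4, s5 and d3 -> s6, s7).
}
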
......@@ -91,10 +91,19 @@ class Float32RegisterPairs : public Pairs {
Float32RegisterPairs()
: Pairs(
100,
#if V8_TARGET_ARCH_ARM
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->num_allocatable_aliased_double_registers(),
->num_allocatable_double_registers() /
2 -
2,
#else
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->allocatable_double_codes()) {}
->num_allocatable_double_registers(),
#endif
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->allocatable_double_codes()) {
}
};
......@@ -105,7 +114,7 @@ class Float64RegisterPairs : public Pairs {
: Pairs(
100,
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->num_allocatable_aliased_double_registers(),
->num_allocatable_double_registers(),
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->allocatable_double_codes()) {}
};
......@@ -136,7 +145,12 @@ struct Allocator {
if (IsFloatingPoint(type.representation())) {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
return LinkageLocation::ForRegister(fp_regs[fp_offset++]);
int code = fp_regs[fp_offset++];
#if V8_TARGET_ARCH_ARM
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
if (type.representation() == MachineRepresentation::kFloat32) code *= 2;
#endif
return LinkageLocation::ForRegister(code);
} else {
int offset = -1 - stack_offset;
stack_offset += StackWords(type);
......
......@@ -67,8 +67,8 @@ RegisterConfiguration* InstructionSequenceTest::config() {
if (config_.is_empty()) {
config_.Reset(new RegisterConfiguration(
num_general_registers_, num_double_registers_, num_general_registers_,
num_double_registers_, num_double_registers_, allocatable_codes,
allocatable_double_codes, general_register_names_,
num_double_registers_, RegisterConfiguration::OVERLAP,
allocatable_codes, allocatable_double_codes, general_register_names_,
double_register_names_, // float register names
double_register_names_));
}
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/register-configuration.h"
#include "testing/gtest-support.h"
namespace v8 {
namespace internal {
const MachineRepresentation kFloat32 = MachineRepresentation::kFloat32;
const MachineRepresentation kFloat64 = MachineRepresentation::kFloat64;
class RegisterConfigurationUnitTest : public ::testing::Test {
public:
RegisterConfigurationUnitTest() {}
virtual ~RegisterConfigurationUnitTest() {}
private:
};
TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
const int kNumGeneralRegs = 3;
const int kNumDoubleRegs = 4;
const int kNumAllocatableGeneralRegs = 2;
const int kNumAllocatableDoubleRegs = 2;
int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
RegisterConfiguration test(
kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, RegisterConfiguration::OVERLAP, general_codes,
double_codes, nullptr, nullptr, nullptr);
EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
EXPECT_EQ(test.num_allocatable_general_registers(),
kNumAllocatableGeneralRegs);
EXPECT_EQ(test.num_allocatable_double_registers(), kNumAllocatableDoubleRegs);
EXPECT_EQ(test.num_allocatable_float_registers(), kNumAllocatableDoubleRegs);
EXPECT_EQ(test.allocatable_general_codes_mask(),
(1 << general_codes[0]) | (1 << general_codes[1]));
EXPECT_EQ(test.GetAllocatableGeneralCode(0), general_codes[0]);
EXPECT_EQ(test.GetAllocatableGeneralCode(1), general_codes[1]);
EXPECT_EQ(test.allocatable_double_codes_mask(),
(1 << double_codes[0]) | (1 << double_codes[1]));
EXPECT_EQ(test.GetAllocatableDoubleCode(0), double_codes[0]);
EXPECT_EQ(test.GetAllocatableDoubleCode(1), double_codes[1]);
}
TEST_F(RegisterConfigurationUnitTest, Aliasing) {
const int kNumGeneralRegs = 3;
const int kNumDoubleRegs = 4;
const int kNumAllocatableGeneralRegs = 2;
const int kNumAllocatableDoubleRegs = 3;
int general_codes[] = {1, 2};
int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33.
RegisterConfiguration test(
kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, RegisterConfiguration::COMBINE, general_codes,
double_codes, nullptr, nullptr, nullptr);
// There are 3 allocatable double regs, but only 2 can alias float regs.
EXPECT_EQ(test.num_allocatable_float_registers(), 4);
// Test that float registers combine in pairs to form double registers.
EXPECT_EQ(test.GetAllocatableFloatCode(0), double_codes[0] * 2);
EXPECT_EQ(test.GetAllocatableFloatCode(1), double_codes[0] * 2 + 1);
EXPECT_EQ(test.GetAllocatableFloatCode(2), double_codes[1] * 2);
EXPECT_EQ(test.GetAllocatableFloatCode(3), double_codes[1] * 2 + 1);
// Registers alias themselves.
EXPECT_TRUE(test.AreAliases(kFloat32, 0, kFloat32, 0));
EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat64, 0));
// Registers don't alias other registers of the same size.
EXPECT_FALSE(test.AreAliases(kFloat32, 1, kFloat32, 0));
EXPECT_FALSE(test.AreAliases(kFloat64, 1, kFloat64, 0));
// Float registers combine in pairs and alias double registers.
EXPECT_TRUE(test.AreAliases(kFloat32, 0, kFloat64, 0));
EXPECT_TRUE(test.AreAliases(kFloat32, 1, kFloat64, 0));
EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 0));
EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 1));
EXPECT_FALSE(test.AreAliases(kFloat32, 0, kFloat64, 1));
EXPECT_FALSE(test.AreAliases(kFloat32, 1, kFloat64, 1));
EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 1));
EXPECT_TRUE(test.AreAliases(kFloat64, 1, kFloat32, 2));
EXPECT_TRUE(test.AreAliases(kFloat64, 1, kFloat32, 3));
EXPECT_TRUE(test.AreAliases(kFloat64, 2, kFloat32, 4));
EXPECT_TRUE(test.AreAliases(kFloat64, 2, kFloat32, 5));
int alias_base_index = -1;
EXPECT_EQ(test.GetAliases(kFloat32, 0, kFloat32, &alias_base_index), 1);
EXPECT_EQ(alias_base_index, 0);
EXPECT_EQ(test.GetAliases(kFloat64, 1, kFloat64, &alias_base_index), 1);
EXPECT_EQ(alias_base_index, 1);
EXPECT_EQ(test.GetAliases(kFloat32, 0, kFloat64, &alias_base_index), 1);
EXPECT_EQ(alias_base_index, 0);
EXPECT_EQ(test.GetAliases(kFloat32, 1, kFloat64, &alias_base_index), 1);
EXPECT_EQ(test.GetAliases(kFloat32, 2, kFloat64, &alias_base_index), 1);
EXPECT_EQ(alias_base_index, 1);
EXPECT_EQ(test.GetAliases(kFloat32, 3, kFloat64, &alias_base_index), 1);
EXPECT_EQ(alias_base_index, 1);
EXPECT_EQ(test.GetAliases(kFloat64, 0, kFloat32, &alias_base_index), 2);
EXPECT_EQ(alias_base_index, 0);
EXPECT_EQ(test.GetAliases(kFloat64, 1, kFloat32, &alias_base_index), 2);
EXPECT_EQ(alias_base_index, 2);
// Non-allocatable codes still alias.
EXPECT_EQ(test.GetAliases(kFloat64, 2, kFloat32, &alias_base_index), 2);
EXPECT_EQ(alias_base_index, 4);
// High numbered double registers don't alias nonexistent single registers.
EXPECT_EQ(
test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2,
kFloat32, &alias_base_index),
0);
EXPECT_EQ(
test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2 + 1,
kFloat32, &alias_base_index),
0);
EXPECT_EQ(test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters,
kFloat32, &alias_base_index),
0);
}
} // namespace internal
} // namespace v8
......@@ -120,6 +120,7 @@
'heap/scavenge-job-unittest.cc',
'heap/slot-set-unittest.cc',
'locked-queue-unittest.cc',
'register-configuration-unittest.cc',
'run-all-unittests.cc',
'test-utils.h',
'test-utils.cc',
......