Commit a9059632 authored by Ross McIlroy, committed by Commit Bot

[TurboProp] Add framework to pipeline.cc for a fast register allocator.

Adds basic framework to pipeline.cc to enable a separate fast register
allocator for the TurboProp mid-tier. As part of this, common logic as
well as a base class for RegisterAllocationData is moved to a separate
register-allocation.h header file. The current register allocator's
RegisterAllocationData is renamed to TopTierRegisterAllocationData, and
the former name is the new base class held in PipelineData.

BUG=v8:9684

Change-Id: I28285b7d6112505bf90e88ea3cda66d03dfabc74
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2295359
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68852}
parent c8679386
......@@ -1900,6 +1900,7 @@ v8_compiler_sources = [
"src/compiler/backend/live-range-separator.h",
"src/compiler/backend/move-optimizer.cc",
"src/compiler/backend/move-optimizer.h",
"src/compiler/backend/register-allocation.h",
"src/compiler/backend/register-allocator-verifier.cc",
"src/compiler/backend/register-allocator-verifier.h",
"src/compiler/backend/register-allocator.cc",
......
......@@ -16,7 +16,8 @@ namespace compiler {
namespace {
void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
void CreateSplinter(TopLevelLiveRange* range,
TopTierRegisterAllocationData* data,
LifetimePosition first_cut, LifetimePosition last_cut,
bool trace_alloc) {
DCHECK(!range->IsSplinter());
......@@ -68,7 +69,8 @@ void SetSlotUse(TopLevelLiveRange* range) {
}
}
void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
void SplinterLiveRange(TopLevelLiveRange* range,
TopTierRegisterAllocationData* data) {
const InstructionSequence* code = data->code();
UseInterval* interval = range->first_interval();
......
......@@ -13,21 +13,21 @@ class Zone;
namespace compiler {
class RegisterAllocationData;
class TopTierRegisterAllocationData;
// A register allocation pair of transformations: splinter and merge live ranges
class LiveRangeSeparator final : public ZoneObject {
public:
LiveRangeSeparator(RegisterAllocationData* data, Zone* zone)
LiveRangeSeparator(TopTierRegisterAllocationData* data, Zone* zone)
: data_(data), zone_(zone) {}
void Splinter();
private:
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeSeparator);
......@@ -35,13 +35,13 @@ class LiveRangeSeparator final : public ZoneObject {
class LiveRangeMerger final : public ZoneObject {
public:
LiveRangeMerger(RegisterAllocationData* data, Zone* zone)
LiveRangeMerger(TopTierRegisterAllocationData* data, Zone* zone)
: data_(data), zone_(zone) {}
void Merge();
private:
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
// Mark ranges spilled in deferred blocks, that also cover non-deferred code.
......@@ -49,7 +49,7 @@ class LiveRangeMerger final : public ZoneObject {
// because they would "spill in deferred blocks" anyway.
void MarkRangesSpilledInDeferredBlocks();
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeMerger);
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_BACKEND_REGISTER_ALLOCATION_H_
#define V8_COMPILER_BACKEND_REGISTER_ALLOCATION_H_
#include "src/codegen/register-configuration.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
// The two register classes the allocators distinguish: general-purpose
// registers vs. double (floating-point) registers. Used to select which
// register set of a RegisterConfiguration to query.
enum class RegisterKind { kGeneral, kDouble };
// Returns the total number of registers of the given |kind| provided by
// |config| (allocatable or not).
inline int GetRegisterCount(const RegisterConfiguration* config,
                            RegisterKind kind) {
  return kind == RegisterKind::kGeneral ? config->num_general_registers()
                                        : config->num_double_registers();
}
// Returns how many registers of the given |kind| in |config| are available
// to the register allocator.
inline int GetAllocatableRegisterCount(const RegisterConfiguration* config,
                                       RegisterKind kind) {
  return kind == RegisterKind::kGeneral
             ? config->num_allocatable_general_registers()
             : config->num_allocatable_double_registers();
}
// Returns the array of register codes of the given |kind| in |config| that
// are available for allocation. The array is owned by |config|.
inline const int* GetAllocatableRegisterCodes(
    const RegisterConfiguration* config, RegisterKind kind) {
  return kind == RegisterKind::kGeneral ? config->allocatable_general_codes()
                                        : config->allocatable_double_codes();
}
// Returns the byte width of a stack (spill) slot that holds a value of
// representation |rep|. Sub-word and tagged values all occupy a full
// pointer-sized slot; kNone is invalid here.
inline int ByteWidthForStackSlot(MachineRepresentation rep) {
  switch (rep) {
    case MachineRepresentation::kBit:
    case MachineRepresentation::kWord8:
    case MachineRepresentation::kWord16:
    case MachineRepresentation::kWord32:
    case MachineRepresentation::kFloat32:
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kCompressedPointer:
    case MachineRepresentation::kCompressed:
      // Sub-pointer-sized values still get a full slot; the frame cannot
      // address half-size locations yet.
      // TODO(ishell): kTaggedSize once half size locations are supported.
      return kSystemPointerSize;
    case MachineRepresentation::kWord64:
    case MachineRepresentation::kFloat64:
      return kDoubleSize;
    case MachineRepresentation::kSimd128:
      return kSimd128Size;
    case MachineRepresentation::kNone:
      break;
  }
  UNREACHABLE();
}
// Base class for the per-compilation register allocation state shared by
// the allocator tiers. Each tier (top-tier linear scan vs. the mid-tier
// fast allocator this commit prepares for) derives its own data class;
// the Type tag records which tier created the instance so that holders of
// a base pointer (e.g. PipelineData) can perform checked downcasts (see
// TopTierRegisterAllocationData::cast).
class RegisterAllocationData : public ZoneObject {
 public:
  // Which allocator tier this data belongs to.
  enum Type {
    kTopTier,
    kMidTier,
  };

  Type type() const { return type_; }

 protected:
  // Only constructible by subclasses, which supply their tier tag.
  explicit RegisterAllocationData(Type type) : type_(type) {}

 private:
  Type type_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_BACKEND_REGISTER_ALLOCATION_H_
......@@ -32,22 +32,6 @@ static constexpr int kFloat32Bit =
static constexpr int kSimd128Bit =
RepresentationBit(MachineRepresentation::kSimd128);
int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
return kind == FP_REGISTERS ? cfg->num_double_registers()
: cfg->num_general_registers();
}
int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
RegisterKind kind) {
return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
: cfg->num_allocatable_general_registers();
}
const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
RegisterKind kind) {
return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
: cfg->allocatable_general_codes();
}
const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
const InstructionBlock* block) {
......@@ -66,33 +50,6 @@ Instruction* GetLastInstruction(InstructionSequence* code,
return code->InstructionAt(block->last_instruction_index());
}
// TODO(dcarney): fix frame to allow frame accesses to half size location.
int GetByteWidth(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kBit:
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
case MachineRepresentation::kFloat32:
return kSystemPointerSize;
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
// TODO(ishell): kTaggedSize once half size locations are supported.
return kSystemPointerSize;
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat64:
return kDoubleSize;
case MachineRepresentation::kSimd128:
return kSimd128Size;
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
}
} // namespace
class LiveRangeBound {
......@@ -204,7 +161,8 @@ class LiveRangeBoundArray {
class LiveRangeFinder {
public:
explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
explicit LiveRangeFinder(const TopTierRegisterAllocationData* data,
Zone* zone)
: data_(data),
bounds_length_(static_cast<int>(data_->live_ranges().size())),
bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
......@@ -226,7 +184,7 @@ class LiveRangeFinder {
}
private:
const RegisterAllocationData* const data_;
const TopTierRegisterAllocationData* const data_;
const int bounds_length_;
LiveRangeBoundArray* const bounds_;
Zone* const zone_;
......@@ -300,8 +258,8 @@ bool UsePosition::HintRegister(int* register_code) const {
return true;
}
case UsePositionHintType::kPhi: {
RegisterAllocationData::PhiMapValue* phi =
reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
TopTierRegisterAllocationData::PhiMapValue* phi =
reinterpret_cast<TopTierRegisterAllocationData::PhiMapValue*>(hint_);
int assigned_register = phi->assigned_register();
if (assigned_register == kUnassignedRegister) return false;
*register_code = assigned_register;
......@@ -470,7 +428,8 @@ void LiveRange::Spill() {
}
RegisterKind LiveRange::kind() const {
return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
return IsFloatingPoint(representation()) ? RegisterKind::kDouble
: RegisterKind::kGeneral;
}
UsePosition* LiveRange::FirstHintPosition(int* register_index) {
......@@ -934,7 +893,7 @@ void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
gap_index, operand, spill_move_insertion_locations_);
}
void TopLevelLiveRange::CommitSpillMoves(RegisterAllocationData* data,
void TopLevelLiveRange::CommitSpillMoves(TopTierRegisterAllocationData* data,
const InstructionOperand& op,
bool might_be_duplicated) {
DCHECK_IMPLIES(op.IsConstant(),
......@@ -1401,7 +1360,7 @@ void LinearScanAllocator::PrintRangeOverview(std::ostream& os) {
SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
: live_ranges_(zone),
assigned_slot_(kUnassignedSlot),
byte_width_(GetByteWidth(parent->representation())) {
byte_width_(ByteWidthForStackSlot(parent->representation())) {
// Spill ranges are created for top level, non-splintered ranges. This is so
// that, when merging decisions are made, we consider the full extent of the
// virtual register, and avoid clobbering it.
......@@ -1499,9 +1458,8 @@ void SpillRange::Print() const {
os << "}" << std::endl;
}
RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
const InstructionBlock* block,
Zone* zone)
TopTierRegisterAllocationData::PhiMapValue::PhiMapValue(
PhiInstruction* phi, const InstructionBlock* block, Zone* zone)
: phi_(phi),
block_(block),
incoming_operands_(zone),
......@@ -1509,23 +1467,24 @@ RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
incoming_operands_.reserve(phi->operands().size());
}
void RegisterAllocationData::PhiMapValue::AddOperand(
void TopTierRegisterAllocationData::PhiMapValue::AddOperand(
InstructionOperand* operand) {
incoming_operands_.push_back(operand);
}
void RegisterAllocationData::PhiMapValue::CommitAssignment(
void TopTierRegisterAllocationData::PhiMapValue::CommitAssignment(
const InstructionOperand& assigned) {
for (InstructionOperand* operand : incoming_operands_) {
InstructionOperand::ReplaceWith(operand, &assigned);
}
}
RegisterAllocationData::RegisterAllocationData(
TopTierRegisterAllocationData::TopTierRegisterAllocationData(
const RegisterConfiguration* config, Zone* zone, Frame* frame,
InstructionSequence* code, RegisterAllocationFlags flags,
TickCounter* tick_counter, const char* debug_name)
: allocation_zone_(zone),
: RegisterAllocationData(Type::kTopTier),
allocation_zone_(zone),
frame_(frame),
code_(code),
debug_name_(debug_name),
......@@ -1576,7 +1535,7 @@ RegisterAllocationData::RegisterAllocationData(
this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
}
MoveOperands* RegisterAllocationData::AddGapMove(
MoveOperands* TopTierRegisterAllocationData::AddGapMove(
int index, Instruction::GapPosition position,
const InstructionOperand& from, const InstructionOperand& to) {
Instruction* instr = code()->InstructionAt(index);
......@@ -1584,13 +1543,14 @@ MoveOperands* RegisterAllocationData::AddGapMove(
return moves->AddMove(from, to);
}
MachineRepresentation RegisterAllocationData::RepresentationFor(
MachineRepresentation TopTierRegisterAllocationData::RepresentationFor(
int virtual_register) {
DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
return code()->GetRepresentation(virtual_register);
}
TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
TopLevelLiveRange* TopTierRegisterAllocationData::GetOrCreateLiveRangeFor(
int index) {
if (index >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(index + 1, nullptr);
}
......@@ -1602,12 +1562,12 @@ TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
return result;
}
TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
TopLevelLiveRange* TopTierRegisterAllocationData::NewLiveRange(
int index, MachineRepresentation rep) {
return allocation_zone()->New<TopLevelLiveRange>(index, rep);
}
int RegisterAllocationData::GetNextLiveRangeId() {
int TopTierRegisterAllocationData::GetNextLiveRangeId() {
int vreg = virtual_register_count_++;
if (vreg >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(vreg + 1, nullptr);
......@@ -1615,17 +1575,18 @@ int RegisterAllocationData::GetNextLiveRangeId() {
return vreg;
}
TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
TopLevelLiveRange* TopTierRegisterAllocationData::NextLiveRange(
MachineRepresentation rep) {
int vreg = GetNextLiveRangeId();
TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
return ret;
}
RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
const InstructionBlock* block, PhiInstruction* phi) {
RegisterAllocationData::PhiMapValue* map_value =
allocation_zone()->New<RegisterAllocationData::PhiMapValue>(
TopTierRegisterAllocationData::PhiMapValue*
TopTierRegisterAllocationData::InitializePhiMap(const InstructionBlock* block,
PhiInstruction* phi) {
TopTierRegisterAllocationData::PhiMapValue* map_value =
allocation_zone()->New<TopTierRegisterAllocationData::PhiMapValue>(
phi, block, allocation_zone());
auto res =
phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
......@@ -1634,19 +1595,19 @@ RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
return map_value;
}
RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
int virtual_register) {
TopTierRegisterAllocationData::PhiMapValue*
TopTierRegisterAllocationData::GetPhiMapValueFor(int virtual_register) {
auto it = phi_map_.find(virtual_register);
DCHECK(it != phi_map_.end());
return it->second;
}
RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
TopLevelLiveRange* top_range) {
TopTierRegisterAllocationData::PhiMapValue*
TopTierRegisterAllocationData::GetPhiMapValueFor(TopLevelLiveRange* top_range) {
return GetPhiMapValueFor(top_range->vreg());
}
bool RegisterAllocationData::ExistsUseWithoutDefinition() {
bool TopTierRegisterAllocationData::ExistsUseWithoutDefinition() {
bool found = false;
BitVector::Iterator iterator(live_in_sets()[0]);
while (!iterator.Done()) {
......@@ -1674,7 +1635,7 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() {
// path, it will be as one of the inputs of a phi. In that case, the value
// will be transferred via a move in the Gap::END's of the last instruction
// of a deferred block.
bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
bool TopTierRegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
const size_t live_ranges_size = live_ranges().size();
for (const TopLevelLiveRange* range : live_ranges()) {
CHECK_EQ(live_ranges_size,
......@@ -1699,7 +1660,7 @@ bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
return true;
}
SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
SpillRange* TopTierRegisterAllocationData::AssignSpillRangeToLiveRange(
TopLevelLiveRange* range, SpillMode spill_mode) {
using SpillType = TopLevelLiveRange::SpillType;
DCHECK(!range->HasSpillOperand());
......@@ -1725,7 +1686,7 @@ SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
return spill_range;
}
SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
SpillRange* TopTierRegisterAllocationData::CreateSpillRangeForLiveRange(
TopLevelLiveRange* range) {
DCHECK(is_turbo_preprocess_ranges());
DCHECK(!range->HasSpillOperand());
......@@ -1735,8 +1696,8 @@ SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
return spill_range;
}
void RegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
int index) {
void TopTierRegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
......@@ -1763,7 +1724,8 @@ void RegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
}
}
bool RegisterAllocationData::HasFixedUse(MachineRepresentation rep, int index) {
bool TopTierRegisterAllocationData::HasFixedUse(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
......@@ -1792,8 +1754,8 @@ bool RegisterAllocationData::HasFixedUse(MachineRepresentation rep, int index) {
}
}
void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
int index) {
void TopTierRegisterAllocationData::MarkAllocated(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
......@@ -1820,13 +1782,14 @@ void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
}
}
bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
bool TopTierRegisterAllocationData::IsBlockBoundary(
LifetimePosition pos) const {
return pos.IsFullStart() &&
code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
pos.ToInstructionIndex();
}
ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
ConstraintBuilder::ConstraintBuilder(TopTierRegisterAllocationData* data)
: data_(data) {}
InstructionOperand* ConstraintBuilder::AllocateFixed(
......@@ -2027,7 +1990,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
DCHECK_NOT_NULL(gap_move);
if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
if (second->HasReferenceMap()) {
RegisterAllocationData::DelayedReference delayed_reference = {
TopTierRegisterAllocationData::DelayedReference delayed_reference = {
second->reference_map(), &gap_move->source()};
data()->delayed_references().push_back(delayed_reference);
}
......@@ -2046,7 +2009,7 @@ void ConstraintBuilder::ResolvePhis() {
void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
for (PhiInstruction* phi : block->phis()) {
int phi_vreg = phi->virtual_register();
RegisterAllocationData::PhiMapValue* map_value =
TopTierRegisterAllocationData::PhiMapValue* map_value =
data()->InitializePhiMap(block, phi);
InstructionOperand& output = phi->output();
// Map the destination operands, so the commitment phase can find them.
......@@ -2072,12 +2035,12 @@ void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
}
}
LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
LiveRangeBuilder::LiveRangeBuilder(TopTierRegisterAllocationData* data,
Zone* local_zone)
: data_(data), phi_hints_(local_zone) {}
BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
RegisterAllocationData* data) {
BitVector* LiveRangeBuilder::ComputeLiveOut(
const InstructionBlock* block, TopTierRegisterAllocationData* data) {
size_t block_index = block->rpo_number().ToSize();
BitVector* live_out = data->live_out_sets()[block_index];
if (live_out == nullptr) {
......@@ -2925,7 +2888,7 @@ void LiveRangeBundle::MergeSpillRanges() {
}
}
RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
RegisterAllocator::RegisterAllocator(TopTierRegisterAllocationData* data,
RegisterKind kind)
: data_(data),
mode_(kind),
......@@ -2935,7 +2898,7 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
allocatable_register_codes_(
GetAllocatableRegisterCodes(data->config(), kind)),
check_fp_aliasing_(false) {
if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) {
check_fp_aliasing_ = (data->code()->representation_mask() &
(kFloat32Bit | kSimd128Bit)) != 0;
}
......@@ -3172,12 +3135,12 @@ void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
const char* RegisterAllocator::RegisterName(int register_code) const {
if (register_code == kUnassignedRegister) return "unassigned";
return mode() == GENERAL_REGISTERS
return mode() == RegisterKind::kGeneral
? i::RegisterName(Register::from_code(register_code))
: i::RegisterName(DoubleRegister::from_code(register_code));
}
LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
LinearScanAllocator::LinearScanAllocator(TopTierRegisterAllocationData* data,
RegisterKind kind, Zone* local_zone)
: RegisterAllocator(data, kind),
unhandled_live_ranges_(local_zone),
......@@ -3672,7 +3635,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
}
}
};
if (mode() == GENERAL_REGISTERS) {
if (mode() == RegisterKind::kGeneral) {
for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
if (current != nullptr) {
if (current->IsDeferredFixed()) {
......@@ -3769,7 +3732,7 @@ void LinearScanAllocator::AllocateRegisters() {
}
}
if (mode() == GENERAL_REGISTERS) {
if (mode() == RegisterKind::kGeneral) {
for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
if (current != nullptr) {
if (current->IsDeferredFixed()) continue;
......@@ -4664,7 +4627,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
DCHECK(!range->HasSpillOperand());
// Check how many operands belong to the same bundle as the output.
LiveRangeBundle* out_bundle = range->get_bundle();
RegisterAllocationData::PhiMapValue* phi_map_value =
TopTierRegisterAllocationData::PhiMapValue* phi_map_value =
data()->GetPhiMapValueFor(range);
const PhiInstruction* phi = phi_map_value->phi();
const InstructionBlock* block = phi_map_value->block();
......@@ -4777,7 +4740,7 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
}
}
SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
SpillSlotLocator::SpillSlotLocator(TopTierRegisterAllocationData* data)
: data_(data) {}
void SpillSlotLocator::LocateSpillSlots() {
......@@ -4801,7 +4764,8 @@ void SpillSlotLocator::LocateSpillSlots() {
}
}
OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
OperandAssigner::OperandAssigner(TopTierRegisterAllocationData* data)
: data_(data) {}
void OperandAssigner::DecideSpillingMode() {
if (data()->is_turbo_control_flow_aware_allocation()) {
......@@ -4912,7 +4876,8 @@ void OperandAssigner::CommitAssignment() {
}
}
ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
ReferenceMapPopulator::ReferenceMapPopulator(
TopTierRegisterAllocationData* data)
: data_(data) {}
bool ReferenceMapPopulator::SafePointsAreInOrder() const {
......@@ -4927,7 +4892,7 @@ bool ReferenceMapPopulator::SafePointsAreInOrder() const {
void ReferenceMapPopulator::PopulateReferenceMaps() {
DCHECK(SafePointsAreInOrder());
// Map all delayed references.
for (RegisterAllocationData::DelayedReference& delayed_reference :
for (TopTierRegisterAllocationData::DelayedReference& delayed_reference :
data()->delayed_references()) {
delayed_reference.map->RecordReference(
AllocatedOperand::cast(*delayed_reference.operand));
......@@ -5051,7 +5016,7 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
}
}
LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
LiveRangeConnector::LiveRangeConnector(TopTierRegisterAllocationData* data)
: data_(data) {}
bool LiveRangeConnector::CanEagerlyResolveControlFlow(
......
......@@ -10,6 +10,7 @@
#include "src/codegen/register-configuration.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/register-allocation.h"
#include "src/flags/flags.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
......@@ -23,8 +24,6 @@ namespace compiler {
static const int32_t kUnassignedRegister = RegisterConfiguration::kMaxRegisters;
enum RegisterKind { GENERAL_REGISTERS, FP_REGISTERS };
// This class represents a single point of a InstructionOperand's lifetime. For
// each instruction there are four lifetime positions:
//
......@@ -188,8 +187,25 @@ class SpillRange;
class LiveRange;
class TopLevelLiveRange;
class RegisterAllocationData final : public ZoneObject {
class TopTierRegisterAllocationData final : public RegisterAllocationData {
public:
static const TopTierRegisterAllocationData* cast(
const RegisterAllocationData* data) {
DCHECK_EQ(data->type(), Type::kTopTier);
return static_cast<const TopTierRegisterAllocationData*>(data);
}
static TopTierRegisterAllocationData* cast(RegisterAllocationData* data) {
DCHECK_EQ(data->type(), Type::kTopTier);
return static_cast<TopTierRegisterAllocationData*>(data);
}
static const TopTierRegisterAllocationData& cast(
const RegisterAllocationData& data) {
DCHECK_EQ(data.type(), Type::kTopTier);
return static_cast<const TopTierRegisterAllocationData&>(data);
}
// Encodes whether a spill happens in deferred code (kSpillDeferred) or
// regular code (kSpillAtDefinition).
enum SpillMode { kSpillAtDefinition, kSpillDeferred };
......@@ -242,12 +258,12 @@ class RegisterAllocationData final : public ZoneObject {
using RangesWithPreassignedSlots =
ZoneVector<std::pair<TopLevelLiveRange*, int>>;
RegisterAllocationData(const RegisterConfiguration* config,
Zone* allocation_zone, Frame* frame,
InstructionSequence* code,
RegisterAllocationFlags flags,
TickCounter* tick_counter,
const char* debug_name = nullptr);
TopTierRegisterAllocationData(const RegisterConfiguration* config,
Zone* allocation_zone, Frame* frame,
InstructionSequence* code,
RegisterAllocationFlags flags,
TickCounter* tick_counter,
const char* debug_name = nullptr);
const ZoneVector<TopLevelLiveRange*>& live_ranges() const {
return live_ranges_;
......@@ -371,7 +387,7 @@ class RegisterAllocationData final : public ZoneObject {
RegisterAllocationFlags flags_;
TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
DISALLOW_COPY_AND_ASSIGN(TopTierRegisterAllocationData);
};
// Representation of the non-empty interval [start,end[.
......@@ -508,7 +524,7 @@ class V8_EXPORT_PRIVATE UsePosition final
};
class SpillRange;
class RegisterAllocationData;
class TopTierRegisterAllocationData;
class TopLevelLiveRange;
class LiveRangeBundle;
......@@ -903,7 +919,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
spill_start_index_ = Min(start, spill_start_index_);
}
void CommitSpillMoves(RegisterAllocationData* data,
void CommitSpillMoves(TopTierRegisterAllocationData* data,
const InstructionOperand& operand,
bool might_be_duplicated);
......@@ -969,7 +985,8 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
int GetMaxChildCount() const { return last_child_id_ + 1; }
bool IsSpilledOnlyInDeferredBlocks(const RegisterAllocationData* data) const {
bool IsSpilledOnlyInDeferredBlocks(
const TopTierRegisterAllocationData* data) const {
if (data->is_turbo_control_flow_aware_allocation()) {
return spill_type() == SpillType::kDeferredSpillRange;
}
......@@ -979,7 +996,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
struct SpillMoveInsertionList;
SpillMoveInsertionList* GetSpillMoveInsertionLocations(
const RegisterAllocationData* data) const {
const TopTierRegisterAllocationData* data) const {
DCHECK(!IsSpilledOnlyInDeferredBlocks(data));
return spill_move_insertion_locations_;
}
......@@ -998,14 +1015,14 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
bool has_preassigned_slot() const { return has_preassigned_slot_; }
void AddBlockRequiringSpillOperand(RpoNumber block_id,
const RegisterAllocationData* data) {
void AddBlockRequiringSpillOperand(
RpoNumber block_id, const TopTierRegisterAllocationData* data) {
DCHECK(IsSpilledOnlyInDeferredBlocks(data));
GetListOfBlocksRequiringSpillOperands(data)->Add(block_id.ToInt());
}
BitVector* GetListOfBlocksRequiringSpillOperands(
const RegisterAllocationData* data) const {
const TopTierRegisterAllocationData* data) const {
DCHECK(IsSpilledOnlyInDeferredBlocks(data));
return list_of_blocks_requiring_spill_operands_;
}
......@@ -1099,7 +1116,7 @@ class SpillRange final : public ZoneObject {
class ConstraintBuilder final : public ZoneObject {
public:
explicit ConstraintBuilder(RegisterAllocationData* data);
explicit ConstraintBuilder(TopTierRegisterAllocationData* data);
// Phase 1 : insert moves to account for fixed register operands.
void MeetRegisterConstraints();
......@@ -1109,7 +1126,7 @@ class ConstraintBuilder final : public ZoneObject {
void ResolvePhis();
private:
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
Zone* allocation_zone() const { return data()->allocation_zone(); }
......@@ -1122,26 +1139,27 @@ class ConstraintBuilder final : public ZoneObject {
const InstructionBlock* block);
void ResolvePhis(const InstructionBlock* block);
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(ConstraintBuilder);
};
class LiveRangeBuilder final : public ZoneObject {
public:
explicit LiveRangeBuilder(RegisterAllocationData* data, Zone* local_zone);
explicit LiveRangeBuilder(TopTierRegisterAllocationData* data,
Zone* local_zone);
// Phase 3: compute liveness of all virtual register.
void BuildLiveRanges();
static BitVector* ComputeLiveOut(const InstructionBlock* block,
RegisterAllocationData* data);
TopTierRegisterAllocationData* data);
private:
using SpillMode = RegisterAllocationData::SpillMode;
using SpillMode = TopTierRegisterAllocationData::SpillMode;
static constexpr int kNumberOfFixedRangesPerRegister =
RegisterAllocationData::kNumberOfFixedRangesPerRegister;
TopTierRegisterAllocationData::kNumberOfFixedRangesPerRegister;
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
Zone* allocation_zone() const { return data()->allocation_zone(); }
Zone* code_zone() const { return code()->zone(); }
......@@ -1202,7 +1220,7 @@ class LiveRangeBuilder final : public ZoneObject {
}
return SpillMode::kSpillAtDefinition;
}
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
ZoneMap<InstructionOperand*, UsePosition*> phi_hints_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeBuilder);
......@@ -1210,24 +1228,24 @@ class LiveRangeBuilder final : public ZoneObject {
class BundleBuilder final : public ZoneObject {
public:
explicit BundleBuilder(RegisterAllocationData* data) : data_(data) {}
explicit BundleBuilder(TopTierRegisterAllocationData* data) : data_(data) {}
void BuildBundles();
private:
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data_->code(); }
RegisterAllocationData* data_;
TopTierRegisterAllocationData* data_;
int next_bundle_id_ = 0;
};
class RegisterAllocator : public ZoneObject {
public:
RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
RegisterAllocator(TopTierRegisterAllocationData* data, RegisterKind kind);
protected:
using SpillMode = RegisterAllocationData::SpillMode;
RegisterAllocationData* data() const { return data_; }
using SpillMode = TopTierRegisterAllocationData::SpillMode;
TopTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
RegisterKind mode() const { return mode_; }
int num_registers() const { return num_registers_; }
......@@ -1283,7 +1301,7 @@ class RegisterAllocator : public ZoneObject {
const char* RegisterName(int allocation_index) const;
private:
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
const RegisterKind mode_;
const int num_registers_;
int num_allocatable_registers_;
......@@ -1298,7 +1316,7 @@ class RegisterAllocator : public ZoneObject {
class LinearScanAllocator final : public RegisterAllocator {
public:
LinearScanAllocator(RegisterAllocationData* data, RegisterKind kind,
LinearScanAllocator(TopTierRegisterAllocationData* data, RegisterKind kind,
Zone* local_zone);
// Phase 4: compute register assignments.
......@@ -1453,21 +1471,21 @@ class LinearScanAllocator final : public RegisterAllocator {
class SpillSlotLocator final : public ZoneObject {
public:
explicit SpillSlotLocator(RegisterAllocationData* data);
explicit SpillSlotLocator(TopTierRegisterAllocationData* data);
void LocateSpillSlots();
private:
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(SpillSlotLocator);
};
class OperandAssigner final : public ZoneObject {
public:
explicit OperandAssigner(RegisterAllocationData* data);
explicit OperandAssigner(TopTierRegisterAllocationData* data);
// Phase 5: final decision on spilling mode.
void DecideSpillingMode();
......@@ -1479,26 +1497,26 @@ class OperandAssigner final : public ZoneObject {
void CommitAssignment();
private:
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(OperandAssigner);
};
class ReferenceMapPopulator final : public ZoneObject {
public:
explicit ReferenceMapPopulator(RegisterAllocationData* data);
explicit ReferenceMapPopulator(TopTierRegisterAllocationData* data);
// Phase 8: compute values for pointer maps.
void PopulateReferenceMaps();
private:
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
bool SafePointsAreInOrder() const;
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(ReferenceMapPopulator);
};
......@@ -1513,7 +1531,7 @@ class LiveRangeBoundArray;
// assigned operand, be it a register or a slot.
class LiveRangeConnector final : public ZoneObject {
public:
explicit LiveRangeConnector(RegisterAllocationData* data);
explicit LiveRangeConnector(TopTierRegisterAllocationData* data);
// Phase 9: reconnect split ranges with moves, when the control flow
// between the ranges is trivial (no branches).
......@@ -1525,7 +1543,7 @@ class LiveRangeConnector final : public ZoneObject {
void ResolveControlFlow(Zone* local_zone);
private:
RegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
Zone* code_zone() const { return code()->zone(); }
......@@ -1540,7 +1558,7 @@ class LiveRangeConnector final : public ZoneObject {
LiveRangeBoundArray* array,
Zone* temp_zone);
RegisterAllocationData* const data_;
TopTierRegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);
};
......
......@@ -11,6 +11,7 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/source-position.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/backend/register-allocation.h"
#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
......@@ -424,7 +425,8 @@ class GraphC1Visualizer {
void PrintSchedule(const char* phase, const Schedule* schedule,
const SourcePositionTable* positions,
const InstructionSequence* instructions);
void PrintLiveRanges(const char* phase, const RegisterAllocationData* data);
void PrintLiveRanges(const char* phase,
const TopTierRegisterAllocationData* data);
Zone* zone() const { return zone_; }
private:
......@@ -708,9 +710,8 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
}
}
void GraphC1Visualizer::PrintLiveRanges(const char* phase,
const RegisterAllocationData* data) {
void GraphC1Visualizer::PrintLiveRanges(
const char* phase, const TopTierRegisterAllocationData* data) {
Tag tag(this, "intervals");
PrintStringProperty("name", phase);
......@@ -824,9 +825,14 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
std::ostream& operator<<(std::ostream& os,
const AsC1VRegisterAllocationData& ac) {
AccountingAllocator allocator;
Zone tmp_zone(&allocator, ZONE_NAME);
GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
// TODO(rmcilroy): Add support for fast register allocator.
if (ac.data_->type() == RegisterAllocationData::kTopTier) {
AccountingAllocator allocator;
Zone tmp_zone(&allocator, ZONE_NAME);
GraphC1Visualizer(os, &tmp_zone)
.PrintLiveRanges(ac.phase_,
TopTierRegisterAllocationData::cast(ac.data_));
}
return os;
}
......@@ -1067,12 +1073,22 @@ void PrintTopLevelLiveRanges(std::ostream& os,
std::ostream& operator<<(std::ostream& os,
const RegisterAllocationDataAsJSON& ac) {
os << "\"fixed_double_live_ranges\": ";
PrintTopLevelLiveRanges(os, ac.data_.fixed_double_live_ranges(), ac.code_);
os << ",\"fixed_live_ranges\": ";
PrintTopLevelLiveRanges(os, ac.data_.fixed_live_ranges(), ac.code_);
os << ",\"live_ranges\": ";
PrintTopLevelLiveRanges(os, ac.data_.live_ranges(), ac.code_);
if (ac.data_.type() == RegisterAllocationData::kTopTier) {
const TopTierRegisterAllocationData& ac_data =
TopTierRegisterAllocationData::cast(ac.data_);
os << "\"fixed_double_live_ranges\": ";
PrintTopLevelLiveRanges(os, ac_data.fixed_double_live_ranges(), ac.code_);
os << ",\"fixed_live_ranges\": ";
PrintTopLevelLiveRanges(os, ac_data.fixed_live_ranges(), ac.code_);
os << ",\"live_ranges\": ";
PrintTopLevelLiveRanges(os, ac_data.live_ranges(), ac.code_);
} else {
// TODO(rmcilroy): Add support for fast register allocation data. For now
// output the expected fields to keep Turbolizer happy.
os << "\"fixed_double_live_ranges\": {}";
os << ",\"fixed_live_ranges\": {}";
os << ",\"live_ranges\": {}";
}
return os;
}
......
......@@ -346,9 +346,13 @@ class PipelineData {
Frame* frame() const { return frame_; }
Zone* register_allocation_zone() const { return register_allocation_zone_; }
RegisterAllocationData* register_allocation_data() const {
return register_allocation_data_;
}
TopTierRegisterAllocationData* top_tier_register_allocation_data() const {
return TopTierRegisterAllocationData::cast(register_allocation_data_);
}
std::string const& source_position_output() const {
return source_position_output_;
......@@ -479,12 +483,12 @@ class PipelineData {
frame_ = codegen_zone()->New<Frame>(fixed_frame_size);
}
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
CallDescriptor* call_descriptor,
RegisterAllocationFlags flags) {
void InitializeTopTierRegisterAllocationData(
const RegisterConfiguration* config, CallDescriptor* call_descriptor,
RegisterAllocationFlags flags) {
DCHECK_NULL(register_allocation_data_);
register_allocation_data_ =
register_allocation_zone()->New<RegisterAllocationData>(
register_allocation_zone()->New<TopTierRegisterAllocationData>(
config, register_allocation_zone(), frame(), sequence(), flags,
&info()->tick_counter(), debug_name());
}
......@@ -655,8 +659,12 @@ class PipelineImpl final {
void RunPrintAndVerify(const char* phase, bool untyped = false);
bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* call_descriptor, bool run_verifier);
void AllocateRegistersForTopTier(const RegisterConfiguration* config,
CallDescriptor* call_descriptor,
bool run_verifier);
void AllocateRegistersForMidTier(const RegisterConfiguration* config,
CallDescriptor* call_descriptor,
bool run_verifier);
OptimizedCompilationInfo* info() const;
Isolate* isolate() const;
......@@ -2101,7 +2109,7 @@ struct InstructionSelectionPhase {
struct MeetRegisterConstraintsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MeetRegisterConstraints)
void Run(PipelineData* data, Zone* temp_zone) {
ConstraintBuilder builder(data->register_allocation_data());
ConstraintBuilder builder(data->top_tier_register_allocation_data());
builder.MeetRegisterConstraints();
}
};
......@@ -2111,7 +2119,7 @@ struct ResolvePhisPhase {
DECL_PIPELINE_PHASE_CONSTANTS(ResolvePhis)
void Run(PipelineData* data, Zone* temp_zone) {
ConstraintBuilder builder(data->register_allocation_data());
ConstraintBuilder builder(data->top_tier_register_allocation_data());
builder.ResolvePhis();
}
};
......@@ -2121,7 +2129,8 @@ struct BuildLiveRangesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRanges)
void Run(PipelineData* data, Zone* temp_zone) {
LiveRangeBuilder builder(data->register_allocation_data(), temp_zone);
LiveRangeBuilder builder(data->top_tier_register_allocation_data(),
temp_zone);
builder.BuildLiveRanges();
}
};
......@@ -2130,7 +2139,7 @@ struct BuildBundlesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRangeBundles)
void Run(PipelineData* data, Zone* temp_zone) {
BundleBuilder builder(data->register_allocation_data());
BundleBuilder builder(data->top_tier_register_allocation_data());
builder.BuildBundles();
}
};
......@@ -2139,8 +2148,8 @@ struct SplinterLiveRangesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SplinterLiveRanges)
void Run(PipelineData* data, Zone* temp_zone) {
LiveRangeSeparator live_range_splinterer(data->register_allocation_data(),
temp_zone);
LiveRangeSeparator live_range_splinterer(
data->top_tier_register_allocation_data(), temp_zone);
live_range_splinterer.Splinter();
}
};
......@@ -2151,8 +2160,8 @@ struct AllocateGeneralRegistersPhase {
DECL_PIPELINE_PHASE_CONSTANTS(AllocateGeneralRegisters)
void Run(PipelineData* data, Zone* temp_zone) {
RegAllocator allocator(data->register_allocation_data(), GENERAL_REGISTERS,
temp_zone);
RegAllocator allocator(data->top_tier_register_allocation_data(),
RegisterKind::kGeneral, temp_zone);
allocator.AllocateRegisters();
}
};
......@@ -2162,8 +2171,8 @@ struct AllocateFPRegistersPhase {
DECL_PIPELINE_PHASE_CONSTANTS(AllocateFPRegisters)
void Run(PipelineData* data, Zone* temp_zone) {
RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
temp_zone);
RegAllocator allocator(data->top_tier_register_allocation_data(),
RegisterKind::kDouble, temp_zone);
allocator.AllocateRegisters();
}
};
......@@ -2173,7 +2182,8 @@ struct MergeSplintersPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MergeSplinteredRanges)
void Run(PipelineData* pipeline_data, Zone* temp_zone) {
RegisterAllocationData* data = pipeline_data->register_allocation_data();
TopTierRegisterAllocationData* data =
pipeline_data->top_tier_register_allocation_data();
LiveRangeMerger live_range_merger(data, temp_zone);
live_range_merger.Merge();
}
......@@ -2184,7 +2194,7 @@ struct LocateSpillSlotsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(LocateSpillSlots)
void Run(PipelineData* data, Zone* temp_zone) {
SpillSlotLocator locator(data->register_allocation_data());
SpillSlotLocator locator(data->top_tier_register_allocation_data());
locator.LocateSpillSlots();
}
};
......@@ -2193,7 +2203,7 @@ struct DecideSpillingModePhase {
DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
void Run(PipelineData* data, Zone* temp_zone) {
OperandAssigner assigner(data->register_allocation_data());
OperandAssigner assigner(data->top_tier_register_allocation_data());
assigner.DecideSpillingMode();
}
};
......@@ -2202,7 +2212,7 @@ struct AssignSpillSlotsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(AssignSpillSlots)
void Run(PipelineData* data, Zone* temp_zone) {
OperandAssigner assigner(data->register_allocation_data());
OperandAssigner assigner(data->top_tier_register_allocation_data());
assigner.AssignSpillSlots();
}
};
......@@ -2212,7 +2222,7 @@ struct CommitAssignmentPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CommitAssignment)
void Run(PipelineData* data, Zone* temp_zone) {
OperandAssigner assigner(data->register_allocation_data());
OperandAssigner assigner(data->top_tier_register_allocation_data());
assigner.CommitAssignment();
}
};
......@@ -2222,7 +2232,7 @@ struct PopulateReferenceMapsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(PopulatePointerMaps)
void Run(PipelineData* data, Zone* temp_zone) {
ReferenceMapPopulator populator(data->register_allocation_data());
ReferenceMapPopulator populator(data->top_tier_register_allocation_data());
populator.PopulateReferenceMaps();
}
};
......@@ -2232,7 +2242,7 @@ struct ConnectRangesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(ConnectRanges)
void Run(PipelineData* data, Zone* temp_zone) {
LiveRangeConnector connector(data->register_allocation_data());
LiveRangeConnector connector(data->top_tier_register_allocation_data());
connector.ConnectRanges(temp_zone);
}
};
......@@ -2242,12 +2252,11 @@ struct ResolveControlFlowPhase {
DECL_PIPELINE_PHASE_CONSTANTS(ResolveControlFlow)
void Run(PipelineData* data, Zone* temp_zone) {
LiveRangeConnector connector(data->register_allocation_data());
LiveRangeConnector connector(data->top_tier_register_allocation_data());
connector.ResolveControlFlow(temp_zone);
}
};
struct OptimizeMovesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(OptimizeMoves)
......@@ -3039,14 +3048,27 @@ void Pipeline::GenerateCodeForWasmFunction(
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool use_fast_register_allocator,
bool run_verifier) {
OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
Code::STUB);
ZoneStats zone_stats(sequence->isolate()->allocator());
PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
data.InitializeFrameData(nullptr);
if (info.trace_turbo_json()) {
TurboJsonFile json_of(&info, std::ios_base::trunc);
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
}
PipelineImpl pipeline(&data);
pipeline.AllocateRegisters(config, nullptr, run_verifier);
if (use_fast_register_allocator) {
pipeline.AllocateRegistersForMidTier(config, nullptr, run_verifier);
} else {
pipeline.AllocateRegistersForTopTier(config, nullptr, run_verifier);
}
return !data.compilation_failed();
}
......@@ -3153,18 +3175,25 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
DCHECK_LT(0, NumRegs(registers));
std::unique_ptr<const RegisterConfiguration> config;
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegisters(config.get(), call_descriptor, run_verifier);
} else if (data->info()->GetPoisoningMitigationLevel() !=
PoisoningMitigationLevel::kDontPoison) {
AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier);
} else {
const RegisterConfiguration* config;
if (data->info()->GetPoisoningMitigationLevel() !=
PoisoningMitigationLevel::kDontPoison) {
#ifdef V8_TARGET_ARCH_IA32
FATAL("Poisoning is not supported on ia32.");
#else
AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
run_verifier);
config = RegisterConfiguration::Poisoning();
#endif // V8_TARGET_ARCH_IA32
} else {
AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
run_verifier);
} else {
config = RegisterConfiguration::Default();
}
if (FLAG_turboprop_fast_reg_alloc) {
AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
} else {
AllocateRegistersForTopTier(config, call_descriptor, run_verifier);
}
}
// Verify the instruction sequence has the same hash in two stages.
......@@ -3373,9 +3402,9 @@ void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
} // namespace
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* call_descriptor,
bool run_verifier) {
void PipelineImpl::AllocateRegistersForTopTier(
const RegisterConfiguration* config, CallDescriptor* call_descriptor,
bool run_verifier) {
PipelineData* data = this->data_;
// Don't track usage for this zone in compiler stats.
std::unique_ptr<Zone> verifier_zone;
......@@ -3403,7 +3432,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (data->info()->trace_turbo_allocation()) {
flags |= RegisterAllocationFlag::kTraceAllocation;
}
data->InitializeRegisterAllocationData(config, call_descriptor, flags);
data->InitializeTopTierRegisterAllocationData(config, call_descriptor, flags);
if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
Run<MeetRegisterConstraintsPhase>();
......@@ -3413,23 +3442,24 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
TraceSequence(info(), data, "before register allocation");
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
CHECK(data->register_allocation_data()
CHECK(!data->top_tier_register_allocation_data()
->ExistsUseWithoutDefinition());
CHECK(data->top_tier_register_allocation_data()
->RangesDefinedInDeferredStayInDeferred());
}
if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("PreAllocation",
data->register_allocation_data());
tcf << AsC1VRegisterAllocationData(
"PreAllocation", data->top_tier_register_allocation_data());
}
if (info()->turbo_preprocess_ranges()) {
Run<SplinterLiveRangesPhase>();
if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("PostSplinter",
data->register_allocation_data());
tcf << AsC1VRegisterAllocationData(
"PostSplinter", data->top_tier_register_allocation_data());
}
}
......@@ -3473,13 +3503,20 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("CodeGen",
data->register_allocation_data());
tcf << AsC1VRegisterAllocationData(
"CodeGen", data->top_tier_register_allocation_data());
}
data->DeleteRegisterAllocationZone();
}
void PipelineImpl::AllocateRegistersForMidTier(
const RegisterConfiguration* config, CallDescriptor* call_descriptor,
bool run_verifier) {
// TODO(rmcilroy): Implement fast register allocator.
UNREACHABLE();
}
OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }
Isolate* PipelineImpl::isolate() const { return data_->isolate(); }
......
......@@ -101,7 +101,7 @@ class Pipeline : public AllStatic {
// Run just the register allocator phases.
V8_EXPORT_PRIVATE static bool AllocateRegistersForTesting(
const RegisterConfiguration* config, InstructionSequence* sequence,
bool run_verifier);
bool use_fast_register_allocator, bool run_verifier);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
......
......@@ -507,8 +507,9 @@ DEFINE_BOOL(trace_migration, false, "trace object migration")
DEFINE_BOOL(trace_generalization, false, "trace map generalization")
// Flags for TurboProp.
DEFINE_BOOL(turboprop, false,
"enable experimental turboprop mid-tier compiler.")
DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
DEFINE_BOOL(turboprop_fast_reg_alloc, false,
"enable experimental fast register allocator for mid-tier compiler")
DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
DEFINE_IMPLICATION(turboprop, concurrent_inlining)
DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
......
......@@ -78,7 +78,7 @@ class RegisterAllocatorTest : public InstructionSequenceTest {
public:
void Allocate() {
WireBlocks();
Pipeline::AllocateRegistersForTesting(config(), sequence(), true);
Pipeline::AllocateRegistersForTesting(config(), sequence(), false, true);
}
};
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment