Commit c092c081 authored by Ross McIlroy, committed by Commit Bot

[TurboProp] Add initial DefineOutput phase for fast register allocator

Adds the first phase of the fast register allocator, which runs through

the instruction stream and defines a VirtualRegisterData for each
virtual register based on how that virtual register is produced. Also
adds logic to pipeline.cc to allocate and use FastRegisterAllocatorData
for use throughout the fast register allocation phases.

BUG=v8:9684

Change-Id: I2f4533467346d5f3fdf50a0a1fedd7e4082f0187
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2295364
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69010}
parent 3962a0f7
......@@ -1923,6 +1923,8 @@ v8_compiler_sources = [
"src/compiler/backend/jump-threading.h",
"src/compiler/backend/live-range-separator.cc",
"src/compiler/backend/live-range-separator.h",
"src/compiler/backend/mid-tier-register-allocator.cc",
"src/compiler/backend/mid-tier-register-allocator.h",
"src/compiler/backend/move-optimizer.cc",
"src/compiler/backend/move-optimizer.h",
"src/compiler/backend/register-allocation.h",
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/mid-tier-register-allocator.h"
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
#include "src/logging/counters.h"
#include "src/utils/bit-vector.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
class RegisterState;
// Eagerly sizes |virtual_register_data_| with one (default-constructed) entry
// per virtual register in |code|; the entries are populated later by
// MidTierRegisterAllocator::DefineOutputs.
MidTierRegisterAllocationData::MidTierRegisterAllocationData(
    const RegisterConfiguration* config, Zone* zone, Frame* frame,
    InstructionSequence* code, TickCounter* tick_counter,
    const char* debug_name)
    : RegisterAllocationData(Type::kMidTier),
      allocation_zone_(zone),
      frame_(frame),
      code_(code),
      debug_name_(debug_name),
      config_(config),
      virtual_register_data_(code->VirtualRegisterCount(), allocation_zone()),
      reference_map_instructions_(allocation_zone()),
      tick_counter_(tick_counter) {}
// Returns the machine representation recorded for |virtual_register|, or the
// sequence-wide default for the invalid-register sentinel.
MachineRepresentation MidTierRegisterAllocationData::RepresentationFor(
    int virtual_register) {
  // The invalid sentinel has no recorded representation; report the default.
  if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
    return InstructionSequence::DefaultRepresentation();
  }
  DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
  return code()->GetRepresentation(virtual_register);
}
// VirtualRegisterData stores data specific to a particular virtual register,
// and tracks spilled operands for that virtual register.
class VirtualRegisterData final {
 public:
  // Default-constructible so instances can sit uninitialized in the
  // ZoneVector until DefineOutputs fills them in.
  VirtualRegisterData() = default;

  // Define VirtualRegisterData with the type of output that produces this
  // virtual register.
  // Output with no pre-determined spill operand.
  void DefineAsUnallocatedOperand(int virtual_register, int instr_index);
  // Output with a fixed stack slot; |operand| is recorded as the spill
  // operand.
  void DefineAsFixedSpillOperand(AllocatedOperand* operand,
                                 int virtual_register, int instr_index);
  // Constant output; the constant operand itself is recorded as the spill
  // operand.
  void DefineAsConstantOperand(ConstantOperand* operand, int instr_index);
  // Phi output; callers pass the defining block's first instruction index.
  void DefineAsPhi(int virtual_register, int instr_index);

  // The virtual register this data describes.
  int vreg() const { return vreg_; }
  // Index of the instruction (or block start, for phis) that defines vreg().
  int output_instr_index() const { return output_instr_index_; }
  bool is_constant() const { return is_constant_; }
  bool is_phi() const { return is_phi_; }
  void set_is_phi(bool value) { is_phi_ = value; }

 private:
  // Sets every field; the DefineAs* methods are thin wrappers around this.
  void Initialize(int virtual_register, InstructionOperand* spill_operand,
                  int instr_index, bool is_phi, bool is_constant);

  // NOTE(review): written by Initialize but no accessor is visible in this
  // patch — presumably consumed by later allocation phases; confirm.
  InstructionOperand* spill_operand_;
  int output_instr_index_;
  int vreg_;

  bool is_phi_ : 1;
  bool is_constant_ : 1;
};
// Returns the data slot for |virtual_register| (possibly not yet defined).
VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
    int virtual_register) {
  // The vector was sized to code()->VirtualRegisterCount() in the
  // constructor, so any valid virtual register is in bounds.
  DCHECK_GE(virtual_register, 0);
  DCHECK_LT(virtual_register, virtual_register_data_.size());
  return virtual_register_data_[virtual_register];
}
// Records how this virtual register is defined. Only called via the
// DefineAs* helpers, each of which supplies the flag combination for its
// definition kind.
void VirtualRegisterData::Initialize(int virtual_register,
                                     InstructionOperand* spill_operand,
                                     int instr_index, bool is_phi,
                                     bool is_constant) {
  is_phi_ = is_phi;
  is_constant_ = is_constant;
  output_instr_index_ = instr_index;
  spill_operand_ = spill_operand;
  vreg_ = virtual_register;
}
// Constant-producing output: the constant operand itself serves as the spill
// operand.
void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
                                                  int instr_index) {
  Initialize(operand->virtual_register(), operand, instr_index,
             /*is_phi=*/false, /*is_constant=*/true);
}
// Output with a fixed stack slot: |operand| (the pre-allocated slot) is
// recorded as the spill operand.
void VirtualRegisterData::DefineAsFixedSpillOperand(AllocatedOperand* operand,
                                                    int virtual_register,
                                                    int instr_index) {
  Initialize(virtual_register, operand, instr_index, /*is_phi=*/false,
             /*is_constant=*/false);
}
// Plain output with no pre-determined spill operand.
void VirtualRegisterData::DefineAsUnallocatedOperand(int virtual_register,
                                                     int instr_index) {
  Initialize(virtual_register, /*spill_operand=*/nullptr, instr_index,
             /*is_phi=*/false, /*is_constant=*/false);
}
// Phi output: no spill operand; the caller passes the index of the defining
// block's first instruction as |instr_index|.
void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index) {
  Initialize(virtual_register, /*spill_operand=*/nullptr, instr_index,
             /*is_phi=*/true, /*is_constant=*/false);
}
// |data| is not owned here; it must outlive the allocator (the pipeline
// allocates it in the register allocation zone).
MidTierRegisterAllocator::MidTierRegisterAllocator(
    MidTierRegisterAllocationData* data)
    : data_(data) {}

MidTierRegisterAllocator::~MidTierRegisterAllocator() = default;
void MidTierRegisterAllocator::DefineOutputs() {
for (const InstructionBlock* block :
base::Reversed(code()->instruction_blocks())) {
data_->tick_counter()->DoTick();
DefineOutputs(block);
}
}
// Walks |block|'s instructions in reverse order and records, for every output
// operand, how its virtual register is defined (constant, fixed spill slot,
// phi, or plain unallocated operand).
void MidTierRegisterAllocator::DefineOutputs(const InstructionBlock* block) {
  int block_start = block->first_instruction_index();
  for (int index = block->last_instruction_index(); index >= block_start;
       index--) {
    Instruction* instr = code()->InstructionAt(index);

    // For each instruction, define details of the output with the associated
    // virtual register data.
    for (size_t i = 0; i < instr->OutputCount(); i++) {
      InstructionOperand* output = instr->OutputAt(i);
      if (output->IsConstant()) {
        ConstantOperand* constant_operand = ConstantOperand::cast(output);
        int virtual_register = constant_operand->virtual_register();
        VirtualRegisterDataFor(virtual_register)
            .DefineAsConstantOperand(constant_operand, index);
      } else {
        // Any non-constant output must still be unallocated at this stage.
        DCHECK(output->IsUnallocated());
        UnallocatedOperand* unallocated_operand =
            UnallocatedOperand::cast(output);
        int virtual_register = unallocated_operand->virtual_register();
        if (unallocated_operand->HasFixedSlotPolicy()) {
          // If output has a fixed slot policy, allocate its spill operand now
          // so that the register allocator can use this knowledge.
          MachineRepresentation rep = RepresentationFor(virtual_register);
          AllocatedOperand* fixed_spill_operand = AllocatedOperand::New(
              allocation_zone(), AllocatedOperand::STACK_SLOT, rep,
              unallocated_operand->fixed_slot_index());
          VirtualRegisterDataFor(virtual_register)
              .DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
                                         index);
        } else {
          VirtualRegisterDataFor(virtual_register)
              .DefineAsUnallocatedOperand(virtual_register, index);
        }
      }
    }

    // Mark any instructions that require reference maps for later reference
    // map processing.
    if (instr->HasReferenceMap()) {
      data()->reference_map_instructions().push_back(index);
    }
  }

  // Define phi output operands. Phis are defined at the block's first
  // instruction index rather than at any individual gap/instruction position.
  for (PhiInstruction* phi : block->phis()) {
    int virtual_register = phi->virtual_register();
    VirtualRegisterDataFor(virtual_register)
        .DefineAsPhi(virtual_register, block->first_instruction_index());
  }
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_BACKEND_MID_TIER_REGISTER_ALLOCATOR_H_
#define V8_COMPILER_BACKEND_MID_TIER_REGISTER_ALLOCATOR_H_
#include "src/base/compiler-specific.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/register-allocation.h"
#include "src/flags/flags.h"
#include "src/utils/bit-vector.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
class TickCounter;
namespace compiler {
class VirtualRegisterData;
// The MidTierRegisterAllocator is a register allocator specifically designed
// to perform register allocation as fast as possible while minimizing spill
// moves. MidTierRegisterAllocationData holds the state shared by all of its
// allocation phases.
class MidTierRegisterAllocationData final : public RegisterAllocationData {
 public:
  MidTierRegisterAllocationData(const RegisterConfiguration* config,
                                Zone* allocation_zone, Frame* frame,
                                InstructionSequence* code,
                                TickCounter* tick_counter,
                                const char* debug_name = nullptr);

  // Downcasts |data| from the common base class; DCHECKs the dynamic type.
  static MidTierRegisterAllocationData* cast(RegisterAllocationData* data) {
    DCHECK_EQ(data->type(), Type::kMidTier);
    return static_cast<MidTierRegisterAllocationData*>(data);
  }

  // Per-virtual-register definition data, keyed by virtual register number.
  VirtualRegisterData& VirtualRegisterDataFor(int virtual_register);
  // Machine representation of |virtual_register| (the default representation
  // for the invalid-register sentinel).
  MachineRepresentation RepresentationFor(int virtual_register);

  // List of all instruction indexes that require a reference map.
  ZoneVector<int>& reference_map_instructions() {
    return reference_map_instructions_;
  }

  // This zone is for data structures only needed during register allocation
  // phases.
  Zone* allocation_zone() const { return allocation_zone_; }

  // This zone is for InstructionOperands and moves that live beyond register
  // allocation.
  Zone* code_zone() const { return code()->zone(); }

  InstructionSequence* code() const { return code_; }
  Frame* frame() const { return frame_; }
  const char* debug_name() const { return debug_name_; }
  const RegisterConfiguration* config() const { return config_; }
  TickCounter* tick_counter() { return tick_counter_; }

 private:
  Zone* const allocation_zone_;
  Frame* const frame_;
  InstructionSequence* const code_;
  const char* const debug_name_;
  const RegisterConfiguration* const config_;
  ZoneVector<VirtualRegisterData> virtual_register_data_;
  ZoneVector<int> reference_map_instructions_;
  TickCounter* const tick_counter_;

  DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocationData);
};
// Drives the mid-tier register allocation phases over a shared
// MidTierRegisterAllocationData instance. Only phase 1 (DefineOutputs) exists
// so far.
class MidTierRegisterAllocator final {
 public:
  explicit MidTierRegisterAllocator(MidTierRegisterAllocationData* data);
  ~MidTierRegisterAllocator();

  // Phase 1: Process instruction outputs to determine how each virtual
  // register is defined.
  void DefineOutputs();

  // TODO(rmcilroy): Phase 2 - allocate registers to instructions.

 private:
  // Define outputs operations.
  // NOTE(review): InitializeBlockState is declared but has no definition in
  // this patch — confirm a definition lands with a follow-up phase.
  void InitializeBlockState(const InstructionBlock* block);
  // Per-block worker for DefineOutputs().
  void DefineOutputs(const InstructionBlock* block);

  // Convenience forwarders to the shared allocation data.
  VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
    return data()->VirtualRegisterDataFor(virtual_register);
  }
  MachineRepresentation RepresentationFor(int virtual_register) const {
    return data()->RepresentationFor(virtual_register);
  }
  MidTierRegisterAllocationData* data() const { return data_; }
  InstructionSequence* code() const { return data()->code(); }
  Zone* allocation_zone() const { return data()->allocation_zone(); }

  // Not owned; must outlive this allocator.
  MidTierRegisterAllocationData* data_;

  DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocator);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_BACKEND_MID_TIER_REGISTER_ALLOCATOR_H_
......@@ -23,6 +23,7 @@
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
#include "src/compiler/backend/live-range-separator.h"
#include "src/compiler/backend/mid-tier-register-allocator.h"
#include "src/compiler/backend/move-optimizer.h"
#include "src/compiler/backend/register-allocator-verifier.h"
#include "src/compiler/backend/register-allocator.h"
......@@ -356,6 +357,9 @@ class PipelineData {
TopTierRegisterAllocationData* top_tier_register_allocation_data() const {
return TopTierRegisterAllocationData::cast(register_allocation_data_);
}
MidTierRegisterAllocationData* mid_tier_register_allocator_data() const {
return MidTierRegisterAllocationData::cast(register_allocation_data_);
}
std::string const& source_position_output() const {
return source_position_output_;
......@@ -496,6 +500,15 @@ class PipelineData {
&info()->tick_counter(), debug_name());
}
void InitializeMidTierRegisterAllocationData(
const RegisterConfiguration* config, CallDescriptor* call_descriptor) {
DCHECK_NULL(register_allocation_data_);
register_allocation_data_ =
register_allocation_zone()->New<MidTierRegisterAllocationData>(
config, register_allocation_zone(), frame(), sequence(),
&info()->tick_counter(), debug_name());
}
void InitializeOsrHelper() {
DCHECK(!osr_helper_.has_value());
osr_helper_.emplace(info());
......@@ -2267,6 +2280,16 @@ struct ResolveControlFlowPhase {
}
};
struct MidTierRegisterAllocatorPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
void Run(PipelineData* data, Zone* temp_zone) {
MidTierRegisterAllocator allocator(
data->mid_tier_register_allocator_data());
allocator.DefineOutputs();
}
};
struct OptimizeMovesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(OptimizeMoves)
......@@ -3148,7 +3171,7 @@ void Pipeline::GenerateCodeForWasmFunction(
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool use_fast_register_allocator,
bool use_mid_tier_register_allocator,
bool run_verifier) {
OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
Code::STUB);
......@@ -3163,7 +3186,7 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
}
PipelineImpl pipeline(&data);
if (use_fast_register_allocator) {
if (use_mid_tier_register_allocator) {
pipeline.AllocateRegistersForMidTier(config, nullptr, run_verifier);
} else {
pipeline.AllocateRegistersForTopTier(config, nullptr, run_verifier);
......@@ -3289,7 +3312,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
config = RegisterConfiguration::Default();
}
if (FLAG_turboprop_fast_reg_alloc) {
if (FLAG_turboprop_mid_tier_reg_alloc) {
AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
} else {
AllocateRegistersForTopTier(config, call_descriptor, run_verifier);
......@@ -3613,8 +3636,41 @@ void PipelineImpl::AllocateRegistersForTopTier(
void PipelineImpl::AllocateRegistersForMidTier(
const RegisterConfiguration* config, CallDescriptor* call_descriptor,
bool run_verifier) {
// TODO(rmcilroy): Implement fast register allocator.
UNREACHABLE();
PipelineData* data = data_;
// Don't track usage for this zone in compiler stats.
std::unique_ptr<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
verifier_zone.reset(
new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
verifier = verifier_zone->New<RegisterAllocatorVerifier>(
verifier_zone.get(), config, data->sequence(), data->frame());
}
#ifdef DEBUG
data->sequence()->ValidateEdgeSplitForm();
data->sequence()->ValidateDeferredBlockEntryPaths();
data->sequence()->ValidateDeferredBlockExitPaths();
#endif
if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
data->InitializeMidTierRegisterAllocationData(config, call_descriptor);
TraceSequence(info(), data, "before register allocation");
Run<MidTierRegisterAllocatorPhase>();
// TODO(rmcilroy): Run spill slot allocation and reference map population
// phases
TraceSequence(info(), data, "after register allocation");
if (verifier != nullptr) {
verifier->VerifyAssignment("End of regalloc pipeline.");
verifier->VerifyGapMoves();
}
data->DeleteRegisterAllocationZone();
}
OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }
......
......@@ -502,8 +502,8 @@ DEFINE_BOOL(trace_generalization, false, "trace map generalization")
// Flags for TurboProp.
DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
DEFINE_BOOL(turboprop_fast_reg_alloc, false,
"enable experimental fast register allocator for mid-tier compiler")
DEFINE_BOOL(turboprop_mid_tier_reg_alloc, false,
"enable experimental mid-tier register allocator")
DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
DEFINE_IMPLICATION(turboprop, concurrent_inlining)
DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
......
......@@ -907,6 +907,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment