Commit a9059632 authored by Ross McIlroy, committed by Commit Bot

[TurboProp] Add framework to pipeline.cc for a fast register allocator.

Adds the basic framework to pipeline.cc to enable a separate fast register
allocator for the TurboProp mid-tier. As part of this, common logic, along
with a new RegisterAllocationData base class, is moved to a separate
register-allocation.h header file. The current register allocator's
RegisterAllocationData is renamed to TopTierRegisterAllocationData, and the
old name now refers to the new base class, which is what PipelineData holds.

BUG=v8:9684

Change-Id: I28285b7d6112505bf90e88ea3cda66d03dfabc74
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2295359
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68852}
parent c8679386
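
For orientation before the diffs: the refactoring centres on a small tagged base class that both register allocators' data can derive from, with consumers checking type() before downcasting. Below is a minimal, self-contained sketch of that pattern using stand-in types; the cast() signature and the live_range_count() payload are illustrative only, not the real V8 declarations.

// Minimal sketch of the tagged base-class pattern this CL introduces.
// RegisterAllocationData/TopTierRegisterAllocationData mirror the new V8
// classes; cast()'s exact signature and live_range_count() are stand-ins.
#include <cassert>
#include <iostream>

class RegisterAllocationData {
 public:
  enum Type { kTopTier, kMidTier };
  Type type() const { return type_; }

 protected:
  explicit RegisterAllocationData(Type type) : type_(type) {}

 private:
  Type type_;
};

class TopTierRegisterAllocationData final : public RegisterAllocationData {
 public:
  TopTierRegisterAllocationData() : RegisterAllocationData(kTopTier) {}

  // Checked downcast, analogous to the cast() helper the visualizer calls.
  static const TopTierRegisterAllocationData* cast(
      const RegisterAllocationData* data) {
    assert(data->type() == kTopTier);
    return static_cast<const TopTierRegisterAllocationData*>(data);
  }

  int live_range_count() const { return 42; }  // placeholder payload
};

// Consumers that only understand top-tier data guard on type() first.
void PrintLiveRanges(const RegisterAllocationData* data) {
  if (data->type() != RegisterAllocationData::kTopTier) return;
  const TopTierRegisterAllocationData* top =
      TopTierRegisterAllocationData::cast(data);
  std::cout << "live ranges: " << top->live_range_count() << "\n";
}

int main() {
  TopTierRegisterAllocationData data;
  PrintLiveRanges(&data);
  return 0;
}

The graph-visualizer hunks further down use exactly this guard-then-cast shape.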
@@ -1900,6 +1900,7 @@ v8_compiler_sources = [
     "src/compiler/backend/live-range-separator.h",
     "src/compiler/backend/move-optimizer.cc",
     "src/compiler/backend/move-optimizer.h",
+    "src/compiler/backend/register-allocation.h",
     "src/compiler/backend/register-allocator-verifier.cc",
     "src/compiler/backend/register-allocator-verifier.h",
     "src/compiler/backend/register-allocator.cc",
@@ -16,7 +16,8 @@ namespace compiler {
 namespace {
-void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
+void CreateSplinter(TopLevelLiveRange* range,
+                    TopTierRegisterAllocationData* data,
                     LifetimePosition first_cut, LifetimePosition last_cut,
                     bool trace_alloc) {
   DCHECK(!range->IsSplinter());
@@ -68,7 +69,8 @@ void SetSlotUse(TopLevelLiveRange* range) {
   }
 }
-void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
+void SplinterLiveRange(TopLevelLiveRange* range,
+                       TopTierRegisterAllocationData* data) {
   const InstructionSequence* code = data->code();
   UseInterval* interval = range->first_interval();
@@ -13,21 +13,21 @@ class Zone;
 namespace compiler {
-class RegisterAllocationData;
+class TopTierRegisterAllocationData;
 // A register allocation pair of transformations: splinter and merge live ranges
 class LiveRangeSeparator final : public ZoneObject {
  public:
-  LiveRangeSeparator(RegisterAllocationData* data, Zone* zone)
+  LiveRangeSeparator(TopTierRegisterAllocationData* data, Zone* zone)
       : data_(data), zone_(zone) {}
   void Splinter();
  private:
-  RegisterAllocationData* data() const { return data_; }
+  TopTierRegisterAllocationData* data() const { return data_; }
   Zone* zone() const { return zone_; }
-  RegisterAllocationData* const data_;
+  TopTierRegisterAllocationData* const data_;
   Zone* const zone_;
   DISALLOW_COPY_AND_ASSIGN(LiveRangeSeparator);
@@ -35,13 +35,13 @@ class LiveRangeSeparator final : public ZoneObject {
 class LiveRangeMerger final : public ZoneObject {
  public:
-  LiveRangeMerger(RegisterAllocationData* data, Zone* zone)
+  LiveRangeMerger(TopTierRegisterAllocationData* data, Zone* zone)
       : data_(data), zone_(zone) {}
   void Merge();
  private:
-  RegisterAllocationData* data() const { return data_; }
+  TopTierRegisterAllocationData* data() const { return data_; }
   Zone* zone() const { return zone_; }
   // Mark ranges spilled in deferred blocks, that also cover non-deferred code.
@@ -49,7 +49,7 @@ class LiveRangeMerger final : public ZoneObject {
   // because they would "spill in deferred blocks" anyway.
   void MarkRangesSpilledInDeferredBlocks();
-  RegisterAllocationData* const data_;
+  TopTierRegisterAllocationData* const data_;
   Zone* const zone_;
   DISALLOW_COPY_AND_ASSIGN(LiveRangeMerger);
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_REGISTER_ALLOCATION_H_
#define V8_COMPILER_BACKEND_REGISTER_ALLOCATION_H_

#include "src/codegen/register-configuration.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace compiler {

enum class RegisterKind { kGeneral, kDouble };

inline int GetRegisterCount(const RegisterConfiguration* config,
                            RegisterKind kind) {
  switch (kind) {
    case RegisterKind::kGeneral:
      return config->num_general_registers();
    case RegisterKind::kDouble:
      return config->num_double_registers();
  }
}

inline int GetAllocatableRegisterCount(const RegisterConfiguration* config,
                                       RegisterKind kind) {
  switch (kind) {
    case RegisterKind::kGeneral:
      return config->num_allocatable_general_registers();
    case RegisterKind::kDouble:
      return config->num_allocatable_double_registers();
  }
}

inline const int* GetAllocatableRegisterCodes(
    const RegisterConfiguration* config, RegisterKind kind) {
  switch (kind) {
    case RegisterKind::kGeneral:
      return config->allocatable_general_codes();
    case RegisterKind::kDouble:
      return config->allocatable_double_codes();
  }
}

inline int ByteWidthForStackSlot(MachineRepresentation rep) {
  switch (rep) {
    case MachineRepresentation::kBit:
    case MachineRepresentation::kWord8:
    case MachineRepresentation::kWord16:
    case MachineRepresentation::kWord32:
    case MachineRepresentation::kFloat32:
      return kSystemPointerSize;
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kCompressedPointer:
    case MachineRepresentation::kCompressed:
      // TODO(ishell): kTaggedSize once half size locations are supported.
      return kSystemPointerSize;
    case MachineRepresentation::kWord64:
    case MachineRepresentation::kFloat64:
      return kDoubleSize;
    case MachineRepresentation::kSimd128:
      return kSimd128Size;
    case MachineRepresentation::kNone:
      break;
  }
  UNREACHABLE();
}

class RegisterAllocationData : public ZoneObject {
 public:
  enum Type {
    kTopTier,
    kMidTier,
  };

  Type type() const { return type_; }

 protected:
  explicit RegisterAllocationData(Type type) : type_(type) {}

 private:
  Type type_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_REGISTER_ALLOCATION_H_
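
As a side note on why the RegisterKind helpers above exist: they let allocator code size its bookkeeping per kind without hard-coding general vs. double registers. The sketch below assumes a made-up StubConfig with invented field names in place of V8's RegisterConfiguration; only the switch-on-kind shape mirrors the header.

// Hypothetical usage sketch of the per-kind helper pattern; StubConfig and
// its numbers are invented for illustration and are not V8 APIs.
#include <cstdio>
#include <initializer_list>

enum class RegisterKind { kGeneral, kDouble };

struct StubConfig {
  int num_allocatable_general = 12;  // made-up counts
  int num_allocatable_double = 24;
};

// Mirrors GetAllocatableRegisterCount(): map a RegisterKind onto the matching
// field so allocation code can stay kind-agnostic.
int AllocatableRegisterCount(const StubConfig& config, RegisterKind kind) {
  switch (kind) {
    case RegisterKind::kGeneral:
      return config.num_allocatable_general;
    case RegisterKind::kDouble:
      return config.num_allocatable_double;
  }
  return 0;
}

int main() {
  StubConfig config;
  for (RegisterKind kind : {RegisterKind::kGeneral, RegisterKind::kDouble}) {
    std::printf("kind %d: %d allocatable registers\n",
                static_cast<int>(kind), AllocatableRegisterCount(config, kind));
  }
  return 0;
}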
This diff is collapsed.
This diff is collapsed.
@@ -11,6 +11,7 @@
 #include "src/codegen/optimized-compilation-info.h"
 #include "src/codegen/source-position.h"
 #include "src/compiler/all-nodes.h"
+#include "src/compiler/backend/register-allocation.h"
 #include "src/compiler/backend/register-allocator.h"
 #include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/graph.h"
@@ -424,7 +425,8 @@ class GraphC1Visualizer {
   void PrintSchedule(const char* phase, const Schedule* schedule,
                      const SourcePositionTable* positions,
                      const InstructionSequence* instructions);
-  void PrintLiveRanges(const char* phase, const RegisterAllocationData* data);
+  void PrintLiveRanges(const char* phase,
+                       const TopTierRegisterAllocationData* data);
   Zone* zone() const { return zone_; }
  private:
@@ -708,9 +710,8 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
   }
 }
-void GraphC1Visualizer::PrintLiveRanges(const char* phase,
-                                        const RegisterAllocationData* data) {
+void GraphC1Visualizer::PrintLiveRanges(
+    const char* phase, const TopTierRegisterAllocationData* data) {
   Tag tag(this, "intervals");
   PrintStringProperty("name", phase);
@@ -824,9 +825,14 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
 std::ostream& operator<<(std::ostream& os,
                          const AsC1VRegisterAllocationData& ac) {
-  AccountingAllocator allocator;
-  Zone tmp_zone(&allocator, ZONE_NAME);
-  GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
+  // TODO(rmcilroy): Add support for fast register allocator.
+  if (ac.data_->type() == RegisterAllocationData::kTopTier) {
+    AccountingAllocator allocator;
+    Zone tmp_zone(&allocator, ZONE_NAME);
+    GraphC1Visualizer(os, &tmp_zone)
+        .PrintLiveRanges(ac.phase_,
+                         TopTierRegisterAllocationData::cast(ac.data_));
+  }
   return os;
 }
@@ -1067,12 +1073,22 @@ void PrintTopLevelLiveRanges(std::ostream& os,
 std::ostream& operator<<(std::ostream& os,
                          const RegisterAllocationDataAsJSON& ac) {
-  os << "\"fixed_double_live_ranges\": ";
-  PrintTopLevelLiveRanges(os, ac.data_.fixed_double_live_ranges(), ac.code_);
-  os << ",\"fixed_live_ranges\": ";
-  PrintTopLevelLiveRanges(os, ac.data_.fixed_live_ranges(), ac.code_);
-  os << ",\"live_ranges\": ";
-  PrintTopLevelLiveRanges(os, ac.data_.live_ranges(), ac.code_);
+  if (ac.data_.type() == RegisterAllocationData::kTopTier) {
+    const TopTierRegisterAllocationData& ac_data =
+        TopTierRegisterAllocationData::cast(ac.data_);
+    os << "\"fixed_double_live_ranges\": ";
+    PrintTopLevelLiveRanges(os, ac_data.fixed_double_live_ranges(), ac.code_);
+    os << ",\"fixed_live_ranges\": ";
+    PrintTopLevelLiveRanges(os, ac_data.fixed_live_ranges(), ac.code_);
+    os << ",\"live_ranges\": ";
+    PrintTopLevelLiveRanges(os, ac_data.live_ranges(), ac.code_);
+  } else {
+    // TODO(rmcilroy): Add support for fast register allocation data. For now
+    // output the expected fields to keep Turbolizer happy.
+    os << "\"fixed_double_live_ranges\": {}";
+    os << ",\"fixed_live_ranges\": {}";
+    os << ",\"live_ranges\": {}";
+  }
   return os;
 }
This diff is collapsed.
@@ -101,7 +101,7 @@ class Pipeline : public AllStatic {
   // Run just the register allocator phases.
   V8_EXPORT_PRIVATE static bool AllocateRegistersForTesting(
       const RegisterConfiguration* config, InstructionSequence* sequence,
-      bool run_verifier);
+      bool use_fast_register_allocator, bool run_verifier);
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
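
The pipeline.cc change that consumes the new use_fast_register_allocator parameter is in a collapsed diff above, so the following is only an illustrative sketch of the dispatch it enables; the function names are placeholders, not real pipeline.cc APIs.

// Illustrative-only sketch: choose a register-allocation path from the flag
// that AllocateRegistersForTesting() now threads through. The two phase
// functions are placeholders and do not correspond to V8 functions.
#include <iostream>

namespace sketch {

void RunTopTierRegisterAllocation() {
  std::cout << "running the existing top-tier register allocator\n";
}

void RunFastRegisterAllocation() {
  std::cout << "running the fast mid-tier register allocator\n";
}

void AllocateRegisters(bool use_fast_register_allocator) {
  if (use_fast_register_allocator) {
    RunFastRegisterAllocation();
  } else {
    RunTopTierRegisterAllocation();
  }
}

}  // namespace sketch

int main() {
  sketch::AllocateRegisters(/*use_fast_register_allocator=*/false);
  sketch::AllocateRegisters(/*use_fast_register_allocator=*/true);
  return 0;
}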
@@ -507,8 +507,9 @@ DEFINE_BOOL(trace_migration, false, "trace object migration")
 DEFINE_BOOL(trace_generalization, false, "trace map generalization")
 // Flags for TurboProp.
-DEFINE_BOOL(turboprop, false,
-            "enable experimental turboprop mid-tier compiler.")
+DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
+DEFINE_BOOL(turboprop_fast_reg_alloc, false,
+            "enable experimental fast register allocator for mid-tier compiler")
 DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
 DEFINE_IMPLICATION(turboprop, concurrent_inlining)
 DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
@@ -78,7 +78,7 @@ class RegisterAllocatorTest : public InstructionSequenceTest {
  public:
   void Allocate() {
     WireBlocks();
-    Pipeline::AllocateRegistersForTesting(config(), sequence(), true);
+    Pipeline::AllocateRegistersForTesting(config(), sequence(), false, true);
   }
 };