Commit abfd1220 authored by Clemens Backes, committed by V8 LUCI CQ

[TurboFan] Use SparseBitVector in top-tier register allocator

Similar to https://crrev.com/c/3634781, this switches some {BitVector}s
in the regular top-tier register allocator to {SparseBitVector}. For
functions with a huge number of virtual registers, this saves a
significant amount of memory and also improves performance.

For small numbers of registers, though, we probably introduce a slight
regression, because there is one more dynamic check to be performed
before accessing the bits.

R=mslekova@chromium.org

Bug: chromium:1313379, v8:12780
Change-Id: Ib65d22ad0a8e2c933f7178f4fefc36e500be623c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3650602
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80806}
parent 7e32d8a5
......@@ -1885,23 +1885,23 @@ LiveRangeBuilder::LiveRangeBuilder(TopTierRegisterAllocationData* data,
Zone* local_zone)
: data_(data), phi_hints_(local_zone) {}
BitVector* LiveRangeBuilder::ComputeLiveOut(
SparseBitVector* LiveRangeBuilder::ComputeLiveOut(
const InstructionBlock* block, TopTierRegisterAllocationData* data) {
size_t block_index = block->rpo_number().ToSize();
BitVector* live_out = data->live_out_sets()[block_index];
SparseBitVector* live_out = data->live_out_sets()[block_index];
if (live_out == nullptr) {
// Compute live out for the given block, except not including backward
// successor edges.
Zone* zone = data->allocation_zone();
const InstructionSequence* code = data->code();
live_out = zone->New<BitVector>(code->VirtualRegisterCount(), zone);
live_out = zone->New<SparseBitVector>(zone);
// Process all successor blocks.
for (const RpoNumber& succ : block->successors()) {
// Add values live on entry to the successor.
if (succ <= block->rpo_number()) continue;
BitVector* live_in = data->live_in_sets()[succ.ToSize()];
SparseBitVector* live_in = data->live_in_sets()[succ.ToSize()];
if (live_in != nullptr) live_out->Union(*live_in);
// All phi input operands corresponding to this successor edge are live
......@@ -1919,7 +1919,7 @@ BitVector* LiveRangeBuilder::ComputeLiveOut(
}
void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
BitVector* live_out) {
SparseBitVector* live_out) {
// Add an interval that includes the entire block to the live range for
// each live_out value.
LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
......@@ -2114,7 +2114,7 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
}
void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
BitVector* live) {
SparseBitVector* live) {
int block_start = block->first_instruction_index();
LifetimePosition block_start_position =
LifetimePosition::GapFromInstructionIndex(block_start);
......@@ -2348,7 +2348,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
BitVector* live) {
SparseBitVector* live) {
for (PhiInstruction* phi : block->phis()) {
// The live range interval already ends at the first instruction of the
// block.
......@@ -2470,7 +2470,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
}
void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
BitVector* live) {
SparseBitVector* live) {
DCHECK(block->IsLoopHeader());
// Add a live range stretching from the first loop instruction to the last
// for each value live on entry to the header.
......@@ -2498,7 +2498,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
data_->tick_counter()->TickAndMaybeEnterSafepoint();
InstructionBlock* block =
code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
BitVector* live = ComputeLiveOut(block, data());
SparseBitVector* live = ComputeLiveOut(block, data());
// Initially consider all live_out values live for the entire block. We
// will shorten these intervals if necessary.
AddInitialIntervals(block, live);
......@@ -4877,10 +4877,10 @@ bool LiveRangeConnector::CanEagerlyResolveControlFlow(
void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
// Lazily linearize live ranges in memory for fast lookup.
LiveRangeFinder finder(data(), local_zone);
ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
ZoneVector<SparseBitVector*>& live_in_sets = data()->live_in_sets();
for (const InstructionBlock* block : code()->instruction_blocks()) {
if (CanEagerlyResolveControlFlow(block)) continue;
BitVector* live = live_in_sets[block->rpo_number().ToInt()];
SparseBitVector* live = live_in_sets[block->rpo_number().ToInt()];
auto it = live->begin();
auto end = live->end();
while (it != end) {
......@@ -5133,9 +5133,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
// Seek the deferred blocks that dominate locations requiring spill operands,
// and spill there. We only need to spill at the start of such blocks.
BitVector done_blocks(
range->GetListOfBlocksRequiringSpillOperands(data())->length(),
temp_zone);
SparseBitVector done_blocks(temp_zone);
while (!worklist.empty()) {
int block_id = worklist.front();
worklist.pop();
......
......@@ -13,6 +13,7 @@
#include "src/compiler/backend/register-allocation.h"
#include "src/flags/flags.h"
#include "src/utils/ostreams.h"
#include "src/utils/sparse-bit-vector.h"
#include "src/zone/zone-containers.h"
namespace v8 {
......@@ -285,8 +286,8 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
const ZoneVector<TopLevelLiveRange*>& fixed_simd128_live_ranges() const {
return fixed_simd128_live_ranges_;
}
ZoneVector<BitVector*>& live_in_sets() { return live_in_sets_; }
ZoneVector<BitVector*>& live_out_sets() { return live_out_sets_; }
ZoneVector<SparseBitVector*>& live_in_sets() { return live_in_sets_; }
ZoneVector<SparseBitVector*>& live_out_sets() { return live_out_sets_; }
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
DelayedReferences& delayed_references() { return delayed_references_; }
InstructionSequence* code() const { return code_; }
......@@ -361,8 +362,8 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
const char* const debug_name_;
const RegisterConfiguration* const config_;
PhiMap phi_map_;
ZoneVector<BitVector*> live_in_sets_;
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<SparseBitVector*> live_in_sets_;
ZoneVector<SparseBitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_float_live_ranges_;
......@@ -931,8 +932,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
spill_move_insertion_locations_ = nullptr;
list_of_blocks_requiring_spill_operands_ =
zone->New<BitVector>(total_block_count, zone);
list_of_blocks_requiring_spill_operands_ = zone->New<SparseBitVector>(zone);
}
// Updates internal data structures to reflect that this range is not
......@@ -940,8 +940,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
void TransitionRangeToDeferredSpill(Zone* zone, int total_block_count) {
spill_start_index_ = -1;
spill_move_insertion_locations_ = nullptr;
list_of_blocks_requiring_spill_operands_ =
zone->New<BitVector>(total_block_count, zone);
list_of_blocks_requiring_spill_operands_ = zone->New<SparseBitVector>(zone);
}
// Promotes this range to spill at definition if it was marked for spilling
......@@ -1014,7 +1013,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
GetListOfBlocksRequiringSpillOperands(data)->Add(block_id.ToInt());
}
BitVector* GetListOfBlocksRequiringSpillOperands(
SparseBitVector* GetListOfBlocksRequiringSpillOperands(
const TopTierRegisterAllocationData* data) const {
DCHECK(IsSpilledOnlyInDeferredBlocks(data));
return list_of_blocks_requiring_spill_operands_;
......@@ -1049,7 +1048,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
union {
SpillMoveInsertionList* spill_move_insertion_locations_;
BitVector* list_of_blocks_requiring_spill_operands_;
SparseBitVector* list_of_blocks_requiring_spill_operands_;
};
// TODO(mtrofin): generalize spilling after definition, currently specialized
......@@ -1211,7 +1210,7 @@ class LiveRangeBuilder final : public ZoneObject {
// Phase 3: compute liveness of all virtual register.
void BuildLiveRanges();
static BitVector* ComputeLiveOut(const InstructionBlock* block,
static SparseBitVector* ComputeLiveOut(const InstructionBlock* block,
TopTierRegisterAllocationData* data);
private:
......@@ -1224,7 +1223,7 @@ class LiveRangeBuilder final : public ZoneObject {
Zone* allocation_zone() const { return data()->allocation_zone(); }
Zone* code_zone() const { return code()->zone(); }
const RegisterConfiguration* config() const { return data()->config(); }
ZoneVector<BitVector*>& live_in_sets() const {
ZoneVector<SparseBitVector*>& live_in_sets() const {
return data()->live_in_sets();
}
......@@ -1236,10 +1235,12 @@ class LiveRangeBuilder final : public ZoneObject {
bool NextIntervalStartsInDifferentBlocks(const UseInterval* interval) const;
// Liveness analysis support.
void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
void ProcessInstructions(const InstructionBlock* block, BitVector* live);
void ProcessPhis(const InstructionBlock* block, BitVector* live);
void ProcessLoopHeader(const InstructionBlock* block, BitVector* live);
void AddInitialIntervals(const InstructionBlock* block,
SparseBitVector* live_out);
void ProcessInstructions(const InstructionBlock* block,
SparseBitVector* live);
void ProcessPhis(const InstructionBlock* block, SparseBitVector* live);
void ProcessLoopHeader(const InstructionBlock* block, SparseBitVector* live);
static int FixedLiveRangeID(int index) { return -index - 1; }
int FixedFPLiveRangeID(int index, MachineRepresentation rep);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.