Commit 7b3ede33 authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Remove diff encoding of checkpoints

Remove StoreToFrame and the general diff encoding for checkpoints, and
instead make all Checkpoints immediately copy the live part of the
interpreter frame state.

This means that we don't need to recreate the frame state during graph
processing, and we don't have to copy the checkpoint's state for storing
in the deferred DeoptimizationInfo.

In theory, the diff encoding was meant to save zone memory for unused
checkpoints and for checkpoints that differ little from one another.
However,

  a) We expect to do most checkpoint elimination during graph building,
     so the assumption that many checkpoints will be unused seems
     unlikely to hold, and

  b) We need to copy the checkpoint's frame state when emitting deopts,
     so we don't actually avoid the copies anyway.

So, we can simplify things by removing this complexity.
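
To make the new shape concrete, here is a minimal standalone sketch of the
eager-snapshot idea (illustrative only, not part of the patch; Value, Frame,
and CompactCheckpoint are hypothetical stand-ins for ValueNode,
InterpreterFrameState, and CompactInterpreterFrameState):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Value { int id; };

// A dense interpreter frame: one slot per register.
using Frame = std::vector<Value*>;

// An eagerly copied checkpoint: only the live slots are retained, packed
// into a compact array next to the liveness map needed to decode them.
struct CompactCheckpoint {
  std::vector<Value*> live_slots;
  std::vector<bool> liveness;

  CompactCheckpoint(const Frame& frame, std::vector<bool> live)
      : liveness(std::move(live)) {
    for (std::size_t i = 0; i < frame.size(); ++i) {
      if (liveness[i]) live_slots.push_back(frame[i]);  // copy live part only
    }
  }

  // Walk the compact slots, reporting which register each one belongs to.
  template <typename F>
  void ForEachValue(F&& f) const {
    std::size_t compact = 0;
    for (std::size_t reg = 0; reg < liveness.size(); ++reg) {
      if (liveness[reg]) f(live_slots[compact++], reg);
    }
  }
};

int main() {
  Value a{1}, b{2}, c{3};
  Frame frame = {&a, &b, &c};
  // Snapshot taken eagerly: r1 is dead here, so only r0 and r2 are copied.
  CompactCheckpoint checkpoint(frame, {true, false, true});
  // Later mutation of the dense frame does not disturb the snapshot.
  frame[0] = nullptr;
  checkpoint.ForEachValue([](Value* v, std::size_t reg) {
    std::printf("r%zu -> Value %d\n", reg, v->id);
  });
  return 0;
}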

Bug: v8:7700
Change-Id: Iff9743fabbf7a017cccf0ece76a797c571764ea6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3545178
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79602}
parent 672bf4ee
......@@ -24,12 +24,12 @@ class InterpreterFrameState;
class DeoptimizationInfo {
public:
DeoptimizationInfo(BytecodeOffset bytecode_position,
InterpreterFrameState* checkpoint_state)
const CompactInterpreterFrameState* checkpoint_state)
: bytecode_position(bytecode_position),
checkpoint_state(checkpoint_state) {}
BytecodeOffset bytecode_position;
InterpreterFrameState* checkpoint_state;
const CompactInterpreterFrameState* checkpoint_state;
Label entry_label;
int index = -1;
};
......
......@@ -43,8 +43,6 @@ using StackToRegisterMoves =
class MaglevCodeGeneratingNodeProcessor {
public:
static constexpr bool kNeedsCheckpointStates = true;
explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
: code_gen_state_(code_gen_state) {}
......@@ -397,19 +395,22 @@ class MaglevCodeGeneratorImpl final {
deopt_info->bytecode_position, kFunctionLiteralIndex,
code_gen_state_.register_count(), return_offset, return_count);
auto* liveness = code_gen_state_.bytecode_analysis().GetInLivenessFor(
deopt_info->bytecode_position.ToInt());
// Closure
int closure_index = DeoptStackSlotIndexFromFPOffset(
StandardFrameConstants::kFunctionOffset);
translation_array_builder_.StoreStackSlot(closure_index);
// Parameters
for (int i = 0; i < code_gen_state_.parameter_count(); ++i) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
translation_array_builder_.StoreStackSlot(DeoptStackSlotFromStackSlot(
deopt_info->checkpoint_state->get(reg)->spill_slot()));
{
int i = 0;
deopt_info->checkpoint_state->ForEachParameter(
*code_gen_state_.compilation_unit(),
[&](ValueNode* value, interpreter::Register reg) {
DCHECK_EQ(reg.ToParameterIndex(), i);
translation_array_builder_.StoreStackSlot(
DeoptStackSlotFromStackSlot(value->spill_slot()));
i++;
});
}
// Context
......@@ -418,22 +419,42 @@ class MaglevCodeGeneratorImpl final {
translation_array_builder_.StoreStackSlot(context_index);
// Locals
for (int i = 0; i < code_gen_state_.register_count(); ++i) {
interpreter::Register reg(i);
if (liveness->RegisterIsLive(i)) {
translation_array_builder_.StoreStackSlot(DeoptStackSlotFromStackSlot(
deopt_info->checkpoint_state->get(reg)->spill_slot()));
} else {
{
int i = 0;
deopt_info->checkpoint_state->ForEachLocal(
*code_gen_state_.compilation_unit(),
[&](ValueNode* value, interpreter::Register reg) {
DCHECK_LE(i, reg.index());
while (i < reg.index()) {
translation_array_builder_.StoreLiteral(
kOptimizedOutConstantIndex);
i++;
}
DCHECK_EQ(i, reg.index());
translation_array_builder_.StoreStackSlot(
DeoptStackSlotFromStackSlot(value->spill_slot()));
i++;
});
while (i < code_gen_state_.register_count()) {
translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
i++;
}
}
// Accumulator
if (liveness->AccumulatorIsLive()) {
translation_array_builder_.StoreStackSlot(
deopt_info->checkpoint_state->accumulator()->spill_slot().index());
} else {
translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
{
// TODO(leszeks): Bit ugly to use a did_emit boolean here rather than
// explicitly checking for accumulator liveness.
bool did_emit = false;
deopt_info->checkpoint_state->ForAccumulator(
*code_gen_state_.compilation_unit(), [&](ValueNode* value) {
translation_array_builder_.StoreStackSlot(
DeoptStackSlotFromStackSlot(value->spill_slot()));
did_emit = true;
});
if (!did_emit) {
translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
}
}
}
......
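
The locals walk above rebuilds a dense register file from the sparse
snapshot, padding the dead gaps with the optimized-out marker. A standalone
sketch of that gap-filling pattern, assuming a simple (register, spill slot)
list in ascending register order and printf in place of the
TranslationArrayBuilder (hypothetical names, not the real Maglev API):

#include <cstdio>
#include <utility>
#include <vector>

// Sparse snapshot: (register index, spill slot) pairs for the live locals,
// in ascending register order -- the order ForEachLocal visits them in.
using LiveLocals = std::vector<std::pair<int, int>>;

// Emit one translation entry per register: a stack slot for live registers,
// an "optimized out" marker for the dead gaps before, between, and after.
void EmitLocals(const LiveLocals& live, int register_count) {
  int i = 0;
  for (const auto& [reg, spill_slot] : live) {
    while (i < reg) {  // dead registers before this live one
      std::printf("r%d: optimized out\n", i++);
    }
    std::printf("r%d: stack slot %d\n", i++, spill_slot);
  }
  while (i < register_count) {  // trailing dead registers
    std::printf("r%d: optimized out\n", i++);
  }
}

int main() {
  // r1 and r3 are live in a five-register frame.
  EmitLocals({{1, 7}, {3, 9}}, 5);
  return 0;
}
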
......@@ -46,8 +46,6 @@ namespace maglev {
class NumberingProcessor {
public:
static constexpr bool kNeedsCheckpointStates = false;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) { node_id_ = 1; }
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
......@@ -62,8 +60,6 @@ class NumberingProcessor {
class UseMarkingProcessor {
public:
static constexpr bool kNeedsCheckpointStates = true;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
......@@ -106,23 +102,15 @@ class UseMarkingProcessor {
private:
void MarkCheckpointNodes(NodeBase* node, const ProcessingState& state) {
const InterpreterFrameState* checkpoint_state =
state.checkpoint_frame_state();
const CompactInterpreterFrameState* checkpoint_state =
state.checkpoint()->frame();
int use_id = node->id();
for (int i = 0; i < state.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
ValueNode* node = checkpoint_state->get(reg);
if (node) node->mark_use(use_id, nullptr);
}
for (int i = 0; i < state.register_count(); i++) {
interpreter::Register reg = interpreter::Register(i);
ValueNode* node = checkpoint_state->get(reg);
if (node) node->mark_use(use_id, nullptr);
}
if (checkpoint_state->accumulator()) {
checkpoint_state->accumulator()->mark_use(use_id, nullptr);
}
checkpoint_state->ForEachValue(
*state.compilation_unit(),
[use_id](ValueNode* node, interpreter::Register reg) {
if (node) node->mark_use(use_id, nullptr);
});
}
};
......
......@@ -188,14 +188,14 @@ class MaglevGraphBuilder {
return;
}
current_interpreter_frame_.set(target, value);
AddNewNode<StoreToFrame>({}, value, target);
}
void AddCheckpoint() {
// TODO(v8:7700): Verify this calls the initializer list overload.
AddNewNode<Checkpoint>({}, BytecodeOffset(iterator_.current_offset()),
GetInLiveness()->AccumulatorIsLive(),
GetAccumulator());
AddNewNode<Checkpoint>(
{}, BytecodeOffset(iterator_.current_offset()),
zone()->New<CompactInterpreterFrameState>(
*compilation_unit_, GetInLiveness(), current_interpreter_frame_));
has_valid_checkpoint_ = true;
}
......
......@@ -26,9 +26,6 @@ class ProcessingState;
class MaglevPrintingVisitor {
public:
// Could be interesting to print checkpoints too.
static constexpr bool kNeedsCheckpointStates = false;
explicit MaglevPrintingVisitor(std::ostream& os);
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
......
......@@ -24,10 +24,6 @@ namespace maglev {
//
// It expects a NodeProcessor class with:
//
// // True if the GraphProcessor should snapshot Checkpoint states for
// // deopting nodes.
// static constexpr bool kNeedsCheckpointStates;
//
// // A function that processes the graph before the nodes are walked.
// void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
//
......@@ -51,14 +47,10 @@ class ProcessingState {
public:
explicit ProcessingState(MaglevCompilationUnit* compilation_unit,
BlockConstIterator block_it,
const InterpreterFrameState* interpreter_frame_state,
const Checkpoint* checkpoint,
const InterpreterFrameState* checkpoint_frame_state)
const Checkpoint* checkpoint)
: compilation_unit_(compilation_unit),
block_it_(block_it),
interpreter_frame_state_(interpreter_frame_state),
checkpoint_(checkpoint),
checkpoint_frame_state_(checkpoint_frame_state) {}
checkpoint_(checkpoint) {}
// Disallow copies, since the underlying frame states stay mutable.
ProcessingState(const ProcessingState&) = delete;
......@@ -67,20 +59,12 @@ class ProcessingState {
BasicBlock* block() const { return *block_it_; }
BasicBlock* next_block() const { return *(block_it_ + 1); }
const InterpreterFrameState* interpreter_frame_state() const {
DCHECK_NOT_NULL(interpreter_frame_state_);
return interpreter_frame_state_;
}
const Checkpoint* checkpoint() const {
DCHECK_NOT_NULL(checkpoint_);
return checkpoint_;
}
const InterpreterFrameState* checkpoint_frame_state() const {
DCHECK_NOT_NULL(checkpoint_frame_state_);
return checkpoint_frame_state_;
}
MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
int register_count() const { return compilation_unit_->register_count(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
......@@ -92,27 +76,17 @@ class ProcessingState {
private:
MaglevCompilationUnit* compilation_unit_;
BlockConstIterator block_it_;
const InterpreterFrameState* interpreter_frame_state_;
const Checkpoint* checkpoint_;
const InterpreterFrameState* checkpoint_frame_state_;
};
template <typename NodeProcessor>
class GraphProcessor {
public:
static constexpr bool kNeedsCheckpointStates =
NodeProcessor::kNeedsCheckpointStates;
template <typename... Args>
explicit GraphProcessor(MaglevCompilationUnit* compilation_unit,
Args&&... args)
: compilation_unit_(compilation_unit),
node_processor_(std::forward<Args>(args)...),
current_frame_state_(*compilation_unit_) {
if (kNeedsCheckpointStates) {
checkpoint_state_.emplace(*compilation_unit_);
}
}
node_processor_(std::forward<Args>(args)...) {}
void ProcessGraph(Graph* graph) {
graph_ = graph;
......@@ -124,14 +98,6 @@ class GraphProcessor {
node_processor_.PreProcessBasicBlock(compilation_unit_, block);
if (block->has_state()) {
current_frame_state_.CopyFrom(*compilation_unit_, *block->state());
if (kNeedsCheckpointStates) {
checkpoint_state_->last_checkpoint_block_it = block_it_;
checkpoint_state_->last_checkpoint_node_it = NodeConstIterator();
}
}
if (block->has_phi()) {
for (Phi* phi : *block->phis()) {
node_processor_.Process(phi, GetCurrentState());
......@@ -155,11 +121,7 @@ class GraphProcessor {
private:
ProcessingState GetCurrentState() {
return ProcessingState(
compilation_unit_, block_it_, &current_frame_state_,
kNeedsCheckpointStates ? checkpoint_state_->latest_checkpoint : nullptr,
kNeedsCheckpointStates ? &checkpoint_state_->checkpoint_frame_state
: nullptr);
return ProcessingState(compilation_unit_, block_it_, latest_checkpoint_);
}
void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
......@@ -177,167 +139,7 @@ class GraphProcessor {
void PreProcess(NodeBase* node, const ProcessingState& state) {}
void PreProcess(Checkpoint* checkpoint, const ProcessingState& state) {
current_frame_state_.set_accumulator(checkpoint->accumulator());
if (kNeedsCheckpointStates) {
checkpoint_state_->latest_checkpoint = checkpoint;
if (checkpoint->is_used()) {
checkpoint_state_->checkpoint_frame_state.CopyFrom(
*compilation_unit_, current_frame_state_);
checkpoint_state_->last_checkpoint_block_it = block_it_;
checkpoint_state_->last_checkpoint_node_it = node_it_;
ClearDeadCheckpointNodes();
}
}
}
void PreProcess(StoreToFrame* store_to_frame, const ProcessingState& state) {
current_frame_state_.set(store_to_frame->target(), store_to_frame->value());
}
void PreProcess(SoftDeopt* node, const ProcessingState& state) {
PreProcessDeoptingNode();
}
void PreProcess(CheckMaps* node, const ProcessingState& state) {
PreProcessDeoptingNode();
}
void PreProcessDeoptingNode() {
if (!kNeedsCheckpointStates) return;
Checkpoint* checkpoint = checkpoint_state_->latest_checkpoint;
if (checkpoint->is_used()) {
DCHECK(!checkpoint_state_->last_checkpoint_node_it.is_null());
DCHECK_EQ(checkpoint, *checkpoint_state_->last_checkpoint_node_it);
return;
}
DCHECK_IMPLIES(!checkpoint_state_->last_checkpoint_node_it.is_null(),
checkpoint != *checkpoint_state_->last_checkpoint_node_it);
// TODO(leszeks): The following code is _ugly_, should figure out how to
// clean it up.
// Go to the previous state checkpoint (either on the Checkpoint that
// provided the current checkpoint snapshot, or on a BasicBlock).
BlockConstIterator block_it = checkpoint_state_->last_checkpoint_block_it;
NodeConstIterator node_it = checkpoint_state_->last_checkpoint_node_it;
if (node_it.is_null()) {
// There was no recent enough Checkpoint node, and the block iterator
// points at a basic block with a state snapshot. Copy that snapshot and
// start iterating from there.
BasicBlock* block = *block_it;
DCHECK(block->has_state());
checkpoint_state_->checkpoint_frame_state.CopyFrom(*compilation_unit_,
*block->state());
// Start iterating from the first node in the block.
node_it = block->nodes().begin();
} else {
// The node iterator should point at the previous Checkpoint node. We
// don't need that Checkpoint state snapshot anymore, we're making a new
// one, so we can just reuse the snapshot as-is without copying it.
DCHECK_NE(*node_it, checkpoint);
DCHECK((*node_it)->Is<Checkpoint>());
DCHECK((*node_it)->Cast<Checkpoint>()->is_used());
// Advance it by one since we don't need to check this node anymore.
++node_it;
}
// Now walk forward to the checkpoint, and apply any StoreToFrame operations
// along the way into the snapshotted checkpoint state.
BasicBlock* block = *block_it;
while (true) {
// Check if we've run out of nodes in this block, and advance to the
// next block if so.
while (node_it == block->nodes().end()) {
DCHECK_NE(block_it, graph_->end());
// We should only end up visiting blocks with fallthrough to the next
// block -- otherwise, the block should have had a frame state snapshot,
// as either a merge block or a non-fallthrough jump target.
if ((*block_it)->control_node()->Is<Jump>()) {
DCHECK_EQ((*block_it)->control_node()->Cast<Jump>()->target(),
*(block_it + 1));
} else {
DCHECK_IMPLIES((*block_it)
->control_node()
->Cast<ConditionalControlNode>()
->if_true() != *(block_it + 1),
(*block_it)
->control_node()
->Cast<ConditionalControlNode>()
->if_false() != *(block_it + 1));
}
// Advance to the next block (which the above DCHECKs confirm is the
// unconditional fallthrough from the previous block), and update the
// cached block pointer.
block_it++;
block = *block_it;
// We should never visit a block with state (aside from the very first
// block we visit), since then that should have been our start point
// to start with.
DCHECK(!(*block_it)->has_state());
node_it = (*block_it)->nodes().begin();
}
// We should never reach the current node, the "until" checkpoint node
// should be before it.
DCHECK_NE(node_it, node_it_);
Node* node = *node_it;
// Break once we hit the given Checkpoint node. This could be right at
// the start of the iteration, if the BasicBlock held the snapshot and the
// Checkpoint was the first node in it.
if (node == checkpoint) break;
// Update the state from the current node, if it's a state update.
if (node->Is<StoreToFrame>()) {
StoreToFrame* store_to_frame = node->Cast<StoreToFrame>();
checkpoint_state_->checkpoint_frame_state.set(store_to_frame->target(),
store_to_frame->value());
} else {
// Any checkpoints we meet along the way should be unused, otherwise
// they should have provided the most recent state snapshot.
DCHECK_IMPLIES(node->Is<Checkpoint>(),
!node->Cast<Checkpoint>()->is_used());
}
// Continue to the next node.
++node_it;
}
checkpoint_state_->last_checkpoint_block_it = block_it;
checkpoint_state_->last_checkpoint_node_it = node_it;
checkpoint_state_->checkpoint_frame_state.set_accumulator(
checkpoint->accumulator());
ClearDeadCheckpointNodes();
checkpoint->SetUsed();
}
// Walk the checkpointed state, and null out any values that are dead at this
// checkpoint.
// TODO(leszeks): Consider doing this on checkpoint copy, not as a
// post-process step.
void ClearDeadCheckpointNodes() {
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(
checkpoint_state_->latest_checkpoint->bytecode_position().ToInt());
for (int i = 0; i < register_count(); ++i) {
if (!liveness->RegisterIsLive(i)) {
checkpoint_state_->checkpoint_frame_state.set(interpreter::Register(i),
nullptr);
}
}
// The accumulator is on the checkpoint node itself, and should have already
// been nulled out during graph building if it's dead.
DCHECK_EQ(
!liveness->AccumulatorIsLive(),
checkpoint_state_->checkpoint_frame_state.accumulator() == nullptr);
latest_checkpoint_ = checkpoint;
}
int register_count() const { return compilation_unit_->register_count(); }
......@@ -350,19 +152,7 @@ class GraphProcessor {
Graph* graph_;
BlockConstIterator block_it_;
NodeConstIterator node_it_;
InterpreterFrameState current_frame_state_;
// The CheckpointState field only exists if the node processor needs
// checkpoint states.
struct CheckpointState {
explicit CheckpointState(const MaglevCompilationUnit& compilation_unit)
: checkpoint_frame_state(compilation_unit) {}
Checkpoint* latest_checkpoint = nullptr;
BlockConstIterator last_checkpoint_block_it;
NodeConstIterator last_checkpoint_node_it;
InterpreterFrameState checkpoint_frame_state;
};
base::Optional<CheckpointState> checkpoint_state_;
Checkpoint* latest_checkpoint_ = nullptr;
};
// A NodeProcessor that wraps multiple NodeProcessors, and forwards to each of
......@@ -373,8 +163,6 @@ class NodeMultiProcessor;
template <>
class NodeMultiProcessor<> {
public:
static constexpr bool kNeedsCheckpointStates = false;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
......@@ -387,9 +175,6 @@ class NodeMultiProcessor<Processor, Processors...>
using Base = NodeMultiProcessor<Processors...>;
public:
static constexpr bool kNeedsCheckpointStates =
Processor::kNeedsCheckpointStates || Base::kNeedsCheckpointStates;
template <typename Node>
void Process(Node* node, const ProcessingState& state) {
processor_.Process(node, state);
......
......@@ -71,6 +71,120 @@ class InterpreterFrameState {
RegisterFrameArray<ValueNode*> frame_;
};
class CompactInterpreterFrameState {
public:
CompactInterpreterFrameState(const MaglevCompilationUnit& info,
const compiler::BytecodeLivenessState* liveness)
: live_registers_and_accumulator_(
info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
liveness_(liveness) {}
CompactInterpreterFrameState(const MaglevCompilationUnit& info,
const compiler::BytecodeLivenessState* liveness,
const InterpreterFrameState& state)
: CompactInterpreterFrameState(info, liveness) {
ForEachValue(info, [&](ValueNode*& entry, interpreter::Register reg) {
entry = state.get(reg);
});
}
CompactInterpreterFrameState(const CompactInterpreterFrameState&) = delete;
CompactInterpreterFrameState(CompactInterpreterFrameState&&) = delete;
CompactInterpreterFrameState& operator=(const CompactInterpreterFrameState&) =
delete;
CompactInterpreterFrameState& operator=(CompactInterpreterFrameState&&) =
delete;
template <typename Function>
void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
for (int i = 0; i < info.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
f(live_registers_and_accumulator_[i], reg);
}
}
template <typename Function>
void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
for (int i = 0; i < info.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
f(live_registers_and_accumulator_[i], reg);
}
}
template <typename Function>
void ForEachLocal(const MaglevCompilationUnit& info, Function&& f) const {
int live_index = 0;
for (int register_index : *liveness_) {
interpreter::Register reg = interpreter::Register(register_index);
f(live_registers_and_accumulator_[info.parameter_count() + live_index++],
reg);
}
}
template <typename Function>
void ForEachLocal(const MaglevCompilationUnit& info, Function&& f) {
int live_index = 0;
for (int register_index : *liveness_) {
interpreter::Register reg = interpreter::Register(register_index);
f(live_registers_and_accumulator_[info.parameter_count() + live_index++],
reg);
}
}
template <typename Function>
void ForAccumulator(const MaglevCompilationUnit& info, Function&& f) {
if (liveness_->AccumulatorIsLive()) {
f(live_registers_and_accumulator_[SizeFor(info, liveness_) - 1]);
}
}
template <typename Function>
void ForAccumulator(const MaglevCompilationUnit& info, Function&& f) const {
if (liveness_->AccumulatorIsLive()) {
f(live_registers_and_accumulator_[SizeFor(info, liveness_) - 1]);
}
}
template <typename Function>
void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
ForEachParameter(info, f);
ForEachLocal(info, f);
}
template <typename Function>
void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
ForEachParameter(info, f);
ForEachLocal(info, f);
}
template <typename Function>
void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
ForEachRegister(info, f);
ForAccumulator(info, [&](ValueNode*& value) {
f(value, interpreter::Register::virtual_accumulator());
});
}
template <typename Function>
void ForEachValue(const MaglevCompilationUnit& info, Function&& f) const {
ForEachRegister(info, f);
ForAccumulator(info, [&](ValueNode* value) {
f(value, interpreter::Register::virtual_accumulator());
});
}
const compiler::BytecodeLivenessState* liveness() const { return liveness_; }
private:
static int SizeFor(const MaglevCompilationUnit& info,
const compiler::BytecodeLivenessState* liveness) {
return info.parameter_count() + liveness->live_value_count();
}
ValueNode** const live_registers_and_accumulator_;
const compiler::BytecodeLivenessState* const liveness_;
};
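
Reading off the layout: live_registers_and_accumulator_ packs parameters
first, then the live locals in liveness order, with the live accumulator (if
any) in the final slot, which is why ForAccumulator indexes SizeFor(...) - 1.
A small sketch of that index arithmetic (hypothetical and standalone;
live_value_count is modeled here as live locals plus the accumulator bit):

#include <cassert>
#include <vector>

// Mirror of SizeFor: parameters plus live_value_count, where the liveness
// bitmap counts live locals and, when live, the accumulator.
int SizeFor(int parameter_count, const std::vector<int>& live_locals,
            bool accumulator_live) {
  return parameter_count + static_cast<int>(live_locals.size()) +
         (accumulator_live ? 1 : 0);
}

int main() {
  const int parameter_count = 2;
  const std::vector<int> live_locals = {1, 4};  // r1 and r4 are live
  const bool accumulator_live = true;

  const int size = SizeFor(parameter_count, live_locals, accumulator_live);
  assert(size == 5);
  // Layout: [param0, param1, r1, r4, accumulator]
  //   parameters occupy [0, parameter_count)
  //   live locals follow in liveness order: r1 -> slot 2, r4 -> slot 3
  //   the live accumulator sits in the final slot
  assert(size - 1 == 4);  // ForAccumulator's SizeFor(...) - 1 index
  return 0;
}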
class MergePointRegisterState {
public:
class Iterator {
......@@ -136,17 +250,8 @@ class MergePointInterpreterFrameState {
const compiler::BytecodeLivenessState* liveness)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
live_registers_and_accumulator_(
info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
liveness_(liveness),
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
int live_index = 0;
ForEachRegister(info, [&](interpreter::Register reg) {
live_registers_and_accumulator_[live_index++] = state.get(reg);
});
if (liveness_->AccumulatorIsLive()) {
live_registers_and_accumulator_[live_index++] = state.accumulator();
}
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
frame_state_(info, liveness, state) {
predecessors_[0] = predecessor;
}
......@@ -156,27 +261,24 @@ class MergePointInterpreterFrameState {
const compiler::LoopInfo* loop_info)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
live_registers_and_accumulator_(
info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
liveness_(liveness),
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
int live_index = 0;
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
frame_state_(info, liveness) {
auto& assignments = loop_info->assignments();
ForEachParameter(info, [&](interpreter::Register reg) {
ValueNode* value = nullptr;
if (assignments.ContainsParameter(reg.ToParameterIndex())) {
value = NewLoopPhi(info.zone(), reg, merge_offset, value);
}
live_registers_and_accumulator_[live_index++] = value;
});
ForEachLocal([&](interpreter::Register reg) {
ValueNode* value = nullptr;
if (assignments.ContainsLocal(reg.index())) {
value = NewLoopPhi(info.zone(), reg, merge_offset, value);
}
live_registers_and_accumulator_[live_index++] = value;
});
DCHECK(!liveness_->AccumulatorIsLive());
frame_state_.ForEachParameter(
info, [&](ValueNode*& entry, interpreter::Register reg) {
entry = nullptr;
if (assignments.ContainsParameter(reg.ToParameterIndex())) {
entry = NewLoopPhi(info.zone(), reg, merge_offset);
}
});
frame_state_.ForEachLocal(
info, [&](ValueNode*& entry, interpreter::Register reg) {
entry = nullptr;
if (assignments.ContainsLocal(reg.index())) {
entry = NewLoopPhi(info.zone(), reg, merge_offset);
}
});
DCHECK(!frame_state_.liveness()->AccumulatorIsLive());
#ifdef DEBUG
predecessors_[0] = nullptr;
......@@ -192,8 +294,8 @@ class MergePointInterpreterFrameState {
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessors_[predecessors_so_far_] = predecessor;
ForEachValue(
compilation_unit, [&](interpreter::Register reg, ValueNode*& value) {
frame_state_.ForEachValue(
compilation_unit, [&](ValueNode*& value, interpreter::Register reg) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
value = MergeValue(compilation_unit.zone(), reg, value,
......@@ -203,8 +305,6 @@ class MergePointInterpreterFrameState {
DCHECK_LE(predecessors_so_far_, predecessor_count_);
}
MergePointRegisterState& register_state() { return register_state_; }
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void MergeLoop(const MaglevCompilationUnit& compilation_unit,
......@@ -214,16 +314,20 @@ class MergePointInterpreterFrameState {
DCHECK_NULL(predecessors_[0]);
predecessors_[0] = loop_end_block;
ForEachValue(
compilation_unit, [&](interpreter::Register reg, ValueNode* value) {
frame_state_.ForEachValue(
compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
MergeLoopValue(compilation_unit.zone(), reg, value,
loop_end_state.get(reg), merge_offset);
});
DCHECK(!liveness_->AccumulatorIsLive());
}
const CompactInterpreterFrameState& frame_state() const {
return frame_state_;
}
MergePointRegisterState& register_state() { return register_state_; }
bool has_phi() const { return !phis_.is_empty(); }
Phi::List* phis() { return &phis_; }
......@@ -301,8 +405,8 @@ class MergePointInterpreterFrameState {
result->set_input(0, unmerged);
}
ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg, int merge_offset,
ValueNode* initial_value) {
ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg,
int merge_offset) {
DCHECK_EQ(predecessors_so_far_, 1);
// Create a new loop phi, which for now is empty.
Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
......@@ -312,84 +416,23 @@ class MergePointInterpreterFrameState {
phis_.Add(result);
return result;
}
static int SizeFor(const MaglevCompilationUnit& info,
const compiler::BytecodeLivenessState* liveness) {
return info.parameter_count() + liveness->live_value_count();
}
template <typename Function>
void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
for (int i = 0; i < info.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
f(reg);
}
}
template <typename Function>
void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
for (int i = 0; i < info.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
f(reg);
}
}
template <typename Function>
void ForEachLocal(Function&& f) const {
for (int register_index : *liveness_) {
interpreter::Register reg = interpreter::Register(register_index);
f(reg);
}
}
template <typename Function>
void ForEachLocal(Function&& f) {
for (int register_index : *liveness_) {
interpreter::Register reg = interpreter::Register(register_index);
f(reg);
}
}
template <typename Function>
void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
ForEachParameter(info, f);
ForEachLocal(f);
}
template <typename Function>
void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
ForEachParameter(info, f);
ForEachLocal(f);
}
template <typename Function>
void ForEachValue(const MaglevCompilationUnit& info, Function&& f) const {
int live_index = 0;
ForEachRegister(info, [&](interpreter::Register reg) {
f(reg, live_registers_and_accumulator_[live_index++]);
});
if (liveness_->AccumulatorIsLive()) {
f(interpreter::Register::virtual_accumulator(),
live_registers_and_accumulator_[live_index++]);
}
DCHECK_EQ(live_index, SizeFor(info, liveness_));
}
int predecessor_count_;
int predecessors_so_far_;
Phi::List phis_;
ValueNode** live_registers_and_accumulator_;
const compiler::BytecodeLivenessState* liveness_ = nullptr;
BasicBlock** predecessors_;
CompactInterpreterFrameState frame_state_;
MergePointRegisterState register_state_;
};
void InterpreterFrameState::CopyFrom(
const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state) {
state.ForEachValue(info, [&](interpreter::Register reg, ValueNode*& value) {
frame_[reg] = value;
});
state.frame_state().ForEachValue(
info, [&](ValueNode* value, interpreter::Register reg) {
frame_[reg] = value;
});
}
} // namespace maglev
......
......@@ -164,6 +164,10 @@ struct CopyForDeferredHelper<const InterpreterFrameState*> {
*compilation_unit, *frame_state);
}
};
// CompactInterpreterFrameState is copied by value.
template <>
struct CopyForDeferredHelper<const CompactInterpreterFrameState*>
: public CopyForDeferredByValue<const CompactInterpreterFrameState*> {};
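
Since the compact frame state is immutable once built, deferred code can
capture the pointer itself rather than deep-copying the frame, which is what
the by-value specialization above expresses. A toy version of the trait
dispatch (hypothetical names, not the real Maglev CopyForDeferredHelper):

#include <string>

// Default behaviour: deep-copy the value into deferred storage.
template <typename T>
struct CopyForDeferredTrait {
  static T Copy(const T& value) { return T(value); }
};

// An immutable snapshot type: copying the pointer is sufficient, because
// nothing mutates the pointee after construction.
struct CompactState {
  std::string payload;
};

template <>
struct CopyForDeferredTrait<const CompactState*> {
  static const CompactState* Copy(const CompactState* value) { return value; }
};

int main() {
  const CompactState state{"snapshot"};
  // By-value "copy": just the pointer, no deep copy of the payload.
  const CompactState* deferred =
      CopyForDeferredTrait<const CompactState*>::Copy(&state);
  (void)deferred;
  return 0;
}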
template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T&& value) {
......@@ -260,14 +264,10 @@ void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
DeoptimizationInfo* CreateEagerDeopt(
MaglevCodeGenState* code_gen_state, BytecodeOffset bytecode_position,
const InterpreterFrameState* checkpoint_state) {
const CompactInterpreterFrameState* checkpoint_state) {
Zone* zone = code_gen_state->compilation_unit()->zone();
DeoptimizationInfo* deopt_info = zone->New<DeoptimizationInfo>(
bytecode_position,
// TODO(leszeks): Right now we unconditionally copy the IFS. If we made
// checkpoint states already always be copies, we could remove this copy.
zone->New<InterpreterFrameState>(*code_gen_state->compilation_unit(),
*checkpoint_state));
DeoptimizationInfo* deopt_info =
zone->New<DeoptimizationInfo>(bytecode_position, checkpoint_state);
code_gen_state->PushNonLazyDeopt(deopt_info);
return deopt_info;
......@@ -275,7 +275,7 @@ DeoptimizationInfo* CreateEagerDeopt(
void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
BytecodeOffset bytecode_position,
const InterpreterFrameState* checkpoint_state) {
const CompactInterpreterFrameState* checkpoint_state) {
DeoptimizationInfo* deopt_info =
CreateEagerDeopt(code_gen_state, bytecode_position, checkpoint_state);
__ RecordComment("-- Jump to eager deopt");
......@@ -287,7 +287,7 @@ void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
DCHECK(node->properties().can_deopt());
EmitEagerDeoptIf(cond, code_gen_state,
state.checkpoint()->bytecode_position(),
state.checkpoint_frame_state());
state.checkpoint()->frame());
}
// ---
......@@ -378,7 +378,7 @@ void Checkpoint::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {}
void Checkpoint::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << PrintNodeLabel(graph_labeller, accumulator()) << ")";
os << "(" << ToString(*frame()->liveness()) << ")";
}
void SoftDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
......@@ -500,7 +500,7 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
not_equal, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label,
Register object, CheckMaps* node, BytecodeOffset checkpoint_position,
const InterpreterFrameState* checkpoint_state_snapshot,
const CompactInterpreterFrameState* checkpoint_state_snapshot,
Register map_tmp) {
DeoptimizationInfo* deopt = CreateEagerDeopt(
code_gen_state, checkpoint_position, checkpoint_state_snapshot);
......@@ -530,7 +530,7 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
__ jmp(&deopt->entry_label);
},
object, this, state.checkpoint()->bytecode_position(),
state.checkpoint_frame_state(), map_tmp);
state.checkpoint()->frame(), map_tmp);
} else {
EmitEagerDeoptIf(not_equal, code_gen_state, this, state);
}
......@@ -623,16 +623,6 @@ void LoadNamedGeneric::PrintParams(std::ostream& os,
os << "(" << name_ << ")";
}
void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {}
void StoreToFrame::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << target().ToString() << " ← "
<< PrintNodeLabel(graph_labeller, value()) << ")";
}
void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UNREACHABLE();
......@@ -742,9 +732,7 @@ void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {
}
}
void Phi::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DCHECK_EQ(state.interpreter_frame_state()->get(owner()), this);
}
const ProcessingState& state) {}
void Phi::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << owner().ToString() << ")";
......
......@@ -28,6 +28,7 @@ class ProcessingState;
class MaglevCodeGenState;
class MaglevGraphLabeller;
class MaglevVregAllocationState;
class CompactInterpreterFrameState;
// Nodes are either
// 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or
......@@ -80,7 +81,6 @@ class MaglevVregAllocationState;
V(GapMove) \
V(SoftDeopt) \
V(StoreField) \
V(StoreToFrame) \
VALUE_NODE_LIST(V)
#define CONDITIONAL_CONTROL_NODE_LIST(V) \
......@@ -870,25 +870,21 @@ class Checkpoint : public FixedInputNodeT<0, Checkpoint> {
public:
explicit Checkpoint(size_t input_count, BytecodeOffset bytecode_position,
bool accumulator_is_live, ValueNode* accumulator)
CompactInterpreterFrameState* frame)
: Base(input_count),
bytecode_position_(bytecode_position),
accumulator_(accumulator_is_live ? accumulator : nullptr) {}
frame_(frame) {}
BytecodeOffset bytecode_position() const { return bytecode_position_; }
bool is_used() const { return IsUsedBit::decode(bit_field_); }
void SetUsed() { bit_field_ = IsUsedBit::update(bit_field_, true); }
ValueNode* accumulator() const { return accumulator_; }
const CompactInterpreterFrameState* frame() const { return frame_; }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
using IsUsedBit = NextBitField<bool, 1>;
const BytecodeOffset bytecode_position_;
ValueNode* const accumulator_;
const CompactInterpreterFrameState* const frame_;
};
class SoftDeopt : public FixedInputNodeT<0, SoftDeopt> {
......@@ -1022,26 +1018,6 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
const compiler::NameRef name_;
};
class StoreToFrame : public FixedInputNodeT<0, StoreToFrame> {
using Base = FixedInputNodeT<0, StoreToFrame>;
public:
StoreToFrame(size_t input_count, ValueNode* value,
interpreter::Register target)
: Base(input_count), value_(value), target_(target) {}
interpreter::Register target() const { return target_; }
ValueNode* value() const { return value_; }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
ValueNode* const value_;
const interpreter::Register target_;
};
class GapMove : public FixedInputNodeT<0, GapMove> {
using Base = FixedInputNodeT<0, GapMove>;
......
......@@ -270,8 +270,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
compiler::AllocatedOperand::cast(allocation));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr,
nullptr, nullptr));
phi, ProcessingState(compilation_unit_, block_it_, nullptr));
printing_visitor_->os()
<< "phi (new reg) " << phi->result().operand() << std::endl;
}
......@@ -285,8 +284,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
phi->result().SetAllocated(phi->spill_slot());
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr,
nullptr, nullptr));
phi, ProcessingState(compilation_unit_, block_it_, nullptr));
printing_visitor_->os()
<< "phi (stack) " << phi->result().operand() << std::endl;
}
......@@ -344,8 +342,7 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
nullptr));
node, ProcessingState(compilation_unit_, block_it_, nullptr));
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
printing_visitor_->os() << "\n";
......@@ -514,8 +511,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
nullptr));
node, ProcessingState(compilation_unit_, block_it_, nullptr));
}
}
......@@ -528,8 +524,7 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
phi->result().SetAllocated(ForceAllocate(reg, phi));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr,
nullptr, nullptr));
phi, ProcessingState(compilation_unit_, block_it_, nullptr));
printing_visitor_->os()
<< "phi (reuse) " << input.operand() << std::endl;
}
......
......@@ -26,8 +26,6 @@ class MaglevVregAllocationState {
class MaglevVregAllocator {
public:
static constexpr bool kNeedsCheckpointStates = true;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
for (BasicBlock* block : *graph) {
......