Commit 0ca0b849 authored by Leszek Swirski and committed by V8 LUCI CQ

[maglev] Move Checkpoints out of the IR

Change Checkpoints from IR Nodes to just normal Zone objects, pointed to
by the deopting Node. Also merge Checkpoint and DeoptimizationInfo --
this has the side effect that multiple Nodes that share a checkpoint
will point to the exact same deopt call.

Bug: v8:7700
Change-Id: Ib36aa13afe3af6a0a22d2cfe80a13fef4bea1227
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3545179
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79607}
parent f8e0b4c6
......@@ -21,19 +21,6 @@ namespace maglev {
class InterpreterFrameState;
// Per-deopt metadata recorded during code generation: the bytecode position
// to resume at, the captured interpreter frame state, the label bound at the
// emitted deopt call, and this deopt's index in the translation array.
// (Removed by this change in favour of the zone-allocated Checkpoint.)
class DeoptimizationInfo {
 public:
  DeoptimizationInfo(BytecodeOffset bytecode_position,
                     const CompactInterpreterFrameState* checkpoint_state)
      : bytecode_position(bytecode_position),
        checkpoint_state(checkpoint_state) {}
  // Bytecode offset the interpreter resumes at after deoptimization.
  BytecodeOffset bytecode_position;
  // Snapshot of the interpreter frame (parameters/locals/accumulator) taken
  // at the checkpoint this deopt falls back to.
  const CompactInterpreterFrameState* checkpoint_state;
  // Bound at the emitted deoptimization call; deopting code jumps here.
  Label entry_label;
  // Index of this deopt's entry in the translation array; -1 until assigned
  // by EmitDeopt (BeginTranslation).
  int index = -1;
};
class DeferredCodeInfo {
public:
virtual void Generate(MaglevCodeGenState* code_gen_state,
......@@ -58,18 +45,12 @@ class MaglevCodeGenState {
const std::vector<DeferredCodeInfo*>& deferred_code() const {
return deferred_code_;
}
void PushNonLazyDeopt(DeoptimizationInfo* info) {
non_lazy_deopts_.push_back(info);
}
void PushLazyDeopt(DeoptimizationInfo* info) {
non_lazy_deopts_.push_back(info);
}
const std::vector<DeoptimizationInfo*> non_lazy_deopts() const {
// Queues `info` to have an eager (non-lazy) deoptimization exit emitted.
void PushNonLazyDeopt(Checkpoint* info) { non_lazy_deopts_.push_back(info); }
// Queues `info` to have a lazy deoptimization exit emitted. This must append
// to lazy_deopts_, not non_lazy_deopts_: lazy_deopts() is iterated separately
// to emit Builtin::kDeoptimizationEntry_Lazy calls, and with the old code the
// lazy list was always empty while eager exits were emitted twice.
void PushLazyDeopt(Checkpoint* info) { lazy_deopts_.push_back(info); }
const std::vector<Checkpoint*> non_lazy_deopts() const {
return non_lazy_deopts_;
}
const std::vector<DeoptimizationInfo*> lazy_deopts() const {
return lazy_deopts_;
}
const std::vector<Checkpoint*> lazy_deopts() const { return lazy_deopts_; }
compiler::NativeContextRef native_context() const {
return broker()->target_native_context();
......@@ -108,8 +89,8 @@ class MaglevCodeGenState {
MacroAssembler masm_;
std::vector<DeferredCodeInfo*> deferred_code_;
std::vector<DeoptimizationInfo*> non_lazy_deopts_;
std::vector<DeoptimizationInfo*> lazy_deopts_;
std::vector<Checkpoint*> non_lazy_deopts_;
std::vector<Checkpoint*> lazy_deopts_;
int vreg_slots_ = 0;
// Allow marking some codegen paths as unsupported, so that we can test maglev
......@@ -126,7 +107,7 @@ inline constexpr int GetFramePointerOffsetForStackSlot(int index) {
index * kSystemPointerSize;
}
inline constexpr int GetFramePointerOffsetForStackSlot(
// Overload resolving an allocated stack-slot operand to its frame-pointer
// offset, by delegating to the index-based overload.
// NOTE(review): intentionally not constexpr (the diff drops it), presumably
// because AllocatedOperand::index() is not constexpr — confirm.
inline int GetFramePointerOffsetForStackSlot(
    const compiler::AllocatedOperand& operand) {
  return GetFramePointerOffsetForStackSlot(operand.index());
}
......
......@@ -307,8 +307,7 @@ constexpr int DeoptStackSlotIndexFromFPOffset(int offset) {
return 1 - offset / kSystemPointerSize;
}
constexpr int DeoptStackSlotFromStackSlot(
const compiler::AllocatedOperand& operand) {
// Translates an allocated stack-slot operand into the slot numbering used by
// the deoptimization translation, going through its frame-pointer offset.
int DeoptStackSlotFromStackSlot(const compiler::AllocatedOperand& operand) {
  return DeoptStackSlotIndexFromFPOffset(
      GetFramePointerOffsetForStackSlot(operand));
}
......@@ -359,32 +358,32 @@ class MaglevCodeGeneratorImpl final {
deopt_exit_start_offset_ = __ pc_offset();
__ RecordComment("-- Non-lazy deopts");
for (DeoptimizationInfo* deopt_info : code_gen_state_.non_lazy_deopts()) {
EmitDeopt(deopt_info);
for (Checkpoint* checkpoint : code_gen_state_.non_lazy_deopts()) {
EmitDeopt(checkpoint);
__ bind(&deopt_info->entry_label);
__ bind(&checkpoint->deopt_entry_label);
// TODO(leszeks): Add soft deopt entry.
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, 0,
&deopt_info->entry_label, DeoptimizeKind::kEager,
nullptr, nullptr);
&checkpoint->deopt_entry_label,
DeoptimizeKind::kEager, nullptr, nullptr);
}
__ RecordComment("-- Lazy deopts");
for (DeoptimizationInfo* deopt_info : code_gen_state_.lazy_deopts()) {
for (Checkpoint* deopt_info : code_gen_state_.lazy_deopts()) {
EmitDeopt(deopt_info);
__ bind(&deopt_info->entry_label);
__ bind(&deopt_info->deopt_entry_label);
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, 0,
&deopt_info->entry_label, DeoptimizeKind::kLazy,
nullptr, nullptr);
&deopt_info->deopt_entry_label,
DeoptimizeKind::kLazy, nullptr, nullptr);
}
}
void EmitDeopt(DeoptimizationInfo* deopt_info) {
void EmitDeopt(Checkpoint* checkpoint) {
int frame_count = 1;
int jsframe_count = 1;
int update_feedback_count = 0;
deopt_info->index = translation_array_builder_.BeginTranslation(
checkpoint->deopt_index = translation_array_builder_.BeginTranslation(
frame_count, jsframe_count, update_feedback_count);
// Returns are used for updating an accumulator or register after a lazy
......@@ -392,7 +391,7 @@ class MaglevCodeGeneratorImpl final {
int return_offset = 0;
int return_count = 0;
translation_array_builder_.BeginInterpretedFrame(
deopt_info->bytecode_position, kFunctionLiteralIndex,
checkpoint->bytecode_position, kFunctionLiteralIndex,
code_gen_state_.register_count(), return_offset, return_count);
// Closure
......@@ -403,7 +402,7 @@ class MaglevCodeGeneratorImpl final {
// Parameters
{
int i = 0;
deopt_info->checkpoint_state->ForEachParameter(
checkpoint->state->ForEachParameter(
*code_gen_state_.compilation_unit(),
[&](ValueNode* value, interpreter::Register reg) {
DCHECK_EQ(reg.ToParameterIndex(), i);
......@@ -421,7 +420,7 @@ class MaglevCodeGeneratorImpl final {
// Locals
{
int i = 0;
deopt_info->checkpoint_state->ForEachLocal(
checkpoint->state->ForEachLocal(
*code_gen_state_.compilation_unit(),
[&](ValueNode* value, interpreter::Register reg) {
DCHECK_LE(i, reg.index());
......@@ -446,7 +445,7 @@ class MaglevCodeGeneratorImpl final {
// TODO(leszeks): Bit ugly to use a did_emit boolean here rather than
// explicitly checking for accumulator liveness.
bool did_emit = false;
deopt_info->checkpoint_state->ForAccumulator(
checkpoint->state->ForAccumulator(
*code_gen_state_.compilation_unit(), [&](ValueNode* value) {
translation_array_builder_.StoreStackSlot(
DeoptStackSlotFromStackSlot(value->spill_slot()));
......@@ -527,21 +526,21 @@ class MaglevCodeGeneratorImpl final {
// Populate deoptimization entries.
int i = 0;
for (DeoptimizationInfo* deopt_info : code_gen_state_.non_lazy_deopts()) {
DCHECK_NE(deopt_info->index, -1);
data->SetBytecodeOffset(i, deopt_info->bytecode_position);
data->SetTranslationIndex(i, Smi::FromInt(deopt_info->index));
data->SetPc(i, Smi::FromInt(deopt_info->entry_label.pos()));
for (Checkpoint* checkpoint : code_gen_state_.non_lazy_deopts()) {
DCHECK_NE(checkpoint->deopt_index, -1);
data->SetBytecodeOffset(i, checkpoint->bytecode_position);
data->SetTranslationIndex(i, Smi::FromInt(checkpoint->deopt_index));
data->SetPc(i, Smi::FromInt(checkpoint->deopt_entry_label.pos()));
#ifdef DEBUG
data->SetNodeId(i, Smi::FromInt(i));
#endif // DEBUG
i++;
}
for (DeoptimizationInfo* deopt_info : code_gen_state_.lazy_deopts()) {
DCHECK_NE(deopt_info->index, -1);
data->SetBytecodeOffset(i, deopt_info->bytecode_position);
data->SetTranslationIndex(i, Smi::FromInt(deopt_info->index));
data->SetPc(i, Smi::FromInt(deopt_info->entry_label.pos()));
for (Checkpoint* checkpoint : code_gen_state_.lazy_deopts()) {
DCHECK_NE(checkpoint->deopt_index, -1);
data->SetBytecodeOffset(i, checkpoint->bytecode_position);
data->SetTranslationIndex(i, Smi::FromInt(checkpoint->deopt_index));
data->SetPc(i, Smi::FromInt(checkpoint->deopt_entry_label.pos()));
#ifdef DEBUG
data->SetNodeId(i, Smi::FromInt(i));
#endif // DEBUG
......
......@@ -64,8 +64,11 @@ class UseMarkingProcessor {
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
void Process(NodeBase* node, const ProcessingState& state) {
if (node->properties().can_deopt()) MarkCheckpointNodes(node, state);
template <typename NodeT>
void Process(NodeT* node, const ProcessingState& state) {
if constexpr (NodeT::kProperties.can_deopt()) {
MarkCheckpointNodes(node, node->checkpoint(), state);
}
for (Input& input : *node) {
input.node()->mark_use(node->id(), &input);
}
......@@ -101,9 +104,9 @@ class UseMarkingProcessor {
}
private:
void MarkCheckpointNodes(NodeBase* node, const ProcessingState& state) {
const CompactInterpreterFrameState* checkpoint_state =
state.checkpoint()->frame();
void MarkCheckpointNodes(NodeBase* node, Checkpoint* checkpoint,
const ProcessingState& state) {
const CompactInterpreterFrameState* checkpoint_state = checkpoint->state;
int use_id = node->id();
checkpoint_state->ForEachValue(
......
......@@ -125,9 +125,6 @@ class MaglevGraphBuilder {
template <typename NodeT>
NodeT* AddNode(NodeT* node) {
if (node->properties().can_deopt()) {
EnsureCheckpoint();
}
if (node->properties().is_required_when_unused()) {
MarkPossibleSideEffect();
}
......@@ -143,13 +140,22 @@ class MaglevGraphBuilder {
template <typename NodeT, typename... Args>
NodeT* AddNewNode(size_t input_count, Args&&... args) {
return AddNode(
Node::New<NodeT>(zone(), input_count, std::forward<Args>(args)...));
CreateNewNode<NodeT>(input_count, std::forward<Args>(args)...));
}
template <typename NodeT, typename... Args>
NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
return AddNode(
Node::New<NodeT>(zone(), inputs, std::forward<Args>(args)...));
return AddNode(CreateNewNode<NodeT>(inputs, std::forward<Args>(args)...));
}
template <typename NodeT, typename... Args>
NodeT* CreateNewNode(Args&&... args) {
if constexpr (NodeT::kProperties.can_deopt()) {
return Node::New<NodeT>(zone(), std::forward<Args>(args)...,
GetCheckpoint());
} else {
return Node::New<NodeT>(zone(), std::forward<Args>(args)...);
}
}
ValueNode* GetContext() const {
......@@ -191,21 +197,24 @@ class MaglevGraphBuilder {
}
void AddCheckpoint() {
// TODO(v8:7700): Verify this calls the initializer list overload.
AddNewNode<Checkpoint>(
{}, BytecodeOffset(iterator_.current_offset()),
latest_checkpoint_ = zone()->New<Checkpoint>(
BytecodeOffset(iterator_.current_offset()),
zone()->New<CompactInterpreterFrameState>(
*compilation_unit_, GetInLiveness(), current_interpreter_frame_));
has_valid_checkpoint_ = true;
}
// Allocates a fresh checkpoint if the cached one was invalidated (by a side
// effect or a merge point). The duplicated condition referencing the removed
// has_valid_checkpoint_ member was a leftover of the old diff side.
void EnsureCheckpoint() {
  if (!latest_checkpoint_) AddCheckpoint();
}
// Returns the checkpoint for the current bytecode position, allocating a
// fresh one on demand when the cached checkpoint has been invalidated.
Checkpoint* GetCheckpoint() {
  if (latest_checkpoint_ == nullptr) {
    AddCheckpoint();
  }
  return latest_checkpoint_;
}
// If there was a potential side effect, invalidate the previous checkpoint:
// the recorded frame state no longer matches reality, so the next deopting
// node must capture a fresh one. (The write to the removed
// has_valid_checkpoint_ flag was a leftover of the old diff side.)
void MarkPossibleSideEffect() {
  latest_checkpoint_ = nullptr;
}
int next_offset() const {
......@@ -259,7 +268,7 @@ class MaglevGraphBuilder {
// If the next block has merge states, then it's not a simple fallthrough,
// and we should reset the checkpoint validity.
if (merge_states_[next_block_offset] != nullptr) {
has_valid_checkpoint_ = false;
latest_checkpoint_ = nullptr;
}
// Start a new block for the fallthrough path, unless it's a merge point, in
// which case we merge our state into it. That merge-point could also be a
......@@ -355,7 +364,7 @@ class MaglevGraphBuilder {
// Current block information.
BasicBlock* current_block_ = nullptr;
int block_offset_ = 0;
bool has_valid_checkpoint_ = false;
Checkpoint* latest_checkpoint_ = nullptr;
BasicBlockRef* jump_targets_;
MergePointInterpreterFrameState** merge_states_;
......
......@@ -46,11 +46,8 @@ class GraphProcessor;
class ProcessingState {
public:
explicit ProcessingState(MaglevCompilationUnit* compilation_unit,
BlockConstIterator block_it,
const Checkpoint* checkpoint)
: compilation_unit_(compilation_unit),
block_it_(block_it),
checkpoint_(checkpoint) {}
BlockConstIterator block_it)
: compilation_unit_(compilation_unit), block_it_(block_it) {}
// Disallow copies, since the underlying frame states stay mutable.
ProcessingState(const ProcessingState&) = delete;
......@@ -59,11 +56,6 @@ class ProcessingState {
BasicBlock* block() const { return *block_it_; }
BasicBlock* next_block() const { return *(block_it_ + 1); }
const Checkpoint* checkpoint() const {
DCHECK_NOT_NULL(checkpoint_);
return checkpoint_;
}
MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
int register_count() const { return compilation_unit_->register_count(); }
......@@ -76,7 +68,6 @@ class ProcessingState {
private:
MaglevCompilationUnit* compilation_unit_;
BlockConstIterator block_it_;
const Checkpoint* checkpoint_;
};
template <typename NodeProcessor>
......@@ -121,7 +112,7 @@ class GraphProcessor {
private:
ProcessingState GetCurrentState() {
return ProcessingState(compilation_unit_, block_it_, latest_checkpoint_);
return ProcessingState(compilation_unit_, block_it_);
}
void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
......@@ -138,10 +129,6 @@ class GraphProcessor {
void PreProcess(NodeBase* node, const ProcessingState& state) {}
void PreProcess(Checkpoint* checkpoint, const ProcessingState& state) {
latest_checkpoint_ = checkpoint;
}
int register_count() const { return compilation_unit_->register_count(); }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis();
......@@ -152,7 +139,6 @@ class GraphProcessor {
Graph* graph_;
BlockConstIterator block_it_;
NodeConstIterator node_it_;
Checkpoint* latest_checkpoint_ = nullptr;
};
// A NodeProcessor that wraps multiple NodeProcessors, and forwards to each of
......
......@@ -164,10 +164,10 @@ struct CopyForDeferredHelper<const InterpreterFrameState*> {
*compilation_unit, *frame_state);
}
};
// Checkpoint pointers are copied by value when captured for deferred code:
// the pointee is zone-allocated and intentionally shared between nodes, so no
// deep copy is needed. (The old CompactInterpreterFrameState* specialization
// was interleaved here from the removed diff side; one `template <>` header
// followed by two struct bodies is ill-formed, and the remaining deferred
// caller — CheckMaps — now passes a Checkpoint*.)
template <>
struct CopyForDeferredHelper<Checkpoint*>
    : public CopyForDeferredByValue<Checkpoint*> {};
template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T&& value) {
......@@ -262,32 +262,25 @@ void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
// Deopt
// ---
DeoptimizationInfo* CreateEagerDeopt(
MaglevCodeGenState* code_gen_state, BytecodeOffset bytecode_position,
const CompactInterpreterFrameState* checkpoint_state) {
Zone* zone = code_gen_state->compilation_unit()->zone();
DeoptimizationInfo* deopt_info =
zone->New<DeoptimizationInfo>(bytecode_position, checkpoint_state);
code_gen_state->PushNonLazyDeopt(deopt_info);
return deopt_info;
// Registers `checkpoint` as an eager deopt target at most once. Several nodes
// may share the same checkpoint; the guard relies on the deopt entry label
// being unused until the first registration's jump targets it.
// NOTE(review): assumes the label only acquires a use after a prior
// registration — confirm no other code jumps to deopt_entry_label first.
void RegisterEagerDeoptCheckpoint(MaglevCodeGenState* code_gen_state,
                                  Checkpoint* checkpoint) {
  if (checkpoint->deopt_entry_label.is_unused()) {
    code_gen_state->PushNonLazyDeopt(checkpoint);
  }
}
void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
BytecodeOffset bytecode_position,
const CompactInterpreterFrameState* checkpoint_state) {
DeoptimizationInfo* deopt_info =
CreateEagerDeopt(code_gen_state, bytecode_position, checkpoint_state);
Checkpoint* checkpoint) {
RegisterEagerDeoptCheckpoint(code_gen_state, checkpoint);
__ RecordComment("-- Jump to eager deopt");
__ j(cond, &deopt_info->entry_label);
__ j(cond, &checkpoint->deopt_entry_label);
}
template <typename NodeT>
void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
Node* node, const ProcessingState& state) {
DCHECK(node->properties().can_deopt());
EmitEagerDeoptIf(cond, code_gen_state,
state.checkpoint()->bytecode_position(),
state.checkpoint()->frame());
NodeT* node) {
STATIC_ASSERT(NodeT::kProperties.can_deopt());
EmitEagerDeoptIf(cond, code_gen_state, node->checkpoint());
}
// ---
......@@ -372,21 +365,12 @@ void SmiConstant::PrintParams(std::ostream& os,
os << "(" << value() << ")";
}
void Checkpoint::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void Checkpoint::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {}
void Checkpoint::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << ToString(*frame()->liveness()) << ")";
}
// SoftDeopt produces no value; nothing to allocate.
void SoftDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
                             const ProcessingState& state) {}

// Unconditionally jumps to the eager deopt for this node's checkpoint.
// (The duplicated EmitEagerDeoptIf call passing `state` targeted the removed
// pre-refactor overload and was a leftover of the old diff side.)
void SoftDeopt::GenerateCode(MaglevCodeGenState* code_gen_state,
                             const ProcessingState& state) {
  // TODO(leszeks): Make this a soft deopt.
  EmitEagerDeoptIf(always, code_gen_state, this);
}
void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
......@@ -499,18 +483,16 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
JumpToDeferredIf(
not_equal, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label,
Register object, CheckMaps* node, BytecodeOffset checkpoint_position,
const CompactInterpreterFrameState* checkpoint_state_snapshot,
Register object, CheckMaps* node, Checkpoint* checkpoint,
Register map_tmp) {
DeoptimizationInfo* deopt = CreateEagerDeopt(
code_gen_state, checkpoint_position, checkpoint_state_snapshot);
RegisterEagerDeoptCheckpoint(code_gen_state, checkpoint);
// If the map is not deprecated, deopt straight away.
__ movl(kScratchRegister,
FieldOperand(map_tmp, Map::kBitField3Offset));
__ testl(kScratchRegister,
Immediate(Map::Bits3::IsDeprecatedBit::kMask));
__ j(zero, &deopt->entry_label);
__ j(zero, &checkpoint->deopt_entry_label);
// Otherwise, try migrating the object. If the migration returns Smi
// zero, then it failed and we should deopt.
......@@ -520,19 +502,18 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
// TODO(verwaest): We're calling so we need to spill around it.
__ CallRuntime(Runtime::kTryMigrateInstance);
__ cmpl(kReturnRegister0, Immediate(0));
__ j(equal, &deopt->entry_label);
__ j(equal, &checkpoint->deopt_entry_label);
// The migrated object is returned on success, retry the map check.
__ Move(object, kReturnRegister0);
__ LoadMap(map_tmp, object);
__ Cmp(map_tmp, node->map().object());
__ j(equal, return_label);
__ jmp(&deopt->entry_label);
__ jmp(&checkpoint->deopt_entry_label);
},
object, this, state.checkpoint()->bytecode_position(),
state.checkpoint()->frame(), map_tmp);
object, this, checkpoint(), map_tmp);
} else {
EmitEagerDeoptIf(not_equal, code_gen_state, this, state);
EmitEagerDeoptIf(not_equal, code_gen_state, this);
}
}
void CheckMaps::PrintParams(std::ostream& os,
......
......@@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/base/small-vector.h"
#include "src/base/threaded-list.h"
#include "src/codegen/label.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
#include "src/common/operation.h"
......@@ -76,7 +77,6 @@ class CompactInterpreterFrameState;
GENERIC_OPERATIONS_NODE_LIST(V)
#define NODE_LIST(V) \
V(Checkpoint) \
V(CheckMaps) \
V(GapMove) \
V(SoftDeopt) \
......@@ -168,16 +168,18 @@ static constexpr uint32_t kInvalidNodeId = 0;
class OpProperties {
public:
bool is_call() const { return kIsCallBit::decode(bitfield_); }
bool can_deopt() const { return kCanDeoptBit::decode(bitfield_); }
bool can_read() const { return kCanReadBit::decode(bitfield_); }
bool can_write() const { return kCanWriteBit::decode(bitfield_); }
bool non_memory_side_effects() const {
constexpr bool is_call() const { return kIsCallBit::decode(bitfield_); }
constexpr bool can_deopt() const { return kCanDeoptBit::decode(bitfield_); }
constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); }
constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); }
constexpr bool non_memory_side_effects() const {
return kNonMemorySideEffectsBit::decode(bitfield_);
}
bool is_pure() const { return (bitfield_ | kPureMask) == kPureValue; }
bool is_required_when_unused() const {
constexpr bool is_pure() const {
return (bitfield_ | kPureMask) == kPureValue;
}
constexpr bool is_required_when_unused() const {
return can_write() || non_memory_side_effects();
}
......@@ -224,7 +226,7 @@ class OpProperties {
constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
uint32_t bitfield_;
const uint32_t bitfield_;
};
class ValueLocation {
......@@ -283,6 +285,18 @@ class Input : public ValueLocation {
NodeIdT next_use_id_ = kInvalidNodeId;
};
// Deoptimization metadata shared by every node that deopts back to the same
// bytecode position with the same frame state. A plain zone object rather
// than an IR node; multiple deopting nodes may point at the same Checkpoint,
// so they share one emitted deopt call.
class Checkpoint {
 public:
  Checkpoint(BytecodeOffset bytecode_position,
             const CompactInterpreterFrameState* state)
      : bytecode_position(bytecode_position), state(state) {}
  // Bytecode offset the interpreter resumes at after deoptimization.
  BytecodeOffset bytecode_position;
  // Compact snapshot of the interpreter frame (parameters, locals,
  // accumulator) at this checkpoint.
  const CompactInterpreterFrameState* state;
  // Bound at the emitted deoptimization call; eager deopts jump here.
  Label deopt_entry_label;
  // Index of this checkpoint's entry in the translation array; -1 until
  // EmitDeopt assigns it via BeginTranslation.
  int deopt_index = -1;
};
// Dummy type for the initial raw allocation.
struct NodeWithInlineInputs {};
......@@ -300,6 +314,7 @@ struct opcode_of_helper;
};
NODE_BASE_LIST(DEF_OPCODE_OF)
#undef DEF_OPCODE_OF
} // namespace detail
class NodeBase : public ZoneObject {
......@@ -865,47 +880,32 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
const RootIndex index_;
};
// NOTE(review): pre-refactor representation of a checkpoint as an IR node
// (FixedInputNodeT), removed by this change in favour of the plain
// zone-allocated Checkpoint class; both definitions cannot coexist in one
// translation unit.
class Checkpoint : public FixedInputNodeT<0, Checkpoint> {
  using Base = FixedInputNodeT<0, Checkpoint>;

 public:
  explicit Checkpoint(size_t input_count, BytecodeOffset bytecode_position,
                      CompactInterpreterFrameState* frame)
      : Base(input_count),
        bytecode_position_(bytecode_position),
        frame_(frame) {}

  // Bytecode offset this checkpoint resumes at.
  BytecodeOffset bytecode_position() const { return bytecode_position_; }
  // Captured interpreter frame state.
  const CompactInterpreterFrameState* frame() const { return frame_; }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const BytecodeOffset bytecode_position_;
  const CompactInterpreterFrameState* const frame_;
};
// A node that unconditionally deoptimizes when reached (currently emitted as
// an eager deopt; see the TODO in GenerateCode about making it soft).
class SoftDeopt : public FixedInputNodeT<0, SoftDeopt> {
  using Base = FixedInputNodeT<0, SoftDeopt>;

 public:
  // The one-argument constructor from the removed diff side is gone: it left
  // the const member checkpoint_ uninitialized, which is ill-formed.
  explicit SoftDeopt(size_t input_count, Checkpoint* checkpoint)
      : Base(input_count), checkpoint_(checkpoint) {}

  static constexpr OpProperties kProperties = OpProperties::Deopt();

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}

  // Deopt metadata (bytecode position + frame state) captured at creation.
  Checkpoint* checkpoint() { return checkpoint_; }

 private:
  Checkpoint* const checkpoint_;
};
class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
using Base = FixedInputNodeT<1, CheckMaps>;
public:
explicit CheckMaps(size_t input_count, const compiler::MapRef& map)
: Base(input_count), map_(map) {}
explicit CheckMaps(size_t input_count, const compiler::MapRef& map,
Checkpoint* checkpoint)
: Base(input_count), map_(map), checkpoint_(checkpoint) {}
// TODO(verwaest): This just calls in deferred code, so probably we'll need to
// mark that to generate stack maps. Mark as call so we at least clear the
......@@ -922,8 +922,11 @@ class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
Checkpoint* checkpoint() { return checkpoint_; }
private:
const compiler::MapRef map_;
Checkpoint* checkpoint_;
};
class LoadField : public FixedInputValueNodeT<1, LoadField> {
......
......@@ -270,7 +270,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
compiler::AllocatedOperand::cast(allocation));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr));
phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (new reg) " << phi->result().operand() << std::endl;
}
......@@ -284,7 +284,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
phi->result().SetAllocated(phi->spill_slot());
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr));
phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (stack) " << phi->result().operand() << std::endl;
}
......@@ -341,8 +341,8 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>());
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
node, ProcessingState(compilation_unit_, block_it_, nullptr));
printing_visitor_->Process(node,
ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
printing_visitor_->os() << "\n";
......@@ -510,8 +510,8 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
}
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
node, ProcessingState(compilation_unit_, block_it_, nullptr));
printing_visitor_->Process(node,
ProcessingState(compilation_unit_, block_it_));
}
}
......@@ -524,7 +524,7 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
phi->result().SetAllocated(ForceAllocate(reg, phi));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr));
phi, ProcessingState(compilation_unit_, block_it_));
printing_visitor_->os()
<< "phi (reuse) " << input.operand() << std::endl;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.