Commit 7ea0412e authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Split interrupt budget updates to separate nodes

Having interrupt budget updates be part of register allocation caused
various difficulties around gap moves for temporaries vs. gap moves for
phis. This patch splits them off into a separate node which is
separately allocated, and adds invariant checks that phi-updating nodes
don't do any other tricky register allocation.

Bug: v8:7700
Change-Id: I5a454fe4c5a5adff08d5a327ee34fbb43cda97ce
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3751196
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81604}
parent d34170f2
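For context before the diff: the two new nodes implement V8's bytecode interrupt budget. Below is a minimal standalone model of that mechanism, with all names and the budget constant invented for the sketch; the real nodes emit x64 machine code against FeedbackCell::kInterruptBudgetOffset rather than calling C++ functions.

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the interrupt budget stored on V8's
// FeedbackCell; the field name and initial value are invented here.
struct FeedbackCellModel {
  int32_t interrupt_budget = 1000;
};

// Models ReduceInterruptBudget (loop back-edges, returns): charge the
// bytecode distance executed, and trigger the interrupt/stack check once
// the budget drops below zero (the runtime then resets it).
void ReduceBudget(FeedbackCellModel& cell, int32_t amount) {
  cell.interrupt_budget -= amount;
  if (cell.interrupt_budget < 0) {
    std::printf("budget exhausted: interrupt + stack check\n");
    cell.interrupt_budget = 1000;
  }
}

// Models IncreaseInterruptBudget (forward jumps): credit back the skipped
// distance. The budget only goes up, so no check is needed.
void IncreaseBudget(FeedbackCellModel& cell, int32_t amount) {
  cell.interrupt_budget += amount;
}

int main() {
  FeedbackCellModel cell;
  for (int i = 0; i < 100; ++i) ReduceBudget(cell, 15);  // loop back-edges
  IncreaseBudget(cell, 40);                              // one forward jump
  std::printf("remaining budget: %d\n", cell.interrupt_budget);
}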
--- a/src/maglev/maglev-graph-builder.cc
+++ b/src/maglev/maglev-graph-builder.cc
@@ -1130,11 +1130,8 @@ void MaglevGraphBuilder::InlineCallFromRegisters(
   inner_graph_builder.ProcessMergePoint(
       inner_graph_builder.inline_exit_offset());
   inner_graph_builder.StartNewBlock(inner_graph_builder.inline_exit_offset());
-  // See also: InterpreterAssembler::UpdateInterruptBudgetOnReturn.
-  const uint32_t relative_jump_bytecode_offset =
-      inner_graph_builder.iterator_.current_offset();
-  BasicBlock* end_block = inner_graph_builder.CreateBlock<JumpFromInlined>(
-      {}, &end_ref, relative_jump_bytecode_offset);
+  BasicBlock* end_block =
+      inner_graph_builder.CreateBlock<JumpFromInlined>({}, &end_ref);
   inner_graph_builder.ResolveJumpsToBlockAtOffset(
       end_block, inner_graph_builder.inline_exit_offset());
@@ -1386,14 +1383,17 @@ void MaglevGraphBuilder::VisitJumpLoop() {
   const int32_t loop_offset = iterator_.GetImmediateOperand(1);
   const FeedbackSlot feedback_slot = iterator_.GetSlotOperand(2);
   int target = iterator_.GetJumpTargetOffset();
+  if (relative_jump_bytecode_offset > 0) {
+    AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
+  }
   BasicBlock* block =
       target == iterator_.current_offset()
           ? FinishBlock<JumpLoop>(next_offset(), {}, &jump_targets_[target],
-                                  relative_jump_bytecode_offset, loop_offset,
-                                  feedback_slot)
-          : FinishBlock<JumpLoop>(
-                next_offset(), {}, jump_targets_[target].block_ptr(),
-                relative_jump_bytecode_offset, loop_offset, feedback_slot);
+                                  loop_offset, feedback_slot)
+          : FinishBlock<JumpLoop>(next_offset(), {},
+                                  jump_targets_[target].block_ptr(),
+                                  loop_offset, feedback_slot);
   merge_states_[target]->MergeLoop(*compilation_unit_,
                                    current_interpreter_frame_, block, target);
@@ -1402,9 +1402,11 @@ void MaglevGraphBuilder::VisitJumpLoop() {
 void MaglevGraphBuilder::VisitJump() {
   const uint32_t relative_jump_bytecode_offset =
       iterator_.GetUnsignedImmediateOperand(0);
+  if (relative_jump_bytecode_offset > 0) {
+    AddNewNode<IncreaseInterruptBudget>({}, relative_jump_bytecode_offset);
+  }
   BasicBlock* block = FinishBlock<Jump>(
-      next_offset(), {}, &jump_targets_[iterator_.GetJumpTargetOffset()],
-      relative_jump_bytecode_offset);
+      next_offset(), {}, &jump_targets_[iterator_.GetJumpTargetOffset()]);
   MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
   DCHECK_LT(next_offset(), bytecode().length());
 }
@@ -1545,9 +1547,12 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(ReThrow)
 void MaglevGraphBuilder::VisitReturn() {
   // See also: InterpreterAssembler::UpdateInterruptBudgetOnReturn.
   const uint32_t relative_jump_bytecode_offset = iterator_.current_offset();
+  if (relative_jump_bytecode_offset > 0) {
+    AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
+  }
   if (!is_inline()) {
-    FinishBlock<Return>(next_offset(), {GetAccumulatorTagged()},
-                        relative_jump_bytecode_offset);
+    FinishBlock<Return>(next_offset(), {GetAccumulatorTagged()});
     return;
   }
@@ -1556,9 +1561,8 @@ void MaglevGraphBuilder::VisitReturn() {
   // execution of the caller.
   // TODO(leszeks): Consider shortcutting this Jump for cases where there is
   // only one return and no need to merge return states.
-  BasicBlock* block =
-      FinishBlock<Jump>(next_offset(), {}, &jump_targets_[inline_exit_offset()],
-                        relative_jump_bytecode_offset);
+  BasicBlock* block = FinishBlock<Jump>(next_offset(), {},
+                                        &jump_targets_[inline_exit_offset()]);
   MergeIntoInlinedReturnFrameState(block);
 }
 MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowReferenceErrorIfHole)
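The graph-builder hunks above all follow one pattern: the budget update becomes an ordinary node emitted via AddNewNode before the block is finished, so the control nodes themselves no longer carry an offset. A condensed sketch of that emission rule, where the Emit* functions stand in for the AddNewNode calls and offsets are unsigned bytecode distances:

#include <cstdint>
#include <cstdio>

// Stand-ins for AddNewNode<IncreaseInterruptBudget> / <ReduceInterruptBudget>.
void EmitIncreaseInterruptBudget(uint32_t amount) {
  std::printf("IncreaseInterruptBudget(%u)\n", amount);
}
void EmitReduceInterruptBudget(uint32_t amount) {
  std::printf("ReduceInterruptBudget(%u)\n", amount);
}

// VisitJump: a forward jump credits back the distance it skips.
void OnJump(uint32_t relative_offset) {
  if (relative_offset > 0) EmitIncreaseInterruptBudget(relative_offset);
}

// VisitJumpLoop: a back-edge charges the distance travelled backwards.
void OnJumpLoop(uint32_t relative_offset) {
  if (relative_offset > 0) EmitReduceInterruptBudget(relative_offset);
}

// VisitReturn: charge the bytecodes executed up to the return, mirroring
// InterpreterAssembler::UpdateInterruptBudgetOnReturn.
void OnReturn(uint32_t current_offset) {
  if (current_offset > 0) EmitReduceInterruptBudget(current_offset);
}

int main() {
  OnJumpLoop(32);  // e.g. a JumpLoop at offset 42 targeting offset 10
  OnJump(7);
  OnReturn(55);
}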
--- a/src/maglev/maglev-graph-verifier.h
+++ b/src/maglev/maglev-graph-verifier.h
@@ -75,6 +75,8 @@ class MaglevGraphVerifier {
       case Opcode::kRegisterInput:
       case Opcode::kRootConstant:
       case Opcode::kSmiConstant:
+      case Opcode::kIncreaseInterruptBudget:
+      case Opcode::kReduceInterruptBudget:
         // No input.
         DCHECK_EQ(node->input_count(), 0);
         break;
--- a/src/maglev/maglev-ir.cc
+++ b/src/maglev/maglev-ir.cc
@@ -1543,52 +1543,60 @@ void Construct::GenerateCode(MaglevCodeGenState* code_gen_state,
   code_gen_state->DefineLazyDeoptPoint(lazy_deopt_info());
 }
 
-namespace {
-
-void AttemptOnStackReplacement(MaglevCodeGenState* code_gen_state,
-                               int32_t loop_depth, FeedbackSlot feedback_slot) {
-  // TODO(v8:7700): Implement me. See also
-  // InterpreterAssembler::OnStackReplacement.
-}
-
-void UpdateInterruptBudgetAndMaybeCallRuntime(
-    MaglevCodeGenState* code_gen_state, Register scratch,
-    int32_t relative_jump_bytecode_offset) {
-  // TODO(v8:7700): Remove once regalloc is fixed. See crrev.com/c/3625978.
-  __ Push(scratch);
-  __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
-      scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
-  __ addl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
-          Immediate(relative_jump_bytecode_offset));
-
-  // Only check the interrupt if the above add can drop the interrupt budget
-  // below zero.
-  if (relative_jump_bytecode_offset < 0) {
-    JumpToDeferredIf(
-        less, code_gen_state,
-        [](MaglevCodeGenState* code_gen_state, Label* return_label) {
-          // TODO(leszeks): Only save registers if they're not free (requires
-          // fixing the regalloc, same as for scratch).
-          __ PushCallerSaved(SaveFPRegsMode::kSave);
-          __ Move(kContextRegister, code_gen_state->native_context().object());
-          __ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-          __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck, 1);
-          __ PopCallerSaved(SaveFPRegsMode::kSave);
-          __ jmp(return_label);
-        });
-  }
-
-  // TODO(v8:7700): Remove once regalloc is fixed. See crrev.com/c/3625978.
-  __ Pop(scratch);
-}
-
-void UpdateInterruptBudgetAndMaybeCallRuntime(
-    MaglevCodeGenState* code_gen_state, Register scratch,
-    base::Optional<uint32_t> relative_jump_bytecode_offset) {
-  if (!relative_jump_bytecode_offset.has_value()) return;
-  UpdateInterruptBudgetAndMaybeCallRuntime(
-      code_gen_state, scratch, relative_jump_bytecode_offset.value());
-}
+void IncreaseInterruptBudget::AllocateVreg(
+    MaglevVregAllocationState* vreg_state) {
+  set_temporaries_needed(1);
+}
+void IncreaseInterruptBudget::GenerateCode(MaglevCodeGenState* code_gen_state,
+                                           const ProcessingState& state) {
+  Register scratch = temporaries().first();
+  __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+  __ LoadTaggedPointerField(
+      scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
+  __ addl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
+          Immediate(amount()));
+}
+void IncreaseInterruptBudget::PrintParams(
+    std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+  os << "(" << amount() << ")";
+}
+
+void ReduceInterruptBudget::AllocateVreg(
+    MaglevVregAllocationState* vreg_state) {
+  set_temporaries_needed(1);
+}
+void ReduceInterruptBudget::GenerateCode(MaglevCodeGenState* code_gen_state,
+                                         const ProcessingState& state) {
+  Register scratch = temporaries().first();
+  __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+  __ LoadTaggedPointerField(
+      scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
+  __ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
+          Immediate(amount()));
+  JumpToDeferredIf(
+      less, code_gen_state,
+      [](MaglevCodeGenState* code_gen_state, Label* return_label) {
+        // TODO(leszeks): Only save registers if they're not free (requires
+        // fixing the regalloc, same as for scratch).
+        __ PushCallerSaved(SaveFPRegsMode::kSave);
+        __ Move(kContextRegister, code_gen_state->native_context().object());
+        __ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
+        __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck, 1);
+        __ PopCallerSaved(SaveFPRegsMode::kSave);
+        __ jmp(return_label);
+      });
+}
+void ReduceInterruptBudget::PrintParams(
+    std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+  os << "(" << amount() << ")";
+}
+
+namespace {
+
+void AttemptOnStackReplacement(MaglevCodeGenState* code_gen_state,
+                               int32_t loop_depth, FeedbackSlot feedback_slot) {
+  // TODO(v8:7700): Implement me. See also
+  // InterpreterAssembler::OnStackReplacement.
+}
 
 }  // namespace
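Note what the split buys codegen: the removed helper had no way to ask the allocator for a register, so it wrapped every update in a Push/Pop of the scratch register (the "Remove once regalloc is fixed" TODO), while the new nodes declare a temporary up front via set_temporaries_needed(1) and read it back from temporaries().first(). A toy model of that handshake, with interfaces invented for illustration (V8's real NodeBase and allocator differ):

#include <cassert>
#include <cstdio>
#include <vector>

// A node declares its scratch-register needs in AllocateVreg, the register
// allocator satisfies them, and GenerateCode reads them back instead of
// spilling a register ad hoc with push/pop.
struct NodeModel {
  int temporaries_needed = 0;
  std::vector<int> temporaries;  // register codes picked by the allocator
  void set_temporaries_needed(int n) { temporaries_needed = n; }
};

struct ReduceInterruptBudgetModel : NodeModel {
  void AllocateVreg() { set_temporaries_needed(1); }
  void GenerateCode() {
    assert(!temporaries.empty());
    int scratch = temporaries.front();  // like temporaries().first()
    std::printf("scratch = r%d (no Push/Pop workaround)\n", scratch);
  }
};

int main() {
  ReduceInterruptBudgetModel node;
  node.AllocateVreg();
  // Trivial "register allocator" honoring the request:
  for (int i = 0; i < node.temporaries_needed; ++i)
    node.temporaries.push_back(8 + i);
  node.GenerateCode();
}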
@@ -1603,13 +1611,6 @@ void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
                           const ProcessingState& state) {
   DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
 
-  // We're not going to continue execution, so we can use an arbitrary register
-  // here instead of relying on temporaries from the register allocator.
-  Register scratch = r8;
-
-  UpdateInterruptBudgetAndMaybeCallRuntime(code_gen_state, scratch,
-                                           relative_jump_bytecode_offset_);
-
   // Read the formal number of parameters from the top level compilation unit
   // (i.e. the outermost, non inlined function).
   int formal_params_size = code_gen_state->compilation_info()
@@ -1618,7 +1619,7 @@ void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
 
   // We're not going to continue execution, so we can use an arbitrary register
   // here instead of relying on temporaries from the register allocator.
-  Register actual_params_size = scratch;
+  Register actual_params_size = r8;
 
   // Compute the size of the actual parameters + receiver (in bytes).
   // TODO(leszeks): Consider making this an input into Return to re-use the
@@ -1652,14 +1653,9 @@ void Deopt::GenerateCode(MaglevCodeGenState* code_gen_state,
   EmitEagerDeopt(code_gen_state, this);
 }
 
-void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state) {
-  set_temporaries_needed(1);
-}
+void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
 void Jump::GenerateCode(MaglevCodeGenState* code_gen_state,
                         const ProcessingState& state) {
-  UpdateInterruptBudgetAndMaybeCallRuntime(
-      code_gen_state, temporaries().PopFirst(), relative_jump_bytecode_offset_);
-
   // Avoid emitting a jump to the next block.
   if (target() != state.next_block()) {
     __ jmp(target()->label());
@@ -1679,29 +1675,19 @@ void JumpToInlined::PrintParams(std::ostream& os,
   os << "(" << Brief(*unit()->shared_function_info().object()) << ")";
 }
 
-void JumpFromInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {
-  set_temporaries_needed(1);
-}
+void JumpFromInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
 void JumpFromInlined::GenerateCode(MaglevCodeGenState* code_gen_state,
                                    const ProcessingState& state) {
-  UpdateInterruptBudgetAndMaybeCallRuntime(
-      code_gen_state, temporaries().PopFirst(), relative_jump_bytecode_offset_);
-
   // Avoid emitting a jump to the next block.
   if (target() != state.next_block()) {
     __ jmp(target()->label());
   }
 }
 
-void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state) {
-  set_temporaries_needed(1);
-}
+void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
 void JumpLoop::GenerateCode(MaglevCodeGenState* code_gen_state,
                             const ProcessingState& state) {
   AttemptOnStackReplacement(code_gen_state, loop_depth_, feedback_slot_);
-  UpdateInterruptBudgetAndMaybeCallRuntime(code_gen_state,
-                                           temporaries().PopFirst(),
-                                           -relative_jump_bytecode_offset_);
   __ jmp(target()->label());
 }
--- a/src/maglev/maglev-ir.h
+++ b/src/maglev/maglev-ir.h
@@ -144,13 +144,15 @@ class CompactInterpreterFrameState;
   V(ConstantGapMove) \
   V(GapMove)
 
 #define NODE_LIST(V)        \
   V(CheckMaps)              \
   V(CheckSmi)               \
   V(CheckHeapObject)        \
   V(CheckMapsWithMigration) \
   V(StoreField)             \
+  V(IncreaseInterruptBudget) \
+  V(ReduceInterruptBudget)   \
   GAP_MOVE_NODE_LIST(V)     \
   VALUE_NODE_LIST(V)
 
 #define CONDITIONAL_CONTROL_NODE_LIST(V) \
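NODE_LIST is an X-macro: registering the two opcodes here is what generates Opcode::kIncreaseInterruptBudget and Opcode::kReduceInterruptBudget, which the verifier hunk above switches on. A self-contained illustration of the pattern, with toy names:

#include <iostream>

// Toy X-macro in the style of NODE_LIST: each V(...) entry expands once per
// use site, so adding a node in one place generates its enum value, its
// name string, and anything else derived from the list.
#define TOY_NODE_LIST(V)     \
  V(IncreaseInterruptBudget) \
  V(ReduceInterruptBudget)   \
  V(Jump)

enum class Opcode {
#define DEF_ENUM(Name) k##Name,
  TOY_NODE_LIST(DEF_ENUM)
#undef DEF_ENUM
};

const char* OpcodeName(Opcode op) {
  switch (op) {
#define CASE(Name)      \
  case Opcode::k##Name: \
    return #Name;
    TOY_NODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() { std::cout << OpcodeName(Opcode::kReduceInterruptBudget) << "\n"; }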
@@ -2111,6 +2113,45 @@ class Construct : public ValueNodeT<Construct> {
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
 };
 
+class IncreaseInterruptBudget
+    : public FixedInputNodeT<0, IncreaseInterruptBudget> {
+  using Base = FixedInputNodeT<0, IncreaseInterruptBudget>;
+
+ public:
+  explicit IncreaseInterruptBudget(uint64_t bitfield, int amount)
+      : Base(bitfield), amount_(amount) {
+    DCHECK_GT(amount, 0);
+  }
+
+  int amount() const { return amount_; }
+
+  void AllocateVreg(MaglevVregAllocationState*);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+  const int amount_;
+};
+
+class ReduceInterruptBudget : public FixedInputNodeT<0, ReduceInterruptBudget> {
+  using Base = FixedInputNodeT<0, ReduceInterruptBudget>;
+
+ public:
+  explicit ReduceInterruptBudget(uint64_t bitfield, int amount)
+      : Base(bitfield), amount_(amount) {
+    DCHECK_GT(amount, 0);
+  }
+
+  int amount() const { return amount_; }
+
+  void AllocateVreg(MaglevVregAllocationState*);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+  const int amount_;
+};
+
 // Represents either a direct BasicBlock pointer, or an entry in a list of
 // unresolved BasicBlockRefs which will be mutated (in place) at some point into
 // direct BasicBlock pointers.
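Both classes derive from FixedInputNodeT<0, ...>, so "zero inputs" is a compile-time property of the node type, which is what the verifier's input_count() check relies on, and DCHECK_GT(amount, 0) keeps the budget delta strictly positive. A reduced sketch of the idea, with a toy template rather than V8's:

#include <cassert>
#include <cstdint>

// The input count is baked into the base class as a template parameter,
// so every instance of the derived node reports the same count.
template <int kInputCount, typename Derived>
struct FixedInputNodeSketch {
  static constexpr int input_count() { return kInputCount; }
};

struct ReduceInterruptBudgetSketch
    : FixedInputNodeSketch<0, ReduceInterruptBudgetSketch> {
  explicit ReduceInterruptBudgetSketch(int amount) : amount_(amount) {
    assert(amount > 0);  // mirrors DCHECK_GT(amount, 0)
  }
  int amount() const { return amount_; }

 private:
  int amount_;
};

int main() {
  ReduceInterruptBudgetSketch node(/*amount=*/12);
  static_assert(ReduceInterruptBudgetSketch::input_count() == 0, "no inputs");
  return node.amount() == 12 ? 0 : 1;
}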
@@ -2319,37 +2360,27 @@ class Jump : public UnconditionalControlNodeT<Jump> {
   using Base = UnconditionalControlNodeT<Jump>;
 
  public:
-  Jump(uint64_t bitfield, BasicBlockRef* target_refs,
-       base::Optional<uint32_t> relative_jump_bytecode_offset = {})
-      : Base(bitfield, target_refs),
-        relative_jump_bytecode_offset_(relative_jump_bytecode_offset) {}
+  Jump(uint64_t bitfield, BasicBlockRef* target_refs)
+      : Base(bitfield, target_refs) {}
 
   void AllocateVreg(MaglevVregAllocationState*);
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
-
- private:
-  // For maintaining the interrupt_budget.
-  const base::Optional<uint32_t> relative_jump_bytecode_offset_;
 };
 
 class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
   using Base = UnconditionalControlNodeT<JumpLoop>;
 
  public:
-  explicit JumpLoop(uint64_t bitfield, BasicBlock* target,
-                    uint32_t relative_jump_bytecode_offset, int32_t loop_depth,
+  explicit JumpLoop(uint64_t bitfield, BasicBlock* target, int32_t loop_depth,
                     FeedbackSlot feedback_slot)
       : Base(bitfield, target),
-        relative_jump_bytecode_offset_(relative_jump_bytecode_offset),
         loop_depth_(loop_depth),
         feedback_slot_(feedback_slot) {}
 
-  explicit JumpLoop(uint64_t bitfield, BasicBlockRef* ref,
-                    uint32_t relative_jump_bytecode_offset, int32_t loop_depth,
+  explicit JumpLoop(uint64_t bitfield, BasicBlockRef* ref, int32_t loop_depth,
                     FeedbackSlot feedback_slot)
       : Base(bitfield, ref),
-        relative_jump_bytecode_offset_(relative_jump_bytecode_offset),
         loop_depth_(loop_depth),
         feedback_slot_(feedback_slot) {}
@@ -2358,8 +2389,6 @@ class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
 
  private:
-  // For maintaining the interrupt_budget.
-  const uint32_t relative_jump_bytecode_offset_;
   // For OSR.
   const int32_t loop_depth_;
   const FeedbackSlot feedback_slot_;
@@ -2387,27 +2416,17 @@ class JumpFromInlined : public UnconditionalControlNodeT<JumpFromInlined> {
   using Base = UnconditionalControlNodeT<JumpFromInlined>;
 
  public:
-  explicit JumpFromInlined(
-      uint64_t bitfield, BasicBlockRef* target_refs,
-      base::Optional<uint32_t> relative_jump_bytecode_offset = {})
-      : Base(bitfield, target_refs),
-        relative_jump_bytecode_offset_(relative_jump_bytecode_offset) {}
+  explicit JumpFromInlined(uint64_t bitfield, BasicBlockRef* target_refs)
+      : Base(bitfield, target_refs) {}
 
   void AllocateVreg(MaglevVregAllocationState*);
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
-
- private:
-  // For maintaining the interrupt_budget.
-  const base::Optional<uint32_t> relative_jump_bytecode_offset_;
 };
 
 class Return : public ControlNode {
  public:
-  explicit Return(uint64_t bitfield,
-                  base::Optional<uint32_t> relative_jump_bytecode_offset = {})
-      : ControlNode(bitfield),
-        relative_jump_bytecode_offset_(relative_jump_bytecode_offset) {
+  explicit Return(uint64_t bitfield) : ControlNode(bitfield) {
     DCHECK_EQ(NodeBase::opcode(), opcode_of<Return>);
   }
@@ -2416,10 +2435,6 @@ class Return : public ControlNode {
   void AllocateVreg(MaglevVregAllocationState*);
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
-
- private:
-  // For maintaining the interrupt_budget.
-  const base::Optional<uint32_t> relative_jump_bytecode_offset_;
 };
 
 class Deopt : public ControlNode {
--- a/src/maglev/maglev-regalloc.cc
+++ b/src/maglev/maglev-regalloc.cc
@@ -682,59 +682,90 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
                                                            BasicBlock* block) {
   current_node_ = node;
 
-  // We first allocate fixed inputs (including fixed temporaries), then inject
-  // phis (because these may be fixed too), and finally arbitrary inputs and
-  // temporaries.
-  for (Input& input : *node) AssignFixedInput(input);
-  AssignFixedTemporaries(node);
+  // Control nodes can't lazy deopt at the moment.
+  DCHECK(!node->properties().can_lazy_deopt());
 
   if (node->Is<JumpToInlined>()) {
     // Do nothing.
-    // TODO(leszeks): DCHECK any useful invariants here.
+    DCHECK(node->temporaries().is_empty());
+    DCHECK_EQ(node->num_temporaries_needed(), 0);
+    DCHECK_EQ(node->input_count(), 0);
+    DCHECK_EQ(node->properties(), OpProperties(0));
+
+    if (FLAG_trace_maglev_regalloc) {
+      printing_visitor_->Process(node,
+                                 ProcessingState(compilation_info_, block_it_));
+    }
+  } else if (node->Is<Deopt>()) {
+    // No fixed temporaries.
+    DCHECK(node->temporaries().is_empty());
+    DCHECK_EQ(node->num_temporaries_needed(), 0);
+    DCHECK_EQ(node->input_count(), 0);
+    DCHECK_EQ(node->properties(), OpProperties::EagerDeopt());
+
+    UpdateUse(*node->eager_deopt_info());
+
+    if (FLAG_trace_maglev_regalloc) {
+      printing_visitor_->Process(node,
+                                 ProcessingState(compilation_info_, block_it_));
+    }
   } else if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
+    // No fixed temporaries.
+    DCHECK(node->temporaries().is_empty());
+    DCHECK_EQ(node->num_temporaries_needed(), 0);
+    DCHECK_EQ(node->input_count(), 0);
+    DCHECK(!node->properties().can_eager_deopt());
+    DCHECK(!node->properties().can_lazy_deopt());
+
     // Initialize phis before assigning inputs, in case one of the inputs
     // conflicts with a fixed phi.
     InitializeBranchTargetPhis(block->predecessor_id(),
                                unconditional->target());
-  }
 
-  for (Input& input : *node) AssignArbitraryRegisterInput(input);
-  AssignArbitraryTemporaries(node);
+    DCHECK(!node->properties().is_call());
 
-  VerifyInputs(node);
-
-  if (node->properties().can_eager_deopt()) {
-    UpdateUse(*node->eager_deopt_info());
-  }
-  for (Input& input : *node) UpdateUse(&input);
-
-  if (node->properties().is_call()) SpillAndClearRegisters();
-
-  DCHECK_EQ(general_registers_.free() | node->temporaries(),
-            general_registers_.free());
-
-  general_registers_.clear_blocked();
-  double_registers_.clear_blocked();
-  VerifyRegisterState();
+    general_registers_.clear_blocked();
+    double_registers_.clear_blocked();
+    VerifyRegisterState();
 
-  if (FLAG_trace_maglev_regalloc) {
-    printing_visitor_->Process(node,
-                               ProcessingState(compilation_info_, block_it_));
-  }
+    if (FLAG_trace_maglev_regalloc) {
+      printing_visitor_->Process(node,
+                                 ProcessingState(compilation_info_, block_it_));
+    }
 
-  // Finally, initialize the merge states of branch targets, including the
-  // fallthrough, with the final state after all allocation
-  if (node->Is<JumpToInlined>()) {
-    // Do nothing.
-    // TODO(leszeks): DCHECK any useful invariants here.
-  } else if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
     // Merge register values. Values only flowing into phis and not being
     // independently live will be killed as part of the merge.
     MergeRegisterValues(unconditional, unconditional->target(),
                         block->predecessor_id());
-  } else if (auto conditional = node->TryCast<ConditionalControlNode>()) {
-    InitializeConditionalBranchTarget(conditional, conditional->if_true());
-    InitializeConditionalBranchTarget(conditional, conditional->if_false());
+  } else {
+    DCHECK(node->Is<ConditionalControlNode>() || node->Is<Return>());
+    AssignInputs(node);
+    VerifyInputs(node);
+
+    DCHECK(!node->properties().can_eager_deopt());
+    for (Input& input : *node) UpdateUse(&input);
+    DCHECK(!node->properties().can_lazy_deopt());
+
+    if (node->properties().is_call()) SpillAndClearRegisters();
+
+    DCHECK_EQ(general_registers_.free() | node->temporaries(),
+              general_registers_.free());
+
+    general_registers_.clear_blocked();
+    double_registers_.clear_blocked();
+    VerifyRegisterState();
+
+    if (FLAG_trace_maglev_regalloc) {
+      printing_visitor_->Process(node,
+                                 ProcessingState(compilation_info_, block_it_));
+    }
+
+    // Finally, initialize the merge states of branch targets, including the
+    // fallthrough, with the final state after all allocation
+    if (auto conditional = node->TryCast<ConditionalControlNode>()) {
+      InitializeConditionalBranchTarget(conditional, conditional->if_true());
+      InitializeConditionalBranchTarget(conditional, conditional->if_false());
+    }
   }
 
   VerifyRegisterState();
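This is the "invariant checks" half of the commit message: AllocateControlNode now dispatches on the node kind, and the kinds that initialize or merge phis (JumpToInlined, Deopt, unconditional jumps) are DCHECKed to need no inputs or temporaries, so no temporary gap moves can interleave with phi gap moves; only conditionals and Return run the general input-assignment path. A compressed model of that dispatch, with kinds and checks simplified for illustration:

#include <cassert>
#include <cstdio>

// Simplified kinds; the real code dispatches via Is<>/TryCast<>.
enum class Kind { kJumpToInlined, kDeopt, kUnconditional, kConditionalOrReturn };

struct ControlNodeModel {
  Kind kind;
  int input_count = 0;
  int temporaries_needed = 0;
};

void AllocateControlNode(const ControlNodeModel& node) {
  switch (node.kind) {
    case Kind::kJumpToInlined:
    case Kind::kDeopt:
    case Kind::kUnconditional:
      // Phi-adjacent / trivial nodes must not allocate anything, so no
      // temporary gap moves can mix with the phi gap moves.
      assert(node.input_count == 0 && node.temporaries_needed == 0);
      break;
    case Kind::kConditionalOrReturn:
      // Only these run the general path: assign inputs, verify, spill
      // registers around calls, then initialize branch targets.
      std::printf("general allocation path\n");
      break;
  }
}

int main() {
  AllocateControlNode({Kind::kUnconditional});
  AllocateControlNode({Kind::kConditionalOrReturn, /*input_count=*/1});
}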
@@ -906,7 +937,7 @@ void StraightForwardRegisterAllocator::AssignArbitraryRegisterInput(
   }
 }
 
-void StraightForwardRegisterAllocator::AssignInputs(Node* node) {
+void StraightForwardRegisterAllocator::AssignInputs(NodeBase* node) {
   // We allocate arbitrary register inputs after fixed inputs, since the fixed
   // inputs may clobber the arbitrarily chosen ones.
   for (Input& input : *node) AssignFixedInput(input);
--- a/src/maglev/maglev-regalloc.h
+++ b/src/maglev/maglev-regalloc.h
@@ -143,7 +143,7 @@ class StraightForwardRegisterAllocator {
   void AllocateNodeResult(ValueNode* node);
   void AssignFixedInput(Input& input);
   void AssignArbitraryRegisterInput(Input& input);
-  void AssignInputs(Node* node);
+  void AssignInputs(NodeBase* node);
   void AssignFixedTemporaries(NodeBase* node);
   void AssignArbitraryTemporaries(NodeBase* node);
   void TryAllocateToInput(Phi* phi);