Commit 7ea0412e authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Split interrupt budget updates to separate nodes

Having interrupt budget updates be part of register allocation caused
various difficulties around gap moves for temporaries vs. gap moves for
phis. This patch splits them off into a separate node which is
separately allocated, and adds invariant checks that phi-updating nodes
don't do any other tricky register allocation.

Bug: v8:7700
Change-Id: I5a454fe4c5a5adff08d5a327ee34fbb43cda97ce
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3751196
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81604}
parent d34170f2
......@@ -1130,11 +1130,8 @@ void MaglevGraphBuilder::InlineCallFromRegisters(
inner_graph_builder.ProcessMergePoint(
inner_graph_builder.inline_exit_offset());
inner_graph_builder.StartNewBlock(inner_graph_builder.inline_exit_offset());
// See also: InterpreterAssembler::UpdateInterruptBudgetOnReturn.
const uint32_t relative_jump_bytecode_offset =
inner_graph_builder.iterator_.current_offset();
BasicBlock* end_block = inner_graph_builder.CreateBlock<JumpFromInlined>(
{}, &end_ref, relative_jump_bytecode_offset);
BasicBlock* end_block =
inner_graph_builder.CreateBlock<JumpFromInlined>({}, &end_ref);
inner_graph_builder.ResolveJumpsToBlockAtOffset(
end_block, inner_graph_builder.inline_exit_offset());
......@@ -1386,14 +1383,17 @@ void MaglevGraphBuilder::VisitJumpLoop() {
const int32_t loop_offset = iterator_.GetImmediateOperand(1);
const FeedbackSlot feedback_slot = iterator_.GetSlotOperand(2);
int target = iterator_.GetJumpTargetOffset();
if (relative_jump_bytecode_offset > 0) {
AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
}
BasicBlock* block =
target == iterator_.current_offset()
? FinishBlock<JumpLoop>(next_offset(), {}, &jump_targets_[target],
relative_jump_bytecode_offset, loop_offset,
feedback_slot)
: FinishBlock<JumpLoop>(
next_offset(), {}, jump_targets_[target].block_ptr(),
relative_jump_bytecode_offset, loop_offset, feedback_slot);
loop_offset, feedback_slot)
: FinishBlock<JumpLoop>(next_offset(), {},
jump_targets_[target].block_ptr(),
loop_offset, feedback_slot);
merge_states_[target]->MergeLoop(*compilation_unit_,
current_interpreter_frame_, block, target);
......@@ -1402,9 +1402,11 @@ void MaglevGraphBuilder::VisitJumpLoop() {
void MaglevGraphBuilder::VisitJump() {
const uint32_t relative_jump_bytecode_offset =
iterator_.GetUnsignedImmediateOperand(0);
if (relative_jump_bytecode_offset > 0) {
AddNewNode<IncreaseInterruptBudget>({}, relative_jump_bytecode_offset);
}
BasicBlock* block = FinishBlock<Jump>(
next_offset(), {}, &jump_targets_[iterator_.GetJumpTargetOffset()],
relative_jump_bytecode_offset);
next_offset(), {}, &jump_targets_[iterator_.GetJumpTargetOffset()]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
DCHECK_LT(next_offset(), bytecode().length());
}
......@@ -1545,9 +1547,12 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(ReThrow)
void MaglevGraphBuilder::VisitReturn() {
// See also: InterpreterAssembler::UpdateInterruptBudgetOnReturn.
const uint32_t relative_jump_bytecode_offset = iterator_.current_offset();
if (relative_jump_bytecode_offset > 0) {
AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
}
if (!is_inline()) {
FinishBlock<Return>(next_offset(), {GetAccumulatorTagged()},
relative_jump_bytecode_offset);
FinishBlock<Return>(next_offset(), {GetAccumulatorTagged()});
return;
}
......@@ -1556,9 +1561,8 @@ void MaglevGraphBuilder::VisitReturn() {
// execution of the caller.
// TODO(leszeks): Consider shortcutting this Jump for cases where there is
// only one return and no need to merge return states.
BasicBlock* block =
FinishBlock<Jump>(next_offset(), {}, &jump_targets_[inline_exit_offset()],
relative_jump_bytecode_offset);
BasicBlock* block = FinishBlock<Jump>(next_offset(), {},
&jump_targets_[inline_exit_offset()]);
MergeIntoInlinedReturnFrameState(block);
}
MAGLEV_UNIMPLEMENTED_BYTECODE(ThrowReferenceErrorIfHole)
......
......@@ -75,6 +75,8 @@ class MaglevGraphVerifier {
case Opcode::kRegisterInput:
case Opcode::kRootConstant:
case Opcode::kSmiConstant:
case Opcode::kIncreaseInterruptBudget:
case Opcode::kReduceInterruptBudget:
// No input.
DCHECK_EQ(node->input_count(), 0);
break;
......
......@@ -1543,52 +1543,60 @@ void Construct::GenerateCode(MaglevCodeGenState* code_gen_state,
code_gen_state->DefineLazyDeoptPoint(lazy_deopt_info());
}
namespace {
void AttemptOnStackReplacement(MaglevCodeGenState* code_gen_state,
int32_t loop_depth, FeedbackSlot feedback_slot) {
// TODO(v8:7700): Implement me. See also
// InterpreterAssembler::OnStackReplacement.
// Register-allocation hook: this node has no inputs and no result, but
// requests one scratch register, which GenerateCode uses to hold the
// function's feedback cell.
void IncreaseInterruptBudget::AllocateVreg(
MaglevVregAllocationState* vreg_state) {
set_temporaries_needed(1);
}
void UpdateInterruptBudgetAndMaybeCallRuntime(
MaglevCodeGenState* code_gen_state, Register scratch,
int32_t relative_jump_bytecode_offset) {
// TODO(v8:7700): Remove once regalloc is fixed. See crrev.com/c/3625978.
__ Push(scratch);
void IncreaseInterruptBudget::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register scratch = temporaries().first();
__ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
__ addl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
Immediate(relative_jump_bytecode_offset));
// Only check the interrupt if the above add can drop the interrupt budget
// below zero.
if (relative_jump_bytecode_offset < 0) {
JumpToDeferredIf(
less, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label) {
// TODO(leszeks): Only save registers if they're not free (requires
// fixing the regalloc, same as for scratch).
__ PushCallerSaved(SaveFPRegsMode::kSave);
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck, 1);
__ PopCallerSaved(SaveFPRegsMode::kSave);
__ jmp(return_label);
});
}
Immediate(amount()));
}
// Appends this node's parameter as "(amount)" to the graph printout.
void IncreaseInterruptBudget::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
os << "(" << amount() << ")";
}
// TODO(v8:7700): Remove once regalloc is fixed. See crrev.com/c/3625978.
__ Pop(scratch);
// Register-allocation hook: this node has no inputs and no result, but
// requests one scratch register, which GenerateCode uses to hold the
// function's feedback cell.
void ReduceInterruptBudget::AllocateVreg(
MaglevVregAllocationState* vreg_state) {
set_temporaries_needed(1);
}
// Subtracts amount() from the current function's interrupt budget (stored in
// its FeedbackCell) and, if the budget drops below zero, calls the
// BytecodeBudgetInterruptWithStackCheck runtime function from deferred code.
void ReduceInterruptBudget::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register scratch = temporaries().first();
// Load the JSFunction from its standard frame slot, then chase it to the
// feedback cell that holds the interrupt budget.
__ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
__ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
Immediate(amount()));
// The subl above set the condition flags; take the deferred slow path when
// the budget went negative.
JumpToDeferredIf(
less, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label) {
// TODO(leszeks): Only save registers if they're not free (requires
// fixing the regalloc, same as for scratch).
__ PushCallerSaved(SaveFPRegsMode::kSave);
__ Move(kContextRegister, code_gen_state->native_context().object());
// The runtime call takes the function as its single argument.
__ Push(MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck, 1);
__ PopCallerSaved(SaveFPRegsMode::kSave);
__ jmp(return_label);
});
}
// Appends this node's parameter as "(amount)" to the graph printout.
void ReduceInterruptBudget::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
os << "(" << amount() << ")";
}
void UpdateInterruptBudgetAndMaybeCallRuntime(
MaglevCodeGenState* code_gen_state, Register scratch,
base::Optional<uint32_t> relative_jump_bytecode_offset) {
if (!relative_jump_bytecode_offset.has_value()) return;
UpdateInterruptBudgetAndMaybeCallRuntime(
code_gen_state, scratch, relative_jump_bytecode_offset.value());
namespace {
// Placeholder for triggering on-stack replacement at loop back-edges;
// currently a no-op (parameters are unused until it is implemented).
void AttemptOnStackReplacement(MaglevCodeGenState* code_gen_state,
int32_t loop_depth, FeedbackSlot feedback_slot) {
// TODO(v8:7700): Implement me. See also
// InterpreterAssembler::OnStackReplacement.
}
} // namespace
......@@ -1603,13 +1611,6 @@ void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
// We're not going to continue execution, so we can use an arbitrary register
// here instead of relying on temporaries from the register allocator.
Register scratch = r8;
UpdateInterruptBudgetAndMaybeCallRuntime(code_gen_state, scratch,
relative_jump_bytecode_offset_);
// Read the formal number of parameters from the top level compilation unit
// (i.e. the outermost, non inlined function).
int formal_params_size = code_gen_state->compilation_info()
......@@ -1618,7 +1619,7 @@ void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
// We're not going to continue execution, so we can use an arbitrary register
// here instead of relying on temporaries from the register allocator.
Register actual_params_size = scratch;
Register actual_params_size = r8;
// Compute the size of the actual parameters + receiver (in bytes).
// TODO(leszeks): Consider making this an input into Return to re-use the
......@@ -1652,14 +1653,9 @@ void Deopt::GenerateCode(MaglevCodeGenState* code_gen_state,
EmitEagerDeopt(code_gen_state, this);
}
void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state) {
set_temporaries_needed(1);
}
// Jump no longer needs any temporaries: the interrupt-budget update it used
// to perform now lives in a separate node.
void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void Jump::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
UpdateInterruptBudgetAndMaybeCallRuntime(
code_gen_state, temporaries().PopFirst(), relative_jump_bytecode_offset_);
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
__ jmp(target()->label());
......@@ -1679,29 +1675,19 @@ void JumpToInlined::PrintParams(std::ostream& os,
os << "(" << Brief(*unit()->shared_function_info().object()) << ")";
}
void JumpFromInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {
set_temporaries_needed(1);
}
// JumpFromInlined no longer needs any temporaries: the interrupt-budget
// update it used to perform now lives in a separate node.
void JumpFromInlined::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void JumpFromInlined::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
UpdateInterruptBudgetAndMaybeCallRuntime(
code_gen_state, temporaries().PopFirst(), relative_jump_bytecode_offset_);
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
__ jmp(target()->label());
}
}
void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state) {
set_temporaries_needed(1);
}
// JumpLoop no longer needs any temporaries: the interrupt-budget update it
// used to perform now lives in a separate node.
void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void JumpLoop::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
AttemptOnStackReplacement(code_gen_state, loop_depth_, feedback_slot_);
UpdateInterruptBudgetAndMaybeCallRuntime(code_gen_state,
temporaries().PopFirst(),
-relative_jump_bytecode_offset_);
__ jmp(target()->label());
}
......
......@@ -144,13 +144,15 @@ class CompactInterpreterFrameState;
V(ConstantGapMove) \
V(GapMove)
#define NODE_LIST(V) \
V(CheckMaps) \
V(CheckSmi) \
V(CheckHeapObject) \
V(CheckMapsWithMigration) \
V(StoreField) \
GAP_MOVE_NODE_LIST(V) \
#define NODE_LIST(V) \
V(CheckMaps) \
V(CheckSmi) \
V(CheckHeapObject) \
V(CheckMapsWithMigration) \
V(StoreField) \
V(IncreaseInterruptBudget) \
V(ReduceInterruptBudget) \
GAP_MOVE_NODE_LIST(V) \
VALUE_NODE_LIST(V)
#define CONDITIONAL_CONTROL_NODE_LIST(V) \
......@@ -2111,6 +2113,45 @@ class Construct : public ValueNodeT<Construct> {
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
// A zero-input, no-result node that adds a statically-known, strictly
// positive amount to the current function's interrupt budget. Split out of
// the jump control nodes so that its scratch register is allocated
// independently of any phi gap moves.
class IncreaseInterruptBudget
: public FixedInputNodeT<0, IncreaseInterruptBudget> {
using Base = FixedInputNodeT<0, IncreaseInterruptBudget>;
public:
// `amount` must be > 0; use ReduceInterruptBudget to decrease the budget.
explicit IncreaseInterruptBudget(uint64_t bitfield, int amount)
: Base(bitfield), amount_(amount) {
DCHECK_GT(amount, 0);
}
// Number of units added to the interrupt budget at code-generation time.
int amount() const { return amount_; }
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
const int amount_;
};
// A zero-input, no-result node that subtracts a statically-known, strictly
// positive amount from the current function's interrupt budget, calling the
// budget-interrupt runtime function if the budget drops below zero (see
// GenerateCode). Split out of the jump/return control nodes so that its
// scratch register is allocated independently of any phi gap moves.
class ReduceInterruptBudget : public FixedInputNodeT<0, ReduceInterruptBudget> {
using Base = FixedInputNodeT<0, ReduceInterruptBudget>;
public:
// `amount` must be > 0; use IncreaseInterruptBudget to increase the budget.
explicit ReduceInterruptBudget(uint64_t bitfield, int amount)
: Base(bitfield), amount_(amount) {
DCHECK_GT(amount, 0);
}
// Number of units subtracted from the interrupt budget at code-generation
// time.
int amount() const { return amount_; }
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
const int amount_;
};
// Represents either a direct BasicBlock pointer, or an entry in a list of
// unresolved BasicBlockRefs which will be mutated (in place) at some point into
// direct BasicBlock pointers.
......@@ -2319,37 +2360,27 @@ class Jump : public UnconditionalControlNodeT<Jump> {
using Base = UnconditionalControlNodeT<Jump>;
public:
Jump(uint64_t bitfield, BasicBlockRef* target_refs,
base::Optional<uint32_t> relative_jump_bytecode_offset = {})
: Base(bitfield, target_refs),
relative_jump_bytecode_offset_(relative_jump_bytecode_offset) {}
Jump(uint64_t bitfield, BasicBlockRef* target_refs)
: Base(bitfield, target_refs) {}
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
private:
// For maintaining the interrupt_budget.
const base::Optional<uint32_t> relative_jump_bytecode_offset_;
};
class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
using Base = UnconditionalControlNodeT<JumpLoop>;
public:
explicit JumpLoop(uint64_t bitfield, BasicBlock* target,
uint32_t relative_jump_bytecode_offset, int32_t loop_depth,
explicit JumpLoop(uint64_t bitfield, BasicBlock* target, int32_t loop_depth,
FeedbackSlot feedback_slot)
: Base(bitfield, target),
relative_jump_bytecode_offset_(relative_jump_bytecode_offset),
loop_depth_(loop_depth),
feedback_slot_(feedback_slot) {}
explicit JumpLoop(uint64_t bitfield, BasicBlockRef* ref,
uint32_t relative_jump_bytecode_offset, int32_t loop_depth,
explicit JumpLoop(uint64_t bitfield, BasicBlockRef* ref, int32_t loop_depth,
FeedbackSlot feedback_slot)
: Base(bitfield, ref),
relative_jump_bytecode_offset_(relative_jump_bytecode_offset),
loop_depth_(loop_depth),
feedback_slot_(feedback_slot) {}
......@@ -2358,8 +2389,6 @@ class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
private:
// For maintaining the interrupt_budget.
const uint32_t relative_jump_bytecode_offset_;
// For OSR.
const int32_t loop_depth_;
const FeedbackSlot feedback_slot_;
......@@ -2387,27 +2416,17 @@ class JumpFromInlined : public UnconditionalControlNodeT<JumpFromInlined> {
using Base = UnconditionalControlNodeT<JumpFromInlined>;
public:
explicit JumpFromInlined(
uint64_t bitfield, BasicBlockRef* target_refs,
base::Optional<uint32_t> relative_jump_bytecode_offset = {})
: Base(bitfield, target_refs),
relative_jump_bytecode_offset_(relative_jump_bytecode_offset) {}
explicit JumpFromInlined(uint64_t bitfield, BasicBlockRef* target_refs)
: Base(bitfield, target_refs) {}
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
private:
// For maintaining the interrupt_budget.
const base::Optional<uint32_t> relative_jump_bytecode_offset_;
};
class Return : public ControlNode {
public:
explicit Return(uint64_t bitfield,
base::Optional<uint32_t> relative_jump_bytecode_offset = {})
: ControlNode(bitfield),
relative_jump_bytecode_offset_(relative_jump_bytecode_offset) {
explicit Return(uint64_t bitfield) : ControlNode(bitfield) {
DCHECK_EQ(NodeBase::opcode(), opcode_of<Return>);
}
......@@ -2416,10 +2435,6 @@ class Return : public ControlNode {
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
private:
// For maintaining the interrupt_budget.
const base::Optional<uint32_t> relative_jump_bytecode_offset_;
};
class Deopt : public ControlNode {
......
......@@ -682,59 +682,90 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
BasicBlock* block) {
current_node_ = node;
// We first allocate fixed inputs (including fixed temporaries), then inject
// phis (because these may be fixed too), and finally arbitrary inputs and
// temporaries.
for (Input& input : *node) AssignFixedInput(input);
AssignFixedTemporaries(node);
// Control nodes can't lazy deopt at the moment.
DCHECK(!node->properties().can_lazy_deopt());
if (node->Is<JumpToInlined>()) {
// Do nothing.
// TODO(leszeks): DCHECK any useful invariants here.
DCHECK(node->temporaries().is_empty());
DCHECK_EQ(node->num_temporaries_needed(), 0);
DCHECK_EQ(node->input_count(), 0);
DCHECK_EQ(node->properties(), OpProperties(0));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(node,
ProcessingState(compilation_info_, block_it_));
}
} else if (node->Is<Deopt>()) {
// No fixed temporaries.
DCHECK(node->temporaries().is_empty());
DCHECK_EQ(node->num_temporaries_needed(), 0);
DCHECK_EQ(node->input_count(), 0);
DCHECK_EQ(node->properties(), OpProperties::EagerDeopt());
UpdateUse(*node->eager_deopt_info());
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(node,
ProcessingState(compilation_info_, block_it_));
}
} else if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
// No fixed temporaries.
DCHECK(node->temporaries().is_empty());
DCHECK_EQ(node->num_temporaries_needed(), 0);
DCHECK_EQ(node->input_count(), 0);
DCHECK(!node->properties().can_eager_deopt());
DCHECK(!node->properties().can_lazy_deopt());
// Initialize phis before assigning inputs, in case one of the inputs
// conflicts with a fixed phi.
InitializeBranchTargetPhis(block->predecessor_id(),
unconditional->target());
}
for (Input& input : *node) AssignArbitraryRegisterInput(input);
AssignArbitraryTemporaries(node);
VerifyInputs(node);
if (node->properties().can_eager_deopt()) {
UpdateUse(*node->eager_deopt_info());
}
for (Input& input : *node) UpdateUse(&input);
DCHECK(!node->properties().is_call());
if (node->properties().is_call()) SpillAndClearRegisters();
DCHECK_EQ(general_registers_.free() | node->temporaries(),
general_registers_.free());
general_registers_.clear_blocked();
double_registers_.clear_blocked();
VerifyRegisterState();
general_registers_.clear_blocked();
double_registers_.clear_blocked();
VerifyRegisterState();
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(node,
ProcessingState(compilation_info_, block_it_));
}
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(node,
ProcessingState(compilation_info_, block_it_));
}
// Finally, initialize the merge states of branch targets, including the
// fallthrough, with the final state after all allocation
if (node->Is<JumpToInlined>()) {
// Do nothing.
// TODO(leszeks): DCHECK any useful invariants here.
} else if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
// Merge register values. Values only flowing into phis and not being
// independently live will be killed as part of the merge.
MergeRegisterValues(unconditional, unconditional->target(),
block->predecessor_id());
} else if (auto conditional = node->TryCast<ConditionalControlNode>()) {
InitializeConditionalBranchTarget(conditional, conditional->if_true());
InitializeConditionalBranchTarget(conditional, conditional->if_false());
} else {
DCHECK(node->Is<ConditionalControlNode>() || node->Is<Return>());
AssignInputs(node);
VerifyInputs(node);
DCHECK(!node->properties().can_eager_deopt());
for (Input& input : *node) UpdateUse(&input);
DCHECK(!node->properties().can_lazy_deopt());
if (node->properties().is_call()) SpillAndClearRegisters();
DCHECK_EQ(general_registers_.free() | node->temporaries(),
general_registers_.free());
general_registers_.clear_blocked();
double_registers_.clear_blocked();
VerifyRegisterState();
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(node,
ProcessingState(compilation_info_, block_it_));
}
// Finally, initialize the merge states of branch targets, including the
// fallthrough, with the final state after all allocation
if (auto conditional = node->TryCast<ConditionalControlNode>()) {
InitializeConditionalBranchTarget(conditional, conditional->if_true());
InitializeConditionalBranchTarget(conditional, conditional->if_false());
}
}
VerifyRegisterState();
......@@ -906,7 +937,7 @@ void StraightForwardRegisterAllocator::AssignArbitraryRegisterInput(
}
}
void StraightForwardRegisterAllocator::AssignInputs(Node* node) {
void StraightForwardRegisterAllocator::AssignInputs(NodeBase* node) {
// We allocate arbitrary register inputs after fixed inputs, since the fixed
// inputs may clobber the arbitrarily chosen ones.
for (Input& input : *node) AssignFixedInput(input);
......
......@@ -143,7 +143,7 @@ class StraightForwardRegisterAllocator {
void AllocateNodeResult(ValueNode* node);
void AssignFixedInput(Input& input);
void AssignArbitraryRegisterInput(Input& input);
void AssignInputs(Node* node);
void AssignInputs(NodeBase* node);
void AssignFixedTemporaries(NodeBase* node);
void AssignArbitraryTemporaries(NodeBase* node);
void TryAllocateToInput(Phi* phi);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.