Commit 7a93bd64 authored by Jakob Kummerow, committed by V8 LUCI CQ

[wasm] Execution budget based dynamic tiering

Temporarily behind a new flag: --new-wasm-dynamic-tiering
The plan is to merge this into the existing --wasm-dynamic-tiering
flag once it's been confirmed to be generally beneficial.

Bug: v8:12281
Change-Id: I191d03170f8d5360073a45fea170f432074f7534
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3247632
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77620}
parent 39d58fd9
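
For orientation: this change gives every Liftoff-compiled function a budget of --wasm-tiering-budget units, charges loop back edges and function exits a rough byte-count of executed code against it, and calls the tier-up runtime function once the budget is exhausted, which resets the budget and schedules top-tier compilation on the first trigger and on power-of-two trigger counts of at least 4. The sketch below is an illustrative, self-contained simulation of that flow, not code from this change; names such as FunctionState, kMaxChargePerCheck and the simplified TierupCheck signature are assumptions for illustration only.

// Minimal sketch (not V8 code) of the budget-based tiering mechanism.
#include <cstdio>

constexpr int kTieringBudget = 1800000;              // --wasm-tiering-budget
constexpr int kMaxChargePerCheck = kTieringBudget / 4;

struct FunctionState {
  int budget = kTieringBudget;  // decremented as the function executes
  int tierup_priority = 0;      // how often the budget has run out
  bool top_tier = false;        // stands in for a scheduled top-tier unit
};

// Charge {cost} (a rough proxy for bytes of code executed) against the
// function's budget; when it goes negative, reset it and maybe schedule
// top-tier compilation, mirroring the out-of-line tierup check.
void TierupCheck(FunctionState* fn, int cost) {
  if (cost > kMaxChargePerCheck) cost = kMaxChargePerCheck;
  fn->budget -= cost;
  if (fn->budget >= 0) return;
  fn->budget = kTieringBudget;
  int p = ++fn->tierup_priority;
  // Compile on the first trigger, then on power-of-two trigger counts >= 4.
  if (p == 1 || (p >= 4 && (p & (p - 1)) == 0)) fn->top_tier = true;
}

int main() {
  FunctionState fn;
  // A hot loop: each back edge charges its jump distance against the budget.
  for (int i = 0; i < 100000 && !fn.top_tier; ++i) {
    TierupCheck(&fn, /*jump_distance=*/64);
  }
  std::printf("triggers: %d, top tier: %s\n", fn.tierup_priority,
              fn.top_tier ? "yes" : "no");
  return 0;
}

In the actual CL the budget lives in a dedicated Liftoff stack slot while the function runs and is written back to a per-function counter array in the NativeModule on exit, as the diff below shows.
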
......@@ -994,6 +994,12 @@ DEFINE_BOOL(wasm_tier_up, true,
"have an effect)")
DEFINE_BOOL(wasm_dynamic_tiering, false,
"enable dynamic tier up to the optimizing compiler")
DEFINE_BOOL(new_wasm_dynamic_tiering, false, "dynamic tier up (new impl)")
// For dynamic tiering to have an effect, we have to turn off eager tierup.
// This is handled in module-compiler.cc for --wasm-dynamic-tiering.
DEFINE_NEG_IMPLICATION(new_wasm_dynamic_tiering, wasm_tier_up)
DEFINE_INT(wasm_tiering_budget, 1800000,
"budget for dynamic tiering (rough approximation of bytes executed")
DEFINE_INT(
wasm_caching_threshold, 1000000,
"the amount of wasm top tier code that triggers the next caching event")
......@@ -1100,7 +1106,6 @@ DEFINE_BOOL(trace_wasm_speculative_inlining, false,
"trace wasm speculative inlining")
DEFINE_IMPLICATION(wasm_speculative_inlining, experimental_wasm_typed_funcref)
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_inlining)
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_dynamic_tiering)
DEFINE_NEG_IMPLICATION(wasm_speculative_inlining, wasm_tier_up)
DEFINE_BOOL(wasm_loop_unrolling, true,
"enable loop unrolling for wasm functions")
......
......@@ -292,6 +292,16 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
if (FLAG_new_wasm_dynamic_tiering) {
// We're reusing this interrupt mechanism to interrupt long-running loops.
StackLimitCheck check(isolate);
DCHECK(!check.JsHasOverflowed());
if (check.InterruptRequested()) {
Object result = isolate->stack_guard()->HandleInterrupts();
if (result.IsException()) return result;
}
}
FrameFinder<WasmFrame> frame_finder(isolate);
int func_index = frame_finder.frame()->function_index();
auto* native_module = instance->module_object().native_module();
......
......@@ -58,11 +58,12 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// -1 | 0xa: WASM |
// -2 | instance |
// -3 | feedback vector |
// -4 | tiering budget |
// -----+--------------------+---------------------------
// -4 | slot 0 (high) | ^
// -5 | slot 0 (low) | |
// -6 | slot 1 (high) | Frame slots
// -7 | slot 1 (low) | |
// -5 | slot 0 (high) | ^
// -6 | slot 0 (low) | |
// -7 | slot 1 (high) | Frame slots
// -8 | slot 1 (low) | |
// | | v
// -----+--------------------+ <-- stack ptr (sp)
//
......@@ -70,6 +71,7 @@ static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize,
"Slot size should be twice the size of the 32 bit pointer.");
constexpr int kInstanceOffset = 2 * kSystemPointerSize;
constexpr int kFeedbackVectorOffset = 3 * kSystemPointerSize;
constexpr int kTierupBudgetOffset = 4 * kSystemPointerSize;
// kPatchInstructionsRequired sets a maximum limit of how many instructions that
// PatchPrepareStackFrame will use in order to increase the stack appropriately.
// Three instructions are required to sub a large constant, movw + movt + sub.
......@@ -559,7 +561,7 @@ void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
// static
constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kFeedbackVectorOffset;
return liftoff::kTierupBudgetOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
......@@ -2239,6 +2241,13 @@ void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
b(label, cond);
}
void LiftoffAssembler::emit_i32_subi_jump_negative(Register value,
int subtrahend,
Label* result_negative) {
sub(value, value, Operand(subtrahend), SetCC);
b(result_negative, mi);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
clz(dst, src);
mov(dst, Operand(dst, LSR, kRegSizeInBitsLog2));
......
......@@ -58,9 +58,10 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// -1 | 0xa: WASM |
// -2 | instance |
// -3 | feedback vector|
// -4 | tiering budget |
// -----+--------------------+---------------------------
// -4 | slot 0 | ^
// -5 | slot 1 | |
// -5 | slot 0 | ^
// -6 | slot 1 | |
// | | Frame slots
// | | |
// | | v
......@@ -70,6 +71,7 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
constexpr int kInstanceOffset = 2 * kSystemPointerSize;
constexpr int kFeedbackVectorOffset = 3 * kSystemPointerSize;
constexpr int kTierupBudgetOffset = 4 * kSystemPointerSize;
inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
......@@ -386,7 +388,7 @@ void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
// static
constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kFeedbackVectorOffset;
return liftoff::kTierupBudgetOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
......@@ -1590,6 +1592,13 @@ void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
B(label, cond);
}
void LiftoffAssembler::emit_i32_subi_jump_negative(Register value,
int subtrahend,
Label* result_negative) {
Subs(value.W(), value.W(), Immediate(subtrahend));
B(result_negative, mi);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
Cmp(src.W(), wzr);
Cset(dst.W(), eq);
......
......@@ -52,6 +52,7 @@ inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
// ebp-4 holds the stack marker, ebp-8 is the instance parameter.
constexpr int kInstanceOffset = 8;
constexpr int kFeedbackVectorOffset = 12; // ebp-12 is the feedback vector.
constexpr int kTierupBudgetOffset = 16; // ebp-16 is the tiering budget.
inline Operand GetStackSlot(int offset) { return Operand(ebp, -offset); }
......@@ -308,7 +309,7 @@ void LiftoffAssembler::AbortCompilation() {}
// static
constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kFeedbackVectorOffset;
return liftoff::kTierupBudgetOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
......@@ -2488,6 +2489,13 @@ void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
j(cond, label);
}
void LiftoffAssembler::emit_i32_subi_jump_negative(Register value,
int subtrahend,
Label* result_negative) {
sub(value, Immediate(subtrahend));
j(negative, result_negative);
}
namespace liftoff {
// Setcc into dst register, given a scratch byte register (might be the same as
......
......@@ -1012,6 +1012,8 @@ class LiftoffAssembler : public TurboAssembler {
Register lhs, Register rhs = no_reg);
inline void emit_i32_cond_jumpi(LiftoffCondition, Label*, Register lhs,
int imm);
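// Subtract {subtrahend} from {value}; if the result is negative, jump to
// {result_negative}. Used by the dynamic tiering budget check.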
inline void emit_i32_subi_jump_negative(Register value, int subtrahend,
Label* result_negative);
// Set {dst} to 1 if condition holds, 0 otherwise.
inline void emit_i32_eqz(Register dst, Register src);
inline void emit_i32_set_cond(LiftoffCondition, Register dst, Register lhs,
......
......@@ -129,6 +129,12 @@ compiler::CallDescriptor* GetLoweredCallDescriptor(
: call_desc;
}
constexpr LiftoffRegList GetGpParamRegisters() {
LiftoffRegList registers;
for (auto reg : kGpParamRegisters) registers.set(reg);
return registers;
}
constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
switch (opcode) {
case kExprI32Eq:
......@@ -454,6 +460,24 @@ class LiftoffCompiler {
debug_sidetable_entry_builder // debug_side_table_entry_builder
};
}
static OutOfLineCode TierupCheck(
WasmCodePosition pos, LiftoffRegList regs_to_save,
Register cached_instance, SpilledRegistersForInspection* spilled_regs,
OutOfLineSafepointInfo* safepoint_info,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
return {
{}, // label
{}, // continuation,
WasmCode::kWasmTriggerTierUp, // stub
pos, // position
regs_to_save, // regs_to_save
cached_instance, // cached_instance
safepoint_info, // safepoint_info
0, // pc
spilled_regs, // spilled_registers
debug_sidetable_entry_builder // debug_side_table_entry_builder
};
}
};
LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
......@@ -696,6 +720,52 @@ class LiftoffCompiler {
__ bind(ool.continuation.get());
}
void TierupCheck(FullDecoder* decoder, WasmCodePosition position,
int budget_used) {
CODE_COMMENT("tierup check");
// We never want to blow the entire budget at once.
const int kMax = FLAG_wasm_tiering_budget / 4;
if (budget_used > kMax) budget_used = kMax;
LiftoffRegister budget_reg = __ GetUnusedRegister(kGpReg, {});
__ Fill(budget_reg, liftoff::kTierupBudgetOffset, ValueKind::kI32);
LiftoffRegList regs_to_save = __ cache_state()->used_registers;
// The cached instance will be reloaded separately.
if (__ cache_state()->cached_instance != no_reg) {
DCHECK(regs_to_save.has(__ cache_state()->cached_instance));
regs_to_save.clear(__ cache_state()->cached_instance);
}
SpilledRegistersForInspection* spilled_regs = nullptr;
OutOfLineSafepointInfo* safepoint_info =
compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
__ cache_state()->GetTaggedSlotsForOOLCode(
&safepoint_info->slots, &safepoint_info->spills,
for_debugging_
? LiftoffAssembler::CacheState::SpillLocation::kStackSlots
: LiftoffAssembler::CacheState::SpillLocation::kTopOfStack);
if (V8_UNLIKELY(for_debugging_)) {
// When debugging, we do not just push all registers to the stack, but we
// spill them to their proper stack locations such that we can inspect
// them.
// The only exception is the cached memory start, which we just push
// before the tierup check and pop afterwards.
regs_to_save = {};
if (__ cache_state()->cached_mem_start != no_reg) {
regs_to_save.set(__ cache_state()->cached_mem_start);
}
spilled_regs = GetSpilledRegistersForInspection();
}
out_of_line_code_.push_back(OutOfLineCode::TierupCheck(
position, regs_to_save, __ cache_state()->cached_instance, spilled_regs,
safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
OutOfLineCode& ool = out_of_line_code_.back();
__ emit_i32_subi_jump_negative(budget_reg.gp(), budget_used,
ool.label.get());
__ Spill(liftoff::kTierupBudgetOffset, budget_reg, ValueKind::kI32);
__ bind(ool.continuation.get());
}
bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
int actual_locals = __ num_locals() - num_params;
DCHECK_LE(0, actual_locals);
......@@ -759,12 +829,12 @@ class LiftoffCompiler {
.AsRegister()));
__ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
// Load the feedback vector and cache it in a stack slot.
constexpr LiftoffRegList parameter_registers = GetGpParamRegisters();
if (FLAG_wasm_speculative_inlining) {
int declared_func_index =
func_index_ - env_->module->num_imported_functions;
DCHECK_GE(declared_func_index, 0);
LiftoffRegList pinned;
for (auto reg : kGpParamRegisters) pinned.set(reg);
LiftoffRegList pinned = parameter_registers;
LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadTaggedPointerFromInstance(
tmp.gp(), kWasmInstanceRegister,
......@@ -777,6 +847,18 @@ class LiftoffCompiler {
} else {
__ Spill(liftoff::kFeedbackVectorOffset, WasmValue::ForUintPtr(0));
}
if (FLAG_new_wasm_dynamic_tiering) {
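// Load this function's remaining tiering budget from the per-function
// counter array and cache it in its dedicated stack slot.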
LiftoffRegList pinned = parameter_registers;
LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_INSTANCE_FIELD(tmp.gp(), NumLiftoffFunctionCallsArray,
kSystemPointerSize, pinned);
uint32_t offset =
kInt32Size * declared_function_index(env_->module, func_index_);
__ Load(tmp, tmp.gp(), no_reg, offset, LoadType::kI32Load, pinned);
__ Spill(liftoff::kTierupBudgetOffset, tmp, ValueKind::kI32);
} else {
__ Spill(liftoff::kTierupBudgetOffset, WasmValue::ForUintPtr(0));
}
if (for_debugging_) __ ResetOSRTarget();
// Process parameters.
......@@ -885,6 +967,7 @@ class LiftoffCompiler {
(std::string("OOL: ") + GetRuntimeStubName(ool->stub)).c_str());
__ bind(ool->label.get());
const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;
const bool is_tierup = ool->stub == WasmCode::kWasmTriggerTierUp;
// Only memory OOB traps need a {pc}, but not unconditionally. Static OOB
// accesses do not need protected instruction information, hence they also
......@@ -945,17 +1028,22 @@ class LiftoffCompiler {
__ RecordSpillsInSafepoint(safepoint, gp_regs,
ool->safepoint_info->spills, index);
}
if (is_tierup) {
// Reset the budget.
__ Spill(liftoff::kTierupBudgetOffset,
WasmValue(FLAG_wasm_tiering_budget));
}
DCHECK_EQ(!debug_sidetable_builder_, !ool->debug_sidetable_entry_builder);
if (V8_UNLIKELY(ool->debug_sidetable_entry_builder)) {
ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset());
}
DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check || is_tierup);
if (is_stack_check) {
MaybeOSR();
}
if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
if (is_stack_check) {
if (is_stack_check || is_tierup) {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
DCHECK(for_debugging_);
for (auto& entry : ool->spilled_registers->entries) {
......@@ -1139,9 +1227,13 @@ class LiftoffCompiler {
PushControl(loop);
// Execute a stack check in the loop header.
if (!FLAG_new_wasm_dynamic_tiering) {
// When the budget-based tiering mechanism is enabled, use that to
// check for interrupt requests; otherwise execute a stack check in the
// loop header.
StackCheck(decoder, decoder->position());
}
}
void Try(FullDecoder* decoder, Control* block) {
block->try_info = std::make_unique<TryInfo>();
......@@ -2186,8 +2278,23 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(int64_t));
}
void TierupCheckOnExit(FullDecoder* decoder) {
if (!FLAG_new_wasm_dynamic_tiering) return;
TierupCheck(decoder, decoder->position(), __ pc_offset());
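// Write the remaining budget from its stack slot back into the per-function
// counter array held by the instance, so it carries over to the next call.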
LiftoffRegList pinned;
LiftoffRegister budget = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister array = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_INSTANCE_FIELD(array.gp(), NumLiftoffFunctionCallsArray,
kSystemPointerSize, pinned);
uint32_t offset =
kInt32Size * declared_function_index(env_->module, func_index_);
__ Fill(budget, liftoff::kTierupBudgetOffset, ValueKind::kI32);
__ Store(array.gp(), no_reg, offset, budget, StoreType::kI32Store, pinned);
}
void DoReturn(FullDecoder* decoder, uint32_t /* drop_values */) {
if (FLAG_trace_wasm) TraceFunctionExit(decoder);
TierupCheckOnExit(decoder);
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
__ LeaveFrame(StackFrame::WASM);
......@@ -2518,12 +2625,24 @@ class LiftoffCompiler {
__ PushRegister(kind, dst);
}
void BrImpl(Control* target) {
void BrImpl(FullDecoder* decoder, Control* target) {
if (!target->br_merge()->reached) {
target->label_state.InitMerge(
*__ cache_state(), __ num_locals(), target->br_merge()->arity,
target->stack_depth + target->num_exceptions);
}
if (FLAG_new_wasm_dynamic_tiering) {
if (target->is_loop()) {
DCHECK(target->label.get()->is_bound());
int jump_distance = __ pc_offset() - target->label.get()->pos();
TierupCheck(decoder, decoder->position(), jump_distance);
} else {
// To estimate time spent in this function more accurately, we could
// increment the tiering budget on forward jumps. However, we don't
// know the jump distance yet; using a blanket value has been tried
// and found to not make a difference.
}
}
__ MergeStackWith(target->label_state, target->br_merge()->arity,
target->is_loop() ? LiftoffAssembler::kBackwardJump
: LiftoffAssembler::kForwardJump);
......@@ -2535,7 +2654,7 @@ class LiftoffCompiler {
if (depth == decoder->control_depth() - 1) {
DoReturn(decoder, 0);
} else {
BrImpl(decoder->control_at(depth));
BrImpl(decoder, decoder->control_at(depth));
}
}
......@@ -3259,18 +3378,21 @@ class LiftoffCompiler {
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
TierupCheckOnExit(decoder);
CallDirect(decoder, imm, args, nullptr, kTailCall);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
TierupCheckOnExit(decoder);
CallIndirect(decoder, index_val, imm, kTailCall);
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
TierupCheckOnExit(decoder);
CallRef(decoder, func_ref.type, sig, kTailCall);
}
......
......@@ -350,12 +350,14 @@ class LiftoffRegList {
constexpr LiftoffRegList() = default;
Register set(Register reg) { return set(LiftoffRegister(reg)).gp(); }
DoubleRegister set(DoubleRegister reg) {
constexpr Register set(Register reg) {
return set(LiftoffRegister(reg)).gp();
}
constexpr DoubleRegister set(DoubleRegister reg) {
return set(LiftoffRegister(reg)).fp();
}
LiftoffRegister set(LiftoffRegister reg) {
constexpr LiftoffRegister set(LiftoffRegister reg) {
if (reg.is_pair()) {
regs_ |= storage_t{1} << reg.low().liftoff_code();
regs_ |= storage_t{1} << reg.high().liftoff_code();
......
......@@ -66,6 +66,7 @@ static_assert((kLiftoffAssemblerFpCacheRegs &
// rbp-8 holds the stack marker, rbp-16 is the instance parameter.
constexpr int kInstanceOffset = 16;
constexpr int kFeedbackVectorOffset = 24; // rbp-24 is the feedback vector.
constexpr int kTierupBudgetOffset = 32;  // rbp-32 is the tiering budget.
inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
......@@ -2161,6 +2162,13 @@ void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
j(cond, label);
}
void LiftoffAssembler::emit_i32_subi_jump_negative(Register value,
int subtrahend,
Label* result_negative) {
subl(value, Immediate(subtrahend));
j(negative, result_negative);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
testl(src, src);
setcc(equal, dst);
......
......@@ -1326,6 +1326,23 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
kNoDebugging};
const WasmModule* module = native_module->module();
size_t priority;
if (FLAG_new_wasm_dynamic_tiering) {
base::MutexGuard mutex_guard(&module->type_feedback.mutex);
int saved_priority =
module->type_feedback.feedback_for_function[func_index].tierup_priority;
saved_priority++;
module->type_feedback.feedback_for_function[func_index].tierup_priority =
saved_priority;
// Continue to create a compilation unit if this is the first time
// we detect this function as hot, and create a new higher-priority unit
// if the number of tierup checks is a power of two (at least 4), i.e.
// after the 1st, 4th, 8th, 16th, ... trigger.
if (saved_priority > 1 &&
(saved_priority < 4 || (saved_priority & (saved_priority - 1)) != 0)) {
return;
}
priority = saved_priority;
}
if (FLAG_wasm_speculative_inlining) {
auto feedback = ProcessTypeFeedback(isolate, instance, func_index);
base::MutexGuard mutex_guard(&module->type_feedback.mutex);
......@@ -1336,11 +1353,11 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
std::move(feedback);
}
if (!FLAG_new_wasm_dynamic_tiering) {
uint32_t* call_array = native_module->num_liftoff_function_calls_array();
int offset = wasm::declared_function_index(module, func_index);
size_t priority =
base::Relaxed_Load(reinterpret_cast<int*>(&call_array[offset]));
priority = base::Relaxed_Load(reinterpret_cast<int*>(&call_array[offset]));
}
compilation_state->AddTopTierPriorityCompilationUnit(tiering_unit, priority);
}
......
......@@ -1000,11 +1000,16 @@ NativeModule::NativeModule(const WasmFeatures& enabled,
num_liftoff_function_calls_ =
std::make_unique<uint32_t[]>(module_->num_declared_functions);
if (FLAG_new_wasm_dynamic_tiering) {
std::fill_n(num_liftoff_function_calls_.get(),
module_->num_declared_functions, FLAG_wasm_tiering_budget);
} else {
// Start counter at 4 to avoid runtime calls for smaller numbers.
constexpr int kCounterStart = 4;
std::fill_n(num_liftoff_function_calls_.get(),
module_->num_declared_functions, kCounterStart);
}
}
// Even though there cannot be another thread using this object (since we are
// just constructing it), we need to hold the mutex to fulfill the
// precondition of {WasmCodeAllocator::Init}, which calls
......
......@@ -161,7 +161,7 @@ constexpr int kAnonymousFuncIndex = -1;
constexpr uint32_t kGenericWrapperBudget = 1000;
#if V8_TARGET_ARCH_X64
constexpr int32_t kOSRTargetOffset = 4 * kSystemPointerSize;
constexpr int32_t kOSRTargetOffset = 5 * kSystemPointerSize;
#endif
} // namespace wasm
......
......@@ -269,6 +269,7 @@ struct CallSiteFeedback {
struct FunctionTypeFeedback {
std::vector<CallSiteFeedback> feedback_vector;
std::map<WasmCodePosition, int> positions;
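// Number of times the tiering budget for this function has been exhausted;
// used as the priority of the top-tier compilation unit.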
int tierup_priority = 0;
};
struct TypeFeedbackStorage {
std::map<uint32_t, FunctionTypeFeedback> feedback_for_function;
......