Commit 1f215c20 authored by Jakob Kummerow, committed by V8 LUCI CQ

[wasm-gc] Polymorphic inlining for call_ref

When call_ref has seen more than one call target, we now support
inlining all of them (constrained by budget/heuristics).

Bug: v8:7748,v8:12166
Change-Id: Iae16e74da1bad5e7a117f70efb6c61b3f39f832c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3650607
Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80602}
parent ea07528c
......@@ -52,6 +52,8 @@ const kAnyTableType: constexpr int31
generates 'wasm::kWasmAnyRef.raw_bit_field()';
const kAnyNonNullTableType: constexpr int31
generates 'wasm::kWasmAnyNonNullableRef.raw_bit_field()';
const kMaxPolymorphism:
constexpr int31 generates 'wasm::kMaxPolymorphism';
extern macro WasmBuiltinsAssembler::LoadInstanceFromFrame(): WasmInstanceObject;
......@@ -496,7 +498,7 @@ macro GetTargetAndInstance(funcref: WasmInternalFunction): TargetAndInstance {
// - monomorphic: (funcref, count (smi)). The second slot is a counter for how
// often the funcref in the first slot has been seen.
// - polymorphic: (fixed_array, <unused>). In this case, the array
// contains 2..4 pairs (funcref, count (smi)) (like monomorphic data).
// contains 2..kMaxPolymorphism pairs (funcref, count (smi))
// - megamorphic: ("megamorphic" sentinel, <unused>)
//
// TODO(rstz): The counter might overflow if it exceeds the range of a Smi.
......@@ -533,7 +535,8 @@ builtin CallRefIC(
} else if (Is<FixedArray>(value)) {
// Polymorphic miss.
const entries = UnsafeCast<FixedArray>(value);
if (entries.length == SmiConstant(8)) { // 4 entries, 2 slots each.
const kMaxSlots = kMaxPolymorphism * 2; // 2 slots per entry.
if (entries.length == SmiConstant(kMaxSlots)) {
vector.objects[index] = ic::kMegamorphicSymbol;
vector.objects[index + 1] = ic::kMegamorphicSymbol;
} else {
......
......@@ -2962,9 +2962,11 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* real_sig,
return call;
}
void WasmGraphBuilder::CompareToInternalFunctionAtIndex(
Node* func_ref, uint32_t function_index, Node** success_control,
Node** failure_control) {
void WasmGraphBuilder::CompareToInternalFunctionAtIndex(Node* func_ref,
uint32_t function_index,
Node** success_control,
Node** failure_control,
bool is_last_case) {
// Since we are comparing to a function reference, it is guaranteed that
// instance->wasm_internal_functions() has been initialized.
Node* internal_functions = gasm_->LoadImmutable(
......@@ -2974,8 +2976,9 @@ void WasmGraphBuilder::CompareToInternalFunctionAtIndex(
Node* function_ref_at_index = gasm_->LoadFixedArrayElement(
internal_functions, gasm_->IntPtrConstant(function_index),
MachineType::AnyTagged());
BranchHint hint = is_last_case ? BranchHint::kTrue : BranchHint::kNone;
gasm_->Branch(gasm_->TaggedEqual(function_ref_at_index, func_ref),
success_control, failure_control, BranchHint::kTrue);
success_control, failure_control, hint);
}
Node* WasmGraphBuilder::CallRef(const wasm::FunctionSig* real_sig,
......
......@@ -371,7 +371,8 @@ class WasmGraphBuilder {
void CompareToInternalFunctionAtIndex(Node* func_ref, uint32_t function_index,
Node** success_control,
Node** failure_control);
Node** failure_control,
bool is_last_case);
void BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
......
......@@ -94,11 +94,12 @@ Reduction WasmInliner::ReduceCall(Node* call) {
}
bool SmallEnoughToInline(size_t current_graph_size, uint32_t candidate_size) {
if (WasmInliner::graph_size_allows_inlining(current_graph_size)) {
if (WasmInliner::graph_size_allows_inlining(current_graph_size +
candidate_size)) {
return true;
}
// For truly tiny functions, let's be a bit more generous.
return candidate_size < 10 &&
return candidate_size <= 12 &&
WasmInliner::graph_size_allows_inlining(current_graph_size - 100);
}
......
......@@ -58,8 +58,8 @@ class WasmInliner final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
void Finalize() final;
static bool graph_size_allows_inlining(size_t initial_graph_size) {
return initial_graph_size < 5000;
static bool graph_size_allows_inlining(size_t graph_size) {
return graph_size < FLAG_wasm_inlining_budget;
}
private:
......
......@@ -1108,11 +1108,8 @@ DEFINE_BOOL(wasm_math_intrinsics, true,
DEFINE_BOOL(
wasm_inlining, false,
"enable inlining of wasm functions into wasm functions (experimental)")
DEFINE_SIZE_T(
wasm_inlining_budget_factor, 75000,
"maximum allowed size to inline a function is given by {n / caller size}")
DEFINE_SIZE_T(wasm_inlining_max_size, 1000,
"maximum size of a function that can be inlined, in TF nodes")
DEFINE_SIZE_T(wasm_inlining_budget, 9000,
"maximum graph size (in TF nodes) that allows inlining more")
DEFINE_BOOL(wasm_speculative_inlining, false,
"enable speculative inlining of call_ref targets (experimental)")
DEFINE_BOOL(trace_wasm_inlining, false, "trace wasm inlining")
......
......@@ -127,8 +127,15 @@ class WasmGraphBuildingInterface {
base::MutexGuard mutex_guard(&feedbacks.mutex);
auto feedback = feedbacks.feedback_for_function.find(func_index_);
if (feedback != feedbacks.feedback_for_function.end()) {
// This creates a copy of the vector, which is cheaper than holding on
// to the mutex throughout graph building.
type_feedback_ = feedback->second.feedback_vector;
builder_->ReserveCallCounts(type_feedback_.size());
// Preallocate space for storing call counts to save Zone memory.
int total_calls = 0;
for (size_t i = 0; i < type_feedback_.size(); i++) {
total_calls += type_feedback_[i].num_cases();
}
builder_->ReserveCallCounts(static_cast<size_t>(total_calls));
// We need to keep the feedback in the module to inline later. However,
// this means we are stuck with it forever.
// TODO(jkummerow): Reconsider our options here.
......@@ -644,7 +651,8 @@ class WasmGraphBuildingInterface {
int maybe_call_count = -1;
if (FLAG_wasm_speculative_inlining && type_feedback_.size() > 0) {
const CallSiteFeedback& feedback = next_call_feedback();
maybe_call_count = feedback.absolute_call_frequency;
DCHECK_EQ(feedback.num_cases(), 1);
maybe_call_count = feedback.call_count(0);
}
DoCall(decoder, CallInfo::CallDirect(imm.index, maybe_call_count), imm.sig,
args, returns);
......@@ -656,7 +664,8 @@ class WasmGraphBuildingInterface {
int maybe_call_count = -1;
if (FLAG_wasm_speculative_inlining && type_feedback_.size() > 0) {
const CallSiteFeedback& feedback = next_call_feedback();
maybe_call_count = feedback.absolute_call_frequency;
DCHECK_EQ(feedback.num_cases(), 1);
maybe_call_count = feedback.call_count(0);
}
DoReturnCall(decoder, CallInfo::CallDirect(imm.index, maybe_call_count),
imm.sig, args);
......@@ -683,14 +692,11 @@ class WasmGraphBuildingInterface {
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
int maybe_feedback = -1;
int maybe_call_count = -1;
const CallSiteFeedback* feedback = nullptr;
if (FLAG_wasm_speculative_inlining && type_feedback_.size() > 0) {
const CallSiteFeedback& feedback = next_call_feedback();
maybe_feedback = feedback.function_index;
maybe_call_count = feedback.absolute_call_frequency;
feedback = &next_call_feedback();
}
if (maybe_feedback == -1) {
if (feedback == nullptr || feedback->num_cases() == 0) {
DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
sig, args, returns);
return;
......@@ -698,70 +704,80 @@ class WasmGraphBuildingInterface {
// Check for equality against a function at a specific index, and if
// successful, just emit a direct call.
DCHECK_GE(maybe_feedback, 0);
const uint32_t expected_function_index = maybe_feedback;
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call #%d: graph support for inlining target #%d]\n",
func_index_, feedback_instruction_index_ - 1,
expected_function_index);
}
TFNode* success_control;
TFNode* failure_control;
builder_->CompareToInternalFunctionAtIndex(
func_ref.node, expected_function_index, &success_control,
&failure_control);
TFNode* initial_effect = effect();
builder_->SetControl(success_control);
ssa_env_->control = success_control;
Value* returns_direct =
decoder->zone()->NewArray<Value>(sig->return_count());
DoCall(decoder,
CallInfo::CallDirect(expected_function_index, maybe_call_count),
decoder->module_->signature(sig_index), args, returns_direct);
TFNode* control_direct = control();
TFNode* effect_direct = effect();
builder_->SetEffectControl(initial_effect, failure_control);
ssa_env_->effect = initial_effect;
ssa_env_->control = failure_control;
int num_cases = feedback->num_cases();
std::vector<TFNode*> control_args;
std::vector<TFNode*> effect_args;
std::vector<Value*> returns_values;
control_args.reserve(num_cases + 1);
effect_args.reserve(num_cases + 2);
returns_values.reserve(num_cases);
for (int i = 0; i < num_cases; i++) {
const uint32_t expected_function_index = feedback->function_index(i);
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call #%d: graph support for inlining #%d]\n",
func_index_, feedback_instruction_index_ - 1,
expected_function_index);
}
TFNode* success_control;
TFNode* failure_control;
builder_->CompareToInternalFunctionAtIndex(
func_ref.node, expected_function_index, &success_control,
&failure_control, i == num_cases - 1);
TFNode* initial_effect = effect();
builder_->SetControl(success_control);
ssa_env_->control = success_control;
Value* returns_direct =
decoder->zone()->NewArray<Value>(sig->return_count());
DoCall(decoder,
CallInfo::CallDirect(expected_function_index,
feedback->call_count(i)),
decoder->module_->signature(sig_index), args, returns_direct);
control_args.push_back(control());
effect_args.push_back(effect());
returns_values.push_back(returns_direct);
builder_->SetEffectControl(initial_effect, failure_control);
ssa_env_->effect = initial_effect;
ssa_env_->control = failure_control;
}
Value* returns_ref = decoder->zone()->NewArray<Value>(sig->return_count());
DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
sig, args, returns_ref);
TFNode* control_ref = control();
TFNode* effect_ref = effect();
TFNode* control_args[] = {control_direct, control_ref};
TFNode* control = builder_->Merge(2, control_args);
control_args.push_back(control());
TFNode* control = builder_->Merge(num_cases + 1, control_args.data());
TFNode* effect_args[] = {effect_direct, effect_ref, control};
TFNode* effect = builder_->EffectPhi(2, effect_args);
effect_args.push_back(effect());
effect_args.push_back(control);
TFNode* effect = builder_->EffectPhi(num_cases + 1, effect_args.data());
ssa_env_->control = control;
ssa_env_->effect = effect;
builder_->SetEffectControl(effect, control);
for (uint32_t i = 0; i < sig->return_count(); i++) {
TFNode* phi_args[] = {returns_direct[i].node, returns_ref[i].node,
control};
returns[i].node = builder_->Phi(sig->GetReturn(i), 2, phi_args);
std::vector<TFNode*> phi_args;
for (int j = 0; j < num_cases; j++) {
phi_args.push_back(returns_values[j][i].node);
}
phi_args.push_back(returns_ref[i].node);
phi_args.push_back(control);
returns[i].node =
builder_->Phi(sig->GetReturn(i), num_cases + 1, phi_args.data());
}
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
int maybe_feedback = -1;
int maybe_call_count = -1;
const CallSiteFeedback* feedback = nullptr;
if (FLAG_wasm_speculative_inlining && type_feedback_.size() > 0) {
const CallSiteFeedback& feedback = next_call_feedback();
maybe_feedback = feedback.function_index;
maybe_call_count = feedback.absolute_call_frequency;
feedback = &next_call_feedback();
}
if (maybe_feedback == -1) {
if (feedback == nullptr || feedback->num_cases() == 0) {
DoReturnCall(decoder,
CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
sig, args);
......@@ -770,32 +786,35 @@ class WasmGraphBuildingInterface {
// Check for equality against a function at a specific index, and if
// successful, just emit a direct call.
DCHECK_GE(maybe_feedback, 0);
const uint32_t expected_function_index = maybe_feedback;
int num_cases = feedback->num_cases();
for (int i = 0; i < num_cases; i++) {
const uint32_t expected_function_index = feedback->function_index(i);
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call #%d: graph support for inlining #%d]\n",
func_index_, feedback_instruction_index_ - 1,
expected_function_index);
}
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call #%d: graph support for inlining target #%d]\n",
func_index_, feedback_instruction_index_ - 1,
expected_function_index);
}
TFNode* success_control;
TFNode* failure_control;
builder_->CompareToInternalFunctionAtIndex(
func_ref.node, expected_function_index, &success_control,
&failure_control, i == num_cases - 1);
TFNode* initial_effect = effect();
TFNode* success_control;
TFNode* failure_control;
builder_->CompareToInternalFunctionAtIndex(
func_ref.node, expected_function_index, &success_control,
&failure_control);
TFNode* initial_effect = effect();
builder_->SetControl(success_control);
ssa_env_->control = success_control;
DoReturnCall(decoder,
CallInfo::CallDirect(expected_function_index,
feedback->call_count(i)),
sig, args);
builder_->SetControl(success_control);
ssa_env_->control = success_control;
DoReturnCall(
decoder,
CallInfo::CallDirect(expected_function_index, maybe_call_count), sig,
args);
builder_->SetEffectControl(initial_effect, failure_control);
ssa_env_->effect = initial_effect;
ssa_env_->control = failure_control;
}
builder_->SetEffectControl(initial_effect, failure_control);
ssa_env_->effect = initial_effect;
ssa_env_->control = failure_control;
DoReturnCall(decoder,
CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)), sig,
args);
......@@ -1464,7 +1483,7 @@ class WasmGraphBuildingInterface {
std::vector<compiler::WasmLoopInfo> loop_infos_;
InlinedStatus inlined_status_;
// The entries in {type_feedback_} are indexed by the position of feedback-
// consuming instructions (currently only call_ref).
// consuming instructions (currently only calls).
int feedback_instruction_index_ = 0;
std::vector<CallSiteFeedback> type_feedback_;
......
......@@ -1284,20 +1284,21 @@ class TransitiveTypeFeedbackProcessor {
private:
void Process(int func_index);
void EnqueueCallees(std::vector<CallSiteFeedback> feedback) {
void EnqueueCallees(const std::vector<CallSiteFeedback>& feedback) {
for (size_t i = 0; i < feedback.size(); i++) {
int func = feedback[i].function_index;
// Nothing to do for non-inlineable (e.g. megamorphic) calls.
if (func == -1) continue;
// Don't spend time on calls that have never been executed.
if (feedback[i].absolute_call_frequency == 0) continue;
// Don't recompute feedback that has already been processed.
auto existing = feedback_for_function_.find(func);
if (existing != feedback_for_function_.end() &&
existing->second.feedback_vector.size() > 0) {
continue;
const CallSiteFeedback& csf = feedback[i];
for (int j = 0; j < csf.num_cases(); j++) {
int func = csf.function_index(j);
// Don't spend time on calls that have never been executed.
if (csf.call_count(j) == 0) continue;
// Don't recompute feedback that has already been processed.
auto existing = feedback_for_function_.find(func);
if (existing != feedback_for_function_.end() &&
existing->second.feedback_vector.size() > 0) {
continue;
}
queue_.insert(func);
}
queue_.insert(func);
}
}
......@@ -1306,111 +1307,127 @@ class TransitiveTypeFeedbackProcessor {
std::unordered_set<int> queue_;
};
class FeedbackMaker {
public:
FeedbackMaker(WasmInstanceObject instance, int func_index_, int num_calls)
: instance_(instance),
num_imported_functions_(
static_cast<int>(instance.module()->num_imported_functions)),
targets_cache_(kMaxPolymorphism),
counts_cache_(kMaxPolymorphism) {
result_.reserve(num_calls);
}
void AddCandidate(Object maybe_function, int count) {
if (!maybe_function.IsWasmInternalFunction()) return;
WasmInternalFunction function = WasmInternalFunction::cast(maybe_function);
if (!WasmExportedFunction::IsWasmExportedFunction(function.external())) {
return;
}
WasmExportedFunction target =
WasmExportedFunction::cast(function.external());
if (target.instance() != instance_) return;
if (target.function_index() < num_imported_functions_) return;
AddCall(target.function_index(), count);
}
void AddCall(int target, int count) {
// Keep the cache sorted (using insertion-sort), highest count first.
int insertion_index = 0;
while (insertion_index < cache_usage_ &&
counts_cache_[insertion_index] >= count) {
insertion_index++;
}
for (int shifted_index = cache_usage_ - 1; shifted_index >= insertion_index;
shifted_index--) {
targets_cache_[shifted_index + 1] = targets_cache_[shifted_index];
counts_cache_[shifted_index + 1] = counts_cache_[shifted_index];
}
targets_cache_[insertion_index] = target;
counts_cache_[insertion_index] = count;
cache_usage_++;
}
void FinalizeCall() {
if (cache_usage_ == 0) {
result_.emplace_back();
} else if (cache_usage_ == 1) {
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call_ref #%zu inlineable (monomorphic)]\n",
func_index_, result_.size());
}
result_.emplace_back(targets_cache_[0], counts_cache_[0]);
} else {
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call_ref #%zu inlineable (polymorphic %d)]\n",
func_index_, result_.size(), cache_usage_);
}
CallSiteFeedback::PolymorphicCase* polymorphic =
new CallSiteFeedback::PolymorphicCase[cache_usage_];
for (int i = 0; i < cache_usage_; i++) {
polymorphic[i].function_index = targets_cache_[i];
polymorphic[i].absolute_call_frequency = counts_cache_[i];
}
result_.emplace_back(polymorphic, cache_usage_);
}
cache_usage_ = 0;
}
std::vector<CallSiteFeedback>&& GetResult() { return std::move(result_); }
private:
WasmInstanceObject instance_;
std::vector<CallSiteFeedback> result_;
int num_imported_functions_;
int func_index_;
int cache_usage_{0};
std::vector<int> targets_cache_;
std::vector<int> counts_cache_;
};
void TransitiveTypeFeedbackProcessor::Process(int func_index) {
int which_vector = declared_function_index(instance_->module(), func_index);
Object maybe_feedback = instance_->feedback_vectors().get(which_vector);
if (!maybe_feedback.IsFixedArray()) return;
FixedArray feedback = FixedArray::cast(maybe_feedback);
std::vector<CallSiteFeedback> result(feedback.length() / 2);
int imported_functions =
static_cast<int>(instance_->module()->num_imported_functions);
WasmModuleObject module_object = instance_->module_object();
const NativeModule* native_module = module_object.native_module();
const WasmModule* module = native_module->module();
const std::vector<uint32_t>& call_direct_targets(
module->type_feedback.feedback_for_function[func_index].call_targets);
FeedbackMaker fm(*instance_, func_index, feedback.length() / 2);
for (int i = 0; i < feedback.length(); i += 2) {
Object value = feedback.get(i);
if (value.IsWasmInternalFunction() &&
WasmExportedFunction::IsWasmExportedFunction(
WasmInternalFunction::cast(value).external())) {
// Monomorphic, and the internal function points to a wasm-generated
// external function (WasmExportedFunction). Mark the target for inlining
// if it's defined in the same module.
WasmExportedFunction target = WasmExportedFunction::cast(
WasmInternalFunction::cast(value).external());
if (target.instance() == *instance_ &&
target.function_index() >= imported_functions) {
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call_ref #%d inlineable (monomorphic)]\n",
func_index, i / 2);
}
int32_t count = Smi::cast(feedback.get(i + 1)).value();
result[i / 2] = {target.function_index(), count};
continue;
}
if (value.IsWasmInternalFunction()) {
// Monomorphic.
int count = Smi::cast(feedback.get(i + 1)).value();
fm.AddCandidate(value, count);
} else if (value.IsFixedArray()) {
// Polymorphic. Pick a target for inlining if there is one that was
// seen for most calls, and matches the requirements of the monomorphic
// case.
// Polymorphic.
FixedArray polymorphic = FixedArray::cast(value);
size_t total_count = 0;
for (int j = 0; j < polymorphic.length(); j += 2) {
total_count += Smi::cast(polymorphic.get(j + 1)).value();
}
int found_target = -1;
int found_count = -1;
double best_frequency = 0;
for (int j = 0; j < polymorphic.length(); j += 2) {
int32_t this_count = Smi::cast(polymorphic.get(j + 1)).value();
double frequency = static_cast<double>(this_count) / total_count;
if (frequency > best_frequency) best_frequency = frequency;
if (frequency < 0.8) continue;
// We reject this polymorphic entry if:
// - it is not defined,
// - it is not a wasm-defined function (WasmExportedFunction)
// - it was not defined in this module.
if (!polymorphic.get(j).IsWasmInternalFunction()) continue;
WasmInternalFunction internal =
WasmInternalFunction::cast(polymorphic.get(j));
if (!WasmExportedFunction::IsWasmExportedFunction(
internal.external())) {
continue;
}
WasmExportedFunction target =
WasmExportedFunction::cast(internal.external());
if (target.instance() != *instance_ ||
target.function_index() < imported_functions) {
continue;
}
found_target = target.function_index();
found_count = static_cast<int>(this_count);
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call_ref #%d inlineable (polymorphic %f)]\n",
func_index, i / 2, frequency);
}
break;
}
if (found_target >= 0) {
result[i / 2] = {found_target, found_count};
continue;
} else if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call_ref #%d: best frequency %f]\n", func_index,
i / 2, best_frequency);
Object function = polymorphic.get(j);
int count = Smi::cast(polymorphic.get(j + 1)).value();
fm.AddCandidate(function, count);
}
} else if (value.IsSmi()) {
// Uninitialized, or a direct call collecting call count.
uint32_t target = call_direct_targets[i / 2];
if (target != FunctionTypeFeedback::kNonDirectCall) {
int count = Smi::cast(value).value();
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call_direct #%d: frequency %d]\n", func_index,
i / 2, count);
}
result[i / 2] = {static_cast<int>(target), count};
continue;
fm.AddCall(static_cast<int>(target), count);
} else if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call #%d: uninitialized]\n", func_index, i / 2);
}
} else if (FLAG_trace_wasm_speculative_inlining) {
if (value ==
ReadOnlyRoots(instance_->GetIsolate()).megamorphic_symbol()) {
PrintF("[Function #%d call #%d: megamorphic]\n", func_index, i / 2);
}
}
// If we fall through to here, then this call isn't eligible for inlining.
// Possible reasons: uninitialized or megamorphic feedback; or monomorphic
// or polymorphic that didn't meet our requirements.
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call #%d *not* inlineable]\n", func_index, i / 2);
}
result[i / 2] = {-1, -1};
fm.FinalizeCall();
}
std::vector<CallSiteFeedback> result(fm.GetResult());
EnqueueCallees(result);
feedback_for_function_[func_index].feedback_vector = std::move(result);
}
......
......@@ -186,6 +186,9 @@ constexpr uint32_t kGenericWrapperBudget = 1000;
// gives up some module size for faster access to the supertypes.
constexpr uint32_t kMinimumSupertypeArraySize = 3;
// Maximum number of call targets tracked per call.
constexpr int kMaxPolymorphism = 4;
#if V8_TARGET_ARCH_X64
constexpr int32_t kOSRTargetOffset = 5 * kSystemPointerSize;
#endif
......
......@@ -399,9 +399,79 @@ struct V8_EXPORT_PRIVATE WasmDebugSymbols {
WireBytesRef external_url;
};
struct CallSiteFeedback {
int function_index;
int absolute_call_frequency;
// Feedback collected for one call site. Encodes three states compactly in
// two words:
// - invalid/unknown: index_or_count_ == -1.
// - monomorphic:     index_or_count_ >= 0 is the function index,
//                    frequency_or_ool_ is the call count.
// - polymorphic:     index_or_count_ == -num_cases (<= -2), and
//                    frequency_or_ool_ points to an out-of-line, owned
//                    new[]-allocated array of {PolymorphicCase}.
class CallSiteFeedback {
 public:
  struct PolymorphicCase {
    int function_index;
    int absolute_call_frequency;
  };

  // Regular constructor: uninitialized/unknown, monomorphic, or polymorphic.
  CallSiteFeedback() : index_or_count_(-1), frequency_or_ool_(0) {}
  CallSiteFeedback(int function_index, int call_count)
      : index_or_count_(function_index), frequency_or_ool_(call_count) {}
  // Takes ownership of {polymorphic_cases} (must be new[]-allocated).
  CallSiteFeedback(PolymorphicCase* polymorphic_cases, int num_cases)
      : index_or_count_(-num_cases),
        frequency_or_ool_(reinterpret_cast<intptr_t>(polymorphic_cases)) {}

  // Copying and assignment: prefer moving, as it's cheaper.
  // The code below makes sure external polymorphic storage is copied and/or
  // freed as appropriate. The copy constructor delegates to the default
  // constructor so that the assignment operator inspects a defined state.
  CallSiteFeedback(const CallSiteFeedback& other) V8_NOEXCEPT
      : CallSiteFeedback() {
    *this = other;
  }
  // Bug fix: this used to do {*this = other;}, which binds the lvalue
  // {other} to the *copy* assignment operator and deep-copies the
  // polymorphic storage. Steal the payload directly instead.
  CallSiteFeedback(CallSiteFeedback&& other) V8_NOEXCEPT
      : index_or_count_(other.index_or_count_),
        frequency_or_ool_(other.frequency_or_ool_) {
    // Reset {other} to the empty state so its destructor doesn't free (and
    // a later copy doesn't duplicate) the storage we now own.
    other.index_or_count_ = -1;
    other.frequency_or_ool_ = 0;
  }
  CallSiteFeedback& operator=(const CallSiteFeedback& other) V8_NOEXCEPT {
    if (this == &other) return *this;
    // Build the new payload first, then free the old one; this order also
    // keeps accidental self-assignment safe.
    intptr_t new_payload;
    if (other.is_polymorphic()) {
      int num_cases = other.num_cases();
      PolymorphicCase* polymorphic = new PolymorphicCase[num_cases];
      for (int i = 0; i < num_cases; i++) {
        polymorphic[i].function_index = other.function_index(i);
        polymorphic[i].absolute_call_frequency = other.call_count(i);
      }
      new_payload = reinterpret_cast<intptr_t>(polymorphic);
    } else {
      new_payload = other.frequency_or_ool_;
    }
    // Bug fix: previously the destination's existing polymorphic storage was
    // overwritten without being freed, leaking the array.
    if (is_polymorphic()) delete[] polymorphic_storage();
    index_or_count_ = other.index_or_count_;
    frequency_or_ool_ = new_payload;
    return *this;
  }
  CallSiteFeedback& operator=(CallSiteFeedback&& other) V8_NOEXCEPT {
    if (this != &other) {
      if (is_polymorphic()) delete[] polymorphic_storage();
      index_or_count_ = other.index_or_count_;
      frequency_or_ool_ = other.frequency_or_ool_;
      // Bug fix: previously only {frequency_or_ool_} was reset, leaving the
      // moved-from object flagged polymorphic with a null storage pointer.
      other.index_or_count_ = -1;
      other.frequency_or_ool_ = 0;
    }
    return *this;
  }

  ~CallSiteFeedback() {
    if (is_polymorphic()) delete[] polymorphic_storage();
  }

  // Number of recorded cases: 0 (unknown), 1 (monomorphic), or 2..N.
  int num_cases() const {
    if (is_monomorphic()) return 1;
    if (is_invalid()) return 0;
    return -index_or_count_;
  }
  int function_index(int i) const {
    DCHECK(!is_invalid());
    if (is_monomorphic()) return index_or_count_;
    return polymorphic_storage()[i].function_index;
  }
  int call_count(int i) const {
    DCHECK(!is_invalid());
    if (index_or_count_ >= 0) return static_cast<int>(frequency_or_ool_);
    return polymorphic_storage()[i].absolute_call_frequency;
  }

 private:
  bool is_monomorphic() const { return index_or_count_ >= 0; }
  bool is_polymorphic() const { return index_or_count_ <= -2; }
  bool is_invalid() const { return index_or_count_ == -1; }
  const PolymorphicCase* polymorphic_storage() const {
    return reinterpret_cast<PolymorphicCase*>(frequency_or_ool_);
  }

  int index_or_count_;
  intptr_t frequency_or_ool_;
};
struct FunctionTypeFeedback {
std::vector<CallSiteFeedback> feedback_vector;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.