Commit 5133bbf6 authored by Maya Lekova, committed by Commit Bot

[turbofan] Brokerize JSInliningHeuristic

The JSInliningHeuristic is now completely heap-access free. JSInliner
still includes Allow* guards and will be brokerized as a follow-up CL.

R=neis@chromium.org

Bug: v8:7790
Change-Id: I6df5d8515bb8bd8d512e8442e4f4dba9ebe9dd2e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1528437
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60680}
parent 93aeb582
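
Background for the diff below: "brokerizing" a TurboFan phase means replacing direct reads of heap objects (through Handle<T>) with data the JSHeapBroker serialized ahead of time, exposed via *Ref wrapper types held in base::Optional slots. A minimal, self-contained sketch of that shape using std::optional; FunctionRef here is an invented stand-in, not V8's actual class:

#include <iostream>
#include <optional>
#include <string>

// Illustrative stand-in for a broker-backed ref; V8's real JSFunctionRef
// lives in src/compiler/js-heap-broker.h and wraps serialized heap data.
struct FunctionRef {
  std::string name;
  int bytecode_length = 0;
  bool serialized = false;
  bool IsSerializedForCompilation() const { return serialized; }
};

int main() {
  // Optional slots instead of possibly-null handles: "no data yet" is an
  // explicit state rather than a null-check convention.
  std::optional<FunctionRef> slot;
  slot = FunctionRef{"foo", 12, true};

  if (slot.has_value() && slot->IsSerializedForCompilation()) {
    std::cout << slot->name << ": " << slot->bytecode_length << " bytes\n";
  }
  return 0;
}
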
@@ -2871,7 +2871,15 @@ bool JSFunctionRef::serialized() const {
 }
 
 bool JSFunctionRef::IsSerializedForCompilation() const {
-  return shared().IsSerializedForCompilation(feedback_vector());
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    return handle(object()->shared(), broker()->isolate())->HasBytecodeArray();
+  }
+  // We get a crash if we try to access the shared() getter without
+  // checking for `serialized` first. Also it's possible to have a
+  // JSFunctionRef without a feedback vector.
+  return serialized() && has_feedback_vector() &&
+         shared().IsSerializedForCompilation(feedback_vector());
 }
 
 void SharedFunctionInfoRef::SetSerializedForCompilation(
......
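
The rewritten IsSerializedForCompilation above is two-mode: with the broker disabled it falls back to a direct heap read, and with the broker enabled it checks serialized() and has_feedback_vector() before touching the cached shared() getter, which would otherwise crash. A hedged sketch of that guard ordering with hypothetical names; FunctionRefSketch is not V8 code:

#include <cassert>
#include <optional>

// Hypothetical stand-in for the serialized SharedFunctionInfo data that is
// only present once the broker has processed the function.
struct SharedInfo {
  bool has_bytecode = true;
};

struct FunctionRefSketch {
  bool broker_disabled = false;
  std::optional<SharedInfo> serialized_shared;  // empty until serialized
  bool has_feedback_vector = false;

  const SharedInfo& shared() const {
    // Mirrors the crash the CL comment warns about: reading shared() before
    // serialization is a hard error, so callers must guard first.
    assert(serialized_shared.has_value());
    return *serialized_shared;
  }

  bool IsSerializedForCompilation() const {
    if (broker_disabled) {
      return true;  // the real code does a direct heap read in this mode
    }
    // Guard order matters: availability checks short-circuit before any
    // accessor that would assert on missing data.
    return serialized_shared.has_value() && has_feedback_vector &&
           shared().has_bytecode;
  }
};

int main() {
  FunctionRefSketch f;  // nothing serialized yet
  // Safe: the guards short-circuit before shared() is ever called.
  return f.IsSerializedForCompilation() ? 1 : 0;
}
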
@@ -22,57 +22,73 @@ namespace compiler {
 
 namespace {
 
-int CollectFunctions(Node* node, Handle<JSFunction>* functions,
-                     Handle<BytecodeArray>* bytecode, int functions_size,
-                     Handle<SharedFunctionInfo>& shared, Isolate* isolate) {
+bool IsSmallInlineFunction(BytecodeArrayRef bytecode) {
+  // Forcibly inline small functions.
+  if (bytecode.length() <= FLAG_max_inlined_bytecode_size_small) {
+    return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
+    Node* node, int functions_size) {
   DCHECK_NE(0, functions_size);
-  HeapObjectMatcher m(node);
-  if (m.HasValue() && m.Value()->IsJSFunction()) {
-    functions[0] = Handle<JSFunction>::cast(m.Value());
-    if (functions[0]->shared()->HasBytecodeArray()) {
-      bytecode[0] = handle(functions[0]->shared()->GetBytecodeArray(), isolate);
+  Node* callee = node->InputAt(0);
+  Candidate out;
+  out.node = node;
+
+  HeapObjectMatcher m(callee);
+  if (m.HasValue() && m.Ref(broker()).IsJSFunction()) {
+    out.functions[0] = m.Ref(broker()).AsJSFunction();
+    JSFunctionRef function = out.functions[0].value();
+    if (function.IsSerializedForCompilation()) {
+      out.bytecode[0] = function.shared().GetBytecodeArray();
     }
-    return 1;
+    out.num_functions = 1;
+    return out;
   }
   if (m.IsPhi()) {
     int const value_input_count = m.node()->op()->ValueInputCount();
-    if (value_input_count > functions_size) return 0;
+    if (value_input_count > functions_size) {
+      out.num_functions = 0;
+      return out;
+    }
     for (int n = 0; n < value_input_count; ++n) {
-      HeapObjectMatcher m(node->InputAt(n));
-      if (!m.HasValue() || !m.Value()->IsJSFunction()) return 0;
-      functions[n] = Handle<JSFunction>::cast(m.Value());
-      if (functions[n]->shared()->HasBytecodeArray()) {
-        bytecode[n] =
-            handle(functions[n]->shared()->GetBytecodeArray(), isolate);
+      HeapObjectMatcher m(callee->InputAt(n));
+      if (!m.HasValue() || !m.Ref(broker()).IsJSFunction()) {
+        out.num_functions = 0;
+        return out;
+      }
+      out.functions[n] = m.Ref(broker()).AsJSFunction();
+      JSFunctionRef function = out.functions[n].value();
+      if (function.IsSerializedForCompilation()) {
+        out.bytecode[n] = function.shared().GetBytecodeArray();
       }
     }
-    return value_input_count;
+    out.num_functions = value_input_count;
+    return out;
   }
   if (m.IsJSCreateClosure()) {
     CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
-    functions[0] = Handle<JSFunction>::null();
-    shared = p.shared_info();
-    if (shared->HasBytecodeArray()) {
-      bytecode[0] = handle(shared->GetBytecodeArray(), isolate);
+    DCHECK(!out.functions[0].has_value());
+    out.shared_info = SharedFunctionInfoRef(broker(), p.shared_info());
+    SharedFunctionInfoRef shared_info = out.shared_info.value();
+    if (shared_info.HasBytecodeArray()) {
+      out.bytecode[0] = shared_info.GetBytecodeArray();
    }
-    return 1;
+    out.num_functions = 1;
+    return out;
   }
-  return 0;
+  out.num_functions = 0;
+  return out;
 }
 
-bool IsSmallInlineFunction(Handle<BytecodeArray> bytecode) {
-  // Forcibly inline small functions.
-  // Don't forcibly inline functions that weren't compiled yet.
-  if (!bytecode.is_null() &&
-      bytecode->length() <= FLAG_max_inlined_bytecode_size_small) {
-    return true;
-  }
-  return false;
-}
-
-}  // namespace
-
 Reduction JSInliningHeuristic::Reduce(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
 
   // Check if we already saw that {node} before, and if so, just skip it.
@@ -80,12 +96,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
   seen_.insert(node->id());
 
   // Check if the {node} is an appropriate candidate for inlining.
-  Node* callee = node->InputAt(0);
-  Candidate candidate;
-  candidate.node = node;
-  candidate.num_functions =
-      CollectFunctions(callee, candidate.functions, candidate.bytecode,
-                       kMaxCallPolymorphism, candidate.shared_info, isolate());
+  Candidate candidate = CollectFunctions(node, kMaxCallPolymorphism);
   if (candidate.num_functions == 0) {
     return NoChange();
   } else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
@@ -102,12 +113,29 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
   FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
   Handle<SharedFunctionInfo> frame_shared_info;
   for (int i = 0; i < candidate.num_functions; ++i) {
-    Handle<SharedFunctionInfo> shared =
-        candidate.functions[i].is_null()
-            ? candidate.shared_info
-            : handle(candidate.functions[i]->shared(), isolate());
-    SharedFunctionInfoRef sfi_ref(broker(), shared);
-    candidate.can_inline_function[i] = sfi_ref.IsInlineable();
+    if (!candidate.bytecode[i].has_value()) {
+      // We're already missing critical data which wouldn't allow us to
+      // continue the inlining checks. Log a warning and continue.
+      if (candidate.functions[i].has_value()) {
+        TRACE_BROKER(broker(),
+                     "Missing bytecode array trying to inline function "
+                         << candidate.functions[i].value().object().address());
+      } else {
+        TRACE_BROKER(
+            broker(),
+            "Missing bytecode array trying to inline function with SFI "
+                << candidate.shared_info.value().object().address());
+      }
+      // Those functions that don't have their bytecode serialized probably
+      // don't have the SFI either, so we exit the loop early.
+      candidate.can_inline_function[i] = false;
+      continue;
+    }
+    SharedFunctionInfoRef shared = candidate.functions[i].has_value()
+                                       ? candidate.functions[i].value().shared()
+                                       : candidate.shared_info.value();
+    candidate.can_inline_function[i] = shared.IsInlineable();
     // Do not allow direct recursion i.e. f() -> f(). We still allow indirect
     // recursion like f() -> g() -> f(). The indirect recursion is helpful in
     // cases where f() is a small dispatch function that calls the appropriate
@@ -118,17 +146,19 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
     // analysis that converts them to iterative implementations. Though it is
    // not obvious if such an analysis is needed.
     if (frame_info.shared_info().ToHandle(&frame_shared_info) &&
-        *frame_shared_info == *shared) {
+        frame_shared_info.equals(shared.object())) {
       TRACE("Not considering call site #%d:%s, because of recursive inlining\n",
             node->id(), node->op()->mnemonic());
       candidate.can_inline_function[i] = false;
     }
-    Handle<BytecodeArray> bytecode = candidate.bytecode[i];
+    // A function reaching this point should always have its bytecode
+    // serialized.
+    BytecodeArrayRef bytecode = candidate.bytecode[i].value();
     if (candidate.can_inline_function[i]) {
       can_inline = true;
-      candidate.total_size += bytecode->length();
+      candidate.total_size += bytecode.length();
     }
-    // We don't force inline small functions if any of them is not inlineable
+    // We don't force inline small functions if any of them is not inlineable.
     if (!IsSmallInlineFunction(bytecode)) {
       force_inline_small = false;
     }
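
The missing-bytecode branch added above follows a defensive pattern: if the broker never serialized the data a check needs, trace that fact, mark the candidate as not inlineable, and continue, rather than dereference an empty optional. A condensed sketch of the same control flow; plain std::cerr stands in for V8's TRACE_BROKER, and CandidateSketch is a made-up reduction of the real Candidate struct:

#include <iostream>
#include <optional>
#include <vector>

struct CandidateSketch {
  std::vector<std::optional<int>> bytecode_length;  // empty slot = never serialized
  std::vector<bool> can_inline;
};

int main() {
  CandidateSketch c;
  c.bytecode_length = {40, std::nullopt, 7};  // middle candidate lacks bytecode
  c.can_inline.assign(c.bytecode_length.size(), false);

  for (size_t i = 0; i < c.bytecode_length.size(); ++i) {
    if (!c.bytecode_length[i].has_value()) {
      // Missing critical data: log it and skip this candidate instead of
      // dereferencing an empty optional.
      std::cerr << "Missing bytecode array for candidate " << i << "\n";
      c.can_inline[i] = false;
      continue;
    }
    // Remaining checks (recursion, size limits) elided in this sketch.
    c.can_inline[i] = (*c.bytecode_length[i] > 0);
  }
  return (c.can_inline[0] && !c.can_inline[1]) ? 0 : 1;
}
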
@@ -566,7 +596,7 @@ void JSInliningHeuristic::CreateOrReuseDispatch(Node* node, Node* callee,
   for (int i = 0; i < num_calls; ++i) {
     // TODO(2206): Make comparison be based on underlying SharedFunctionInfo
     // instead of the target JSFunction reference directly.
-    Node* target = jsgraph()->HeapConstant(candidate.functions[i]);
+    Node* target = jsgraph()->Constant(candidate.functions[i].value());
     if (i != (num_calls - 1)) {
       Node* check =
           graph()->NewNode(simplified()->ReferenceEqual(), callee, target);
@@ -601,7 +631,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
   if (num_calls == 1) {
     Reduction const reduction = inliner_.ReduceJSCall(node);
     if (reduction.Changed()) {
-      cumulative_count_ += candidate.bytecode[0]->length();
+      cumulative_count_ += candidate.bytecode[0].value().length();
     }
     return reduction;
   }
@@ -709,12 +739,12 @@ void JSInliningHeuristic::PrintCandidates() {
               << candidate.node->op()->mnemonic()
               << ", frequency: " << candidate.frequency << std::endl;
     for (int i = 0; i < candidate.num_functions; ++i) {
-      Handle<SharedFunctionInfo> shared =
-          candidate.functions[i].is_null()
-              ? candidate.shared_info
-              : handle(candidate.functions[i]->shared(), isolate());
-      PrintF("  - size:%d, name: %s\n", candidate.bytecode[i]->length(),
-             shared->DebugName()->ToCString().get());
+      SharedFunctionInfoRef shared =
+          candidate.functions[i].has_value()
+              ? candidate.functions[i].value().shared()
+              : candidate.shared_info.value();
+      PrintF("  - size:%d, name: %s\n", candidate.bytecode[i].value().length(),
+             shared.object()->DebugName()->ToCString().get());
     }
   }
 }
......
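
A recurring shape in this CL: CollectFunctions used to fill caller-supplied out-parameters (a functions array, a bytecode array, and a shared reference) and return a count; it now builds and returns a Candidate value whose optional slots make absent data explicit. A generic before/after sketch of that refactor, with invented names and int standing in for the ref types:

#include <array>
#include <optional>

constexpr int kMaxPolymorphism = 4;

// Before (sketch): a count return plus caller-supplied out-parameters.
int CollectOld(int* functions, int functions_size) {
  if (functions_size < 1) return 0;
  functions[0] = 42;  // pretend 42 is a collected function
  return 1;
}

// After (sketch): a single value type owns every result, and optional
// slots make "no data collected" explicit instead of a sentinel value.
struct Candidate {
  std::array<std::optional<int>, kMaxPolymorphism> functions;
  int num_functions = 0;
};

Candidate CollectNew(int functions_size) {
  Candidate out;
  if (functions_size < 1) return out;  // num_functions stays 0
  out.functions[0] = 42;
  out.num_functions = 1;
  return out;
}

int main() {
  int old_functions[kMaxPolymorphism] = {};
  int old_count = CollectOld(old_functions, kMaxPolymorphism);
  Candidate c = CollectNew(kMaxPolymorphism);
  return (old_count == c.num_functions) ? 0 : 1;
}
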
@@ -41,18 +41,18 @@ class JSInliningHeuristic final : public AdvancedReducer {
   static const int kMaxCallPolymorphism = 4;
 
   struct Candidate {
-    Handle<JSFunction> functions[kMaxCallPolymorphism];
+    base::Optional<JSFunctionRef> functions[kMaxCallPolymorphism];
     // In the case of polymorphic inlining, this tells if each of the
     // functions could be inlined.
     bool can_inline_function[kMaxCallPolymorphism];
     // Strong references to bytecode to ensure it is not flushed from SFI
     // while choosing inlining candidates.
-    Handle<BytecodeArray> bytecode[kMaxCallPolymorphism];
+    base::Optional<BytecodeArrayRef> bytecode[kMaxCallPolymorphism];
     // TODO(2206): For now polymorphic inlining is treated orthogonally to
     // inlining based on SharedFunctionInfo. This should be unified and the
     // above array should be switched to SharedFunctionInfo instead. Currently
     // we use {num_functions == 1 && functions[0].is_null()} as an indicator.
-    Handle<SharedFunctionInfo> shared_info;
+    base::Optional<SharedFunctionInfoRef> shared_info;
     int num_functions;
     Node* node = nullptr;     // The call site at which to inline.
     CallFrequency frequency;  // Relative frequency of this call site.
@@ -81,6 +81,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
                                StateCloneMode mode);
   Node* DuplicateStateValuesAndRename(Node* state_values, Node* from, Node* to,
                                       StateCloneMode mode);
+  Candidate CollectFunctions(Node* node, int functions_size);
 
   CommonOperatorBuilder* common() const;
   Graph* graph() const;
......
@@ -379,6 +379,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   Handle<SharedFunctionInfo> shared_info;
   JSCallAccessor call(node);
 
+  // TODO(mslekova): Remove those when inlining is brokerized.
+  AllowHandleDereference allow_handle_deref;
+  AllowHandleAllocation allow_handle_alloc;
+
   // Determine the call target.
   if (!DetermineCallTarget(node, shared_info)) return NoChange();
......
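
The AllowHandleDereference and AllowHandleAllocation objects added above are RAII scope guards: constructing one relaxes a per-thread restriction for the current scope, and the destructor restores the previous state, which is why the follow-up CL can remove them by simply deleting the two declarations. A generic sketch of the mechanism; the flag and class names below are illustrative, not V8's actual assert-scope implementation:

#include <cassert>

// A per-thread flag that checked operations consult; V8's assert scopes
// (AllowHandleDereference etc.) follow the same shape with more machinery.
thread_local bool heap_access_allowed = false;

class AllowHeapAccessScope {
 public:
  AllowHeapAccessScope() : previous_(heap_access_allowed) {
    heap_access_allowed = true;  // relax the restriction for this scope
  }
  ~AllowHeapAccessScope() { heap_access_allowed = previous_; }  // restore

 private:
  bool previous_;
};

void TouchHeap() { assert(heap_access_allowed); }

int main() {
  {
    AllowHeapAccessScope allow;  // like the guards added in ReduceJSCall
    TouchHeap();                 // fine: the scope is active
  }
  // Calling TouchHeap() here would assert, since the guard restored the
  // previous (disallowed) state on destruction.
  return 0;
}
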
@@ -24,6 +24,7 @@ function foo(cond) {
   func();
 }
 
+%PrepareFunctionForOptimization(foo);
 foo(true);
 foo(false);
 %OptimizeFunctionOnNextCall(foo);
......