Commit 5133bbf6 authored by Maya Lekova, committed by Commit Bot

[turbofan] Brokerize JSInliningHeuristic

The JSInliningHeuristic is now completely heap-access free. JSInliner
still includes Allow* guards and will be brokerized as a follow-up CL.

R=neis@chromium.org

Bug: v8:7790
Change-Id: I6df5d8515bb8bd8d512e8442e4f4dba9ebe9dd2e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1528437
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60680}
parent 93aeb582
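
For context: "brokerized" means the pass reads only data that the JSHeapBroker copied out of the JS heap ahead of time, so the heuristic can later run without touching the heap (e.g. on a background thread). A minimal sketch of that idea, with hypothetical simplified types — not the real V8 API:

    // Hypothetical sketch of the broker idea, not the real V8 API: data is
    // snapshotted from the heap during a serialization phase, and the
    // compilation phase reads only the snapshot.
    #include <map>
    #include <optional>

    struct FunctionData {   // Copied off the heap on the main thread.
      bool has_bytecode;
      int bytecode_length;
    };

    class BrokerSketch {
     public:
      // Serialization phase: heap access allowed.
      void Serialize(int function_id, FunctionData data) {
        cache_[function_id] = data;
      }
      // Compilation phase: heap-access free, safe on a background thread.
      std::optional<FunctionData> Get(int function_id) const {
        auto it = cache_.find(function_id);
        return it == cache_.end() ? std::nullopt
                                  : std::optional<FunctionData>(it->second);
      }

     private:
      std::map<int, FunctionData> cache_;
    };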
@@ -2871,7 +2871,15 @@ bool JSFunctionRef::serialized() const {
 }
 
 bool JSFunctionRef::IsSerializedForCompilation() const {
-  return shared().IsSerializedForCompilation(feedback_vector());
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    return handle(object()->shared(), broker()->isolate())->HasBytecodeArray();
+  }
+
+  // We get a crash if we try to access the shared() getter without
+  // checking for `serialized` first. Also it's possible to have a
+  // JSFunctionRef without a feedback vector.
+  return serialized() && has_feedback_vector() &&
+         shared().IsSerializedForCompilation(feedback_vector());
 }
 
 void SharedFunctionInfoRef::SetSerializedForCompilation(
...
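
The new body illustrates a defensive pattern used throughout the brokerized code: every accessor that may not have been serialized is checked before it is dereferenced. A hedged sketch of the same pattern with hypothetical simplified types:

    #include <optional>

    // Hypothetical simplified types; the real JSFunctionRef API differs.
    struct FeedbackVectorSketch {
      bool serialized_for_compilation;
    };

    struct JSFunctionRefSketch {
      bool serialized;
      std::optional<FeedbackVectorSketch> feedback_vector;

      bool IsSerializedForCompilation() const {
        // Short-circuit so the optional is only dereferenced once we know
        // the function was serialized and the vector actually exists.
        return serialized && feedback_vector.has_value() &&
               feedback_vector->serialized_for_compilation;
      }
    };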
@@ -22,57 +22,73 @@ namespace compiler {
 
 namespace {
 
-int CollectFunctions(Node* node, Handle<JSFunction>* functions,
-                     Handle<BytecodeArray>* bytecode, int functions_size,
-                     Handle<SharedFunctionInfo>& shared, Isolate* isolate) {
+bool IsSmallInlineFunction(BytecodeArrayRef bytecode) {
+  // Forcibly inline small functions.
+  if (bytecode.length() <= FLAG_max_inlined_bytecode_size_small) {
+    return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
+    Node* node, int functions_size) {
   DCHECK_NE(0, functions_size);
-  HeapObjectMatcher m(node);
-  if (m.HasValue() && m.Value()->IsJSFunction()) {
-    functions[0] = Handle<JSFunction>::cast(m.Value());
-    if (functions[0]->shared()->HasBytecodeArray()) {
-      bytecode[0] = handle(functions[0]->shared()->GetBytecodeArray(), isolate);
+  Node* callee = node->InputAt(0);
+  Candidate out;
+  out.node = node;
+
+  HeapObjectMatcher m(callee);
+  if (m.HasValue() && m.Ref(broker()).IsJSFunction()) {
+    out.functions[0] = m.Ref(broker()).AsJSFunction();
+    JSFunctionRef function = out.functions[0].value();
+    if (function.IsSerializedForCompilation()) {
+      out.bytecode[0] = function.shared().GetBytecodeArray();
     }
-    return 1;
+    out.num_functions = 1;
+    return out;
   }
   if (m.IsPhi()) {
     int const value_input_count = m.node()->op()->ValueInputCount();
-    if (value_input_count > functions_size) return 0;
+    if (value_input_count > functions_size) {
+      out.num_functions = 0;
+      return out;
+    }
     for (int n = 0; n < value_input_count; ++n) {
-      HeapObjectMatcher m(node->InputAt(n));
-      if (!m.HasValue() || !m.Value()->IsJSFunction()) return 0;
-      functions[n] = Handle<JSFunction>::cast(m.Value());
-      if (functions[n]->shared()->HasBytecodeArray()) {
-        bytecode[n] =
-            handle(functions[n]->shared()->GetBytecodeArray(), isolate);
+      HeapObjectMatcher m(callee->InputAt(n));
+      if (!m.HasValue() || !m.Ref(broker()).IsJSFunction()) {
+        out.num_functions = 0;
+        return out;
+      }
+
+      out.functions[n] = m.Ref(broker()).AsJSFunction();
+      JSFunctionRef function = out.functions[n].value();
+      if (function.IsSerializedForCompilation()) {
+        out.bytecode[n] = function.shared().GetBytecodeArray();
       }
     }
-    return value_input_count;
+    out.num_functions = value_input_count;
+    return out;
   }
   if (m.IsJSCreateClosure()) {
     CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
-    functions[0] = Handle<JSFunction>::null();
-    shared = p.shared_info();
-    if (shared->HasBytecodeArray()) {
-      bytecode[0] = handle(shared->GetBytecodeArray(), isolate);
+    DCHECK(!out.functions[0].has_value());
+    out.shared_info = SharedFunctionInfoRef(broker(), p.shared_info());
+    SharedFunctionInfoRef shared_info = out.shared_info.value();
+    if (shared_info.HasBytecodeArray()) {
+      out.bytecode[0] = shared_info.GetBytecodeArray();
    }
-    return 1;
+    out.num_functions = 1;
+    return out;
   }
-  return 0;
+  out.num_functions = 0;
+  return out;
 }
 
-bool IsSmallInlineFunction(Handle<BytecodeArray> bytecode) {
-  // Forcibly inline small functions.
-  // Don't forcibly inline functions that weren't compiled yet.
-  if (!bytecode.is_null() &&
-      bytecode->length() <= FLAG_max_inlined_bytecode_size_small) {
-    return true;
-  }
-  return false;
-}
-
-}  // namespace
-
 Reduction JSInliningHeuristic::Reduce(Node* node) {
+  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
   if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
 
   // Check if we already saw that {node} before, and if so, just skip it.
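
CollectFunctions recognizes three shapes of call target: a single constant function (monomorphic), a Phi over constant functions (polymorphic, bounded by functions_size), and a fresh JSCreateClosure where only the SharedFunctionInfo is known. A rough sketch of that dispatch, using hypothetical simplified IR types rather than real TurboFan nodes:

    #include <vector>

    // Hypothetical simplified IR; real TurboFan nodes look different.
    enum class CalleeKind { kConstantFunction, kPhi, kCreateClosure, kOther };

    struct CalleeSketch {
      CalleeKind kind;
      int function_id = -1;               // Valid for kConstantFunction.
      std::vector<CalleeSketch*> inputs;  // Valid for kPhi.
    };

    // Returns the ids of all possible targets, or empty if inlining is off.
    std::vector<int> CollectCandidateIds(const CalleeSketch& callee,
                                         size_t max_polymorphism) {
      switch (callee.kind) {
        case CalleeKind::kConstantFunction:  // Monomorphic call site.
          return {callee.function_id};
        case CalleeKind::kPhi: {             // Possibly polymorphic.
          if (callee.inputs.size() > max_polymorphism) return {};
          std::vector<int> ids;
          for (const CalleeSketch* input : callee.inputs) {
            if (input->kind != CalleeKind::kConstantFunction) return {};
            ids.push_back(input->function_id);
          }
          return ids;
        }
        case CalleeKind::kCreateClosure:
          // Only the SFI is known; treat its id as the single candidate.
          return {callee.function_id};
        default:
          return {};
      }
    }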
@@ -80,12 +96,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
   seen_.insert(node->id());
 
   // Check if the {node} is an appropriate candidate for inlining.
-  Node* callee = node->InputAt(0);
-  Candidate candidate;
-  candidate.node = node;
-  candidate.num_functions =
-      CollectFunctions(callee, candidate.functions, candidate.bytecode,
-                       kMaxCallPolymorphism, candidate.shared_info, isolate());
+  Candidate candidate = CollectFunctions(node, kMaxCallPolymorphism);
   if (candidate.num_functions == 0) {
     return NoChange();
   } else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
@@ -102,12 +113,29 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
   FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
   Handle<SharedFunctionInfo> frame_shared_info;
   for (int i = 0; i < candidate.num_functions; ++i) {
-    Handle<SharedFunctionInfo> shared =
-        candidate.functions[i].is_null()
-            ? candidate.shared_info
-            : handle(candidate.functions[i]->shared(), isolate());
-    SharedFunctionInfoRef sfi_ref(broker(), shared);
-    candidate.can_inline_function[i] = sfi_ref.IsInlineable();
+    if (!candidate.bytecode[i].has_value()) {
+      // We're already missing critical data which wouldn't allow us to
+      // continue the inlining checks. Log a warning and continue.
+      if (candidate.functions[i].has_value()) {
+        TRACE_BROKER(broker(),
+                     "Missing bytecode array trying to inline function "
+                         << candidate.functions[i].value().object().address());
+      } else {
+        TRACE_BROKER(
+            broker(),
+            "Missing bytecode array trying to inline function with SFI "
+                << candidate.shared_info.value().object().address());
+      }
+      // Those functions that don't have their bytecode serialized probably
+      // don't have the SFI either, so we exit the loop early.
+      candidate.can_inline_function[i] = false;
+      continue;
+    }
+    SharedFunctionInfoRef shared = candidate.functions[i].has_value()
+                                       ? candidate.functions[i].value().shared()
+                                       : candidate.shared_info.value();
+    candidate.can_inline_function[i] = shared.IsInlineable();
     // Do not allow direct recursion i.e. f() -> f(). We still allow indirect
     // recursion like f() -> g() -> f(). The indirect recursion is helpful in
     // cases where f() is a small dispatch function that calls the appropriate
@@ -118,17 +146,19 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
     // analysis that converts them to iterative implementations. Though it is
     // not obvious if such an analysis is needed.
     if (frame_info.shared_info().ToHandle(&frame_shared_info) &&
-        *frame_shared_info == *shared) {
+        frame_shared_info.equals(shared.object())) {
       TRACE("Not considering call site #%d:%s, because of recursive inlining\n",
             node->id(), node->op()->mnemonic());
       candidate.can_inline_function[i] = false;
     }
-    Handle<BytecodeArray> bytecode = candidate.bytecode[i];
+    // A function reaching this point should always have its bytecode
+    // serialized.
+    BytecodeArrayRef bytecode = candidate.bytecode[i].value();
     if (candidate.can_inline_function[i]) {
       can_inline = true;
-      candidate.total_size += bytecode->length();
+      candidate.total_size += bytecode.length();
     }
-    // We don't force inline small functions if any of them is not inlineable
+    // We don't force inline small functions if any of them is not inlineable.
     if (!IsSmallInlineFunction(bytecode)) {
       force_inline_small = false;
     }
@@ -566,7 +596,7 @@ void JSInliningHeuristic::CreateOrReuseDispatch(Node* node, Node* callee,
   for (int i = 0; i < num_calls; ++i) {
     // TODO(2206): Make comparison be based on underlying SharedFunctionInfo
     // instead of the target JSFunction reference directly.
-    Node* target = jsgraph()->HeapConstant(candidate.functions[i]);
+    Node* target = jsgraph()->Constant(candidate.functions[i].value());
     if (i != (num_calls - 1)) {
       Node* check =
           graph()->NewNode(simplified()->ReferenceEqual(), callee, target);
@@ -601,7 +631,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
   if (num_calls == 1) {
     Reduction const reduction = inliner_.ReduceJSCall(node);
     if (reduction.Changed()) {
-      cumulative_count_ += candidate.bytecode[0]->length();
+      cumulative_count_ += candidate.bytecode[0].value().length();
     }
     return reduction;
   }
@@ -709,12 +739,12 @@ void JSInliningHeuristic::PrintCandidates() {
               << candidate.node->op()->mnemonic()
               << ", frequency: " << candidate.frequency << std::endl;
     for (int i = 0; i < candidate.num_functions; ++i) {
-      Handle<SharedFunctionInfo> shared =
-          candidate.functions[i].is_null()
-              ? candidate.shared_info
-              : handle(candidate.functions[i]->shared(), isolate());
-      PrintF("  - size:%d, name: %s\n", candidate.bytecode[i]->length(),
-             shared->DebugName()->ToCString().get());
+      SharedFunctionInfoRef shared =
+          candidate.functions[i].has_value()
+              ? candidate.functions[i].value().shared()
+              : candidate.shared_info.value();
+      PrintF("  - size:%d, name: %s\n", candidate.bytecode[i].value().length(),
+             shared.object()->DebugName()->ToCString().get());
     }
   }
 }
...
@@ -41,18 +41,18 @@ class JSInliningHeuristic final : public AdvancedReducer {
   static const int kMaxCallPolymorphism = 4;
 
   struct Candidate {
-    Handle<JSFunction> functions[kMaxCallPolymorphism];
+    base::Optional<JSFunctionRef> functions[kMaxCallPolymorphism];
     // In the case of polymorphic inlining, this tells if each of the
     // functions could be inlined.
     bool can_inline_function[kMaxCallPolymorphism];
     // Strong references to bytecode to ensure it is not flushed from SFI
     // while choosing inlining candidates.
-    Handle<BytecodeArray> bytecode[kMaxCallPolymorphism];
+    base::Optional<BytecodeArrayRef> bytecode[kMaxCallPolymorphism];
     // TODO(2206): For now polymorphic inlining is treated orthogonally to
     // inlining based on SharedFunctionInfo. This should be unified and the
     // above array should be switched to SharedFunctionInfo instead. Currently
     // we use {num_functions == 1 && functions[0].is_null()} as an indicator.
-    Handle<SharedFunctionInfo> shared_info;
+    base::Optional<SharedFunctionInfoRef> shared_info;
     int num_functions;
     Node* node = nullptr;     // The call site at which to inline.
     CallFrequency frequency;  // Relative frequency of this call site.
@@ -81,6 +81,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
                                StateCloneMode mode);
   Node* DuplicateStateValuesAndRename(Node* state_values, Node* from, Node* to,
                                       StateCloneMode mode);
+  Candidate CollectFunctions(Node* node, int functions_size);
 
   CommonOperatorBuilder* common() const;
   Graph* graph() const;
...
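
The header change swaps null handles for base::Optional wrappers, so "absent" becomes a state the type system tracks instead of a convention. A small sketch of the difference, with std::optional standing in for base::Optional and a hypothetical ref type:

    #include <optional>

    struct JSFunctionRefSketch { int id; };

    // Before: a null handle silently meant "absent", and dereferencing it
    // was undefined behavior. After: absence is explicit and checkable.
    std::optional<JSFunctionRefSketch> functions[4];

    bool IsSharedInfoOnlyCandidate(int num_functions) {
      // Mirrors the {num_functions == 1 && functions[0] is absent}
      // indicator from the TODO above.
      return num_functions == 1 && !functions[0].has_value();
    }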
@@ -379,6 +379,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   Handle<SharedFunctionInfo> shared_info;
   JSCallAccessor call(node);
 
+  // TODO(mslekova): Remove those when inlining is brokerized.
+  AllowHandleDereference allow_handle_deref;
+  AllowHandleAllocation allow_handle_alloc;
+
   // Determine the call target.
   if (!DetermineCallTarget(node, shared_info)) return NoChange();
...
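
The Allow* guards are scoped objects: constructing one re-permits an operation for the guard's lifetime, which is what lets the follow-up brokerization proceed file by file. A hedged sketch of the underlying RAII pattern (simplified; V8's real assert scopes are more elaborate):

    // Simplified sketch of a per-thread assert scope; V8's real
    // implementation differs.
    struct AllowHandleDereferenceSketch {
      AllowHandleDereferenceSketch() { ++depth_; }
      ~AllowHandleDereferenceSketch() { --depth_; }
      static bool IsAllowed() { return depth_ > 0; }

     private:
      static thread_local int depth_;
    };
    thread_local int AllowHandleDereferenceSketch::depth_ = 0;

    // Code that dereferences handles can assert IsAllowed(), and a
    // stack-allocated guard re-permits it exactly for the enclosing scope.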
@@ -24,6 +24,7 @@ function foo(cond) {
   func();
 }
 
+%PrepareFunctionForOptimization(foo);
 foo(true);
 foo(false);
 %OptimizeFunctionOnNextCall(foo);
...
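
A note on the test change: with --allow-natives-syntax, the mjsunit convention is to call %PrepareFunctionForOptimization(foo) before %OptimizeFunctionOnNextCall(foo), so the function is compiled and has feedback allocated by the time optimization is requested; the added line brings this test in line with that convention.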