Commit fed1364a authored by Ross McIlroy, committed by Commit Bot

[Compiler] Ensure TurboFan holds onto BytecodeArray to keep it alive.

With bytecode flushing, a SharedFunctionInfo's bytecode might be flushed while
the compiler still expects it to exist. Rather than repeatedly fetching the
bytecode from the SFI, bottleneck the points where we get the BytecodeArray
from an SFI and maintain an explicit strong reference to the BytecodeArray from
that point onwards to prevent flushing (a minimal sketch of this pattern
follows the commit metadata below).

BUG=v8:8395

Change-Id: I6a18adec99402838690971eb37ee0617cdc15920
Reviewed-on: https://chromium-review.googlesource.com/c/1309763
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57536}
parent a2f7867d
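To make the approach concrete, here is a minimal sketch of the pattern this commit applies, using hypothetical stand-in types rather than the real V8 classes; `std::weak_ptr`/`std::shared_ptr` model the GC's weak and strong references:

```cpp
#include <cassert>
#include <memory>

// Hypothetical stand-ins for the V8 types, not the real implementations.
struct BytecodeArray {
  int length;
};

// The SFI holds its bytecode weakly: bytecode flushing may drop it at any GC.
struct SharedFunctionInfo {
  std::weak_ptr<BytecodeArray> bytecode;
};

// The bottleneck: take a strong reference to the bytecode exactly once, when
// the compilation job is created, and use only that reference afterwards.
class CompilationJobSketch {
 public:
  explicit CompilationJobSketch(const SharedFunctionInfo& sfi)
      // Strong reference from here onwards; flushing can no longer free it.
      : bytecode_array_(sfi.bytecode.lock()) {
    assert(bytecode_array_ != nullptr);  // caller ensured the SFI is compiled
  }

  // Later compilation phases read the retained handle, never the SFI again.
  int BytecodeLength() const { return bytecode_array_->length; }

 private:
  std::shared_ptr<BytecodeArray> bytecode_array_;
};
```

The diff below threads a strong Handle<BytecodeArray> through OptimizedCompilationInfo, BytecodeGraphBuilder, and the inlining heuristic in the same way.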
......
@@ -515,7 +515,8 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
 }
 
 BytecodeGraphBuilder::BytecodeGraphBuilder(
-    Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
+    Zone* local_zone, Handle<BytecodeArray> bytecode_array,
+    Handle<SharedFunctionInfo> shared_info,
     Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
     JSGraph* jsgraph, CallFrequency& invocation_frequency,
     SourcePositionTable* source_positions, Handle<Context> native_context,
......
@@ -524,14 +525,13 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
     : local_zone_(local_zone),
       jsgraph_(jsgraph),
       invocation_frequency_(invocation_frequency),
-      bytecode_array_(
-          handle(shared_info->GetBytecodeArray(), jsgraph->isolate())),
+      bytecode_array_(bytecode_array),
       feedback_vector_(feedback_vector),
       type_hint_lowering_(jsgraph, feedback_vector, flags),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
-          bytecode_array()->parameter_count(),
-          bytecode_array()->register_count(), shared_info)),
+          bytecode_array->parameter_count(), bytecode_array->register_count(),
+          shared_info)),
       bytecode_iterator_(nullptr),
       bytecode_analysis_(nullptr),
       environment_(nullptr),
......
......
@@ -29,9 +29,10 @@ class SourcePositionTable;
 class BytecodeGraphBuilder {
  public:
   BytecodeGraphBuilder(
-      Zone* local_zone, Handle<SharedFunctionInfo> shared,
-      Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
-      JSGraph* jsgraph, CallFrequency& invocation_frequency,
+      Zone* local_zone, Handle<BytecodeArray> bytecode_array,
+      Handle<SharedFunctionInfo> shared, Handle<FeedbackVector> feedback_vector,
+      BailoutId osr_offset, JSGraph* jsgraph,
+      CallFrequency& invocation_frequency,
       SourcePositionTable* source_positions, Handle<Context> native_context,
       int inlining_id = SourcePosition::kNotInlined,
       JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags,
......
......
@@ -23,11 +23,15 @@ namespace compiler {
 namespace {
 
 int CollectFunctions(Node* node, Handle<JSFunction>* functions,
-                     int functions_size, Handle<SharedFunctionInfo>& shared) {
+                     Handle<BytecodeArray>* bytecode, int functions_size,
+                     Handle<SharedFunctionInfo>& shared, Isolate* isolate) {
   DCHECK_NE(0, functions_size);
   HeapObjectMatcher m(node);
   if (m.HasValue() && m.Value()->IsJSFunction()) {
     functions[0] = Handle<JSFunction>::cast(m.Value());
+    if (functions[0]->shared()->HasBytecodeArray()) {
+      bytecode[0] = handle(functions[0]->shared()->GetBytecodeArray(), isolate);
+    }
     return 1;
   }
   if (m.IsPhi()) {
......
@@ -37,6 +41,10 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions,
       HeapObjectMatcher m(node->InputAt(n));
       if (!m.HasValue() || !m.Value()->IsJSFunction()) return 0;
       functions[n] = Handle<JSFunction>::cast(m.Value());
+      if (functions[n]->shared()->HasBytecodeArray()) {
+        bytecode[n] =
+            handle(functions[n]->shared()->GetBytecodeArray(), isolate);
+      }
     }
     return value_input_count;
   }
......
@@ -44,12 +52,16 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions,
     CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
     functions[0] = Handle<JSFunction>::null();
     shared = p.shared_info();
+    if (shared->HasBytecodeArray()) {
+      bytecode[0] = handle(shared->GetBytecodeArray(), isolate);
+    }
     return 1;
   }
   return 0;
 }
 
-bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
+bool CanInlineFunction(Handle<SharedFunctionInfo> shared,
+                       Handle<BytecodeArray> bytecode) {
   // Built-in functions are handled by the JSCallReducer.
   if (shared->HasBuiltinFunctionId()) return false;
 
......
@@ -59,21 +71,21 @@ bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
   // If there is no bytecode array, it is either not compiled or it is compiled
   // with WebAssembly for the asm.js pipeline. In either case we don't want to
   // inline.
-  if (!shared->HasBytecodeArray()) return false;
+  if (bytecode.is_null()) return false;
 
   // Quick check on the size of the bytecode to avoid inlining large functions.
-  if (shared->GetBytecodeArray()->length() > FLAG_max_inlined_bytecode_size) {
+  if (bytecode->length() > FLAG_max_inlined_bytecode_size) {
     return false;
   }
 
   return true;
 }
 
-bool IsSmallInlineFunction(Handle<SharedFunctionInfo> shared) {
+bool IsSmallInlineFunction(Handle<BytecodeArray> bytecode) {
   // Forcibly inline small functions.
   // Don't forcibly inline functions that weren't compiled yet.
-  if (shared->HasBytecodeArray() && shared->GetBytecodeArray()->length() <=
-                                        FLAG_max_inlined_bytecode_size_small) {
+  if (!bytecode.is_null() &&
+      bytecode->length() <= FLAG_max_inlined_bytecode_size_small) {
     return true;
   }
   return false;
......
@@ -92,8 +104,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
   Node* callee = node->InputAt(0);
   Candidate candidate;
   candidate.node = node;
-  candidate.num_functions = CollectFunctions(
-      callee, candidate.functions, kMaxCallPolymorphism, candidate.shared_info);
+  candidate.num_functions =
+      CollectFunctions(callee, candidate.functions, candidate.bytecode,
+                       kMaxCallPolymorphism, candidate.shared_info, isolate());
   if (candidate.num_functions == 0) {
     return NoChange();
   } else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
......
@@ -114,7 +127,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
         candidate.functions[i].is_null()
             ? candidate.shared_info
             : handle(candidate.functions[i]->shared(), isolate());
-    candidate.can_inline_function[i] = CanInlineFunction(shared);
+    Handle<BytecodeArray> bytecode = candidate.bytecode[i];
+    candidate.can_inline_function[i] = CanInlineFunction(shared, bytecode);
     // Do not allow direct recursion, i.e. f() -> f(). We still allow indirect
     // recursion like f() -> g() -> f(). The indirect recursion is helpful in
     // cases where f() is a small dispatch function that calls the appropriate
......
@@ -132,9 +146,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
     }
     if (candidate.can_inline_function[i]) {
       can_inline = true;
-      candidate.total_size += shared->GetBytecodeArray()->length();
+      candidate.total_size += bytecode->length();
     }
-    if (!IsSmallInlineFunction(shared)) {
+    if (!IsSmallInlineFunction(bytecode)) {
       small_inline = false;
     }
   }
......
@@ -604,13 +618,9 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
   int const num_calls = candidate.num_functions;
   Node* const node = candidate.node;
   if (num_calls == 1) {
-    Handle<SharedFunctionInfo> shared =
-        candidate.functions[0].is_null()
-            ? candidate.shared_info
-            : handle(candidate.functions[0]->shared(), isolate());
     Reduction const reduction = inliner_.ReduceJSCall(node);
     if (reduction.Changed()) {
-      cumulative_count_ += shared->GetBytecodeArray()->length();
+      cumulative_count_ += candidate.bytecode[0]->length();
     }
     return reduction;
   }
......
@@ -669,7 +679,6 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
   // Inline the individual, cloned call sites.
   for (int i = 0; i < num_calls; ++i) {
-    Handle<JSFunction> function = candidate.functions[i];
     Node* node = calls[i];
     if (small_function ||
         (candidate.can_inline_function[i] &&
......
@@ -679,7 +688,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
       // Killing the call node is not strictly necessary, but it is safer to
       // make sure we do not resurrect the node.
       node->Kill();
-      cumulative_count_ += function->shared()->GetBytecodeArray()->length();
+      cumulative_count_ += candidate.bytecode[i]->length();
     }
   }
 }
......
@@ -720,7 +729,7 @@ void JSInliningHeuristic::PrintCandidates() {
         candidate.functions[i].is_null()
             ? candidate.shared_info
             : handle(candidate.functions[i]->shared(), isolate());
-    PrintF(" - size:%d, name: %s\n", shared->GetBytecodeArray()->length(),
+    PrintF(" - size:%d, name: %s\n", candidate.bytecode[i]->length(),
            shared->DebugName()->ToCString().get());
   }
 }
......
......
@@ -44,6 +44,9 @@ class JSInliningHeuristic final : public AdvancedReducer {
     // In the case of polymorphic inlining, this tells if each of the
     // functions could be inlined.
     bool can_inline_function[kMaxCallPolymorphism];
+    // Strong references to bytecode to ensure it is not flushed from the SFI
+    // while choosing inlining candidates.
+    Handle<BytecodeArray> bytecode[kMaxCallPolymorphism];
     // TODO(2206): For now polymorphic inlining is treated orthogonally to
     // inlining based on SharedFunctionInfo. This should be unified and the
     // above array should be switched to SharedFunctionInfo instead. Currently
......
......
@@ -469,6 +469,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
           info_->shared_info()->DebugName()->ToCString().get(),
           (exception_target != nullptr) ? " (inside try-block)" : "");
 
+  // Get the bytecode array.
+  Handle<BytecodeArray> bytecode_array =
+      handle(shared_info->GetBytecodeArray(), isolate());
+
   // Determine the target's feedback vector and its context.
   Node* context;
   Handle<FeedbackVector> feedback_vector;
......
@@ -490,8 +494,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   }
 
   CallFrequency frequency = call.frequency();
   BytecodeGraphBuilder graph_builder(
-      zone(), shared_info, feedback_vector, BailoutId::None(), jsgraph(),
-      frequency, source_positions_, native_context(), inlining_id,
+      zone(), bytecode_array, shared_info, feedback_vector, BailoutId::None(),
+      jsgraph(), frequency, source_positions_, native_context(), inlining_id,
       flags, false, info_->is_analyze_environment_liveness());
   graph_builder.CreateGraph();
......
......
@@ -232,6 +232,7 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
   // extracted from the top-most frame in {frame_state}.
   Handle<SharedFunctionInfo> shared =
       FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked();
+  DCHECK(shared->is_compiled());
   int register_count = shared->internal_formal_parameter_count() +
                        shared->GetBytecodeArray()->register_count();
   Node* value = effect =
......
......
@@ -15,12 +15,10 @@ namespace internal {
 namespace compiler {
 
 OsrHelper::OsrHelper(OptimizedCompilationInfo* info)
-    : parameter_count_(
-          info->shared_info()->GetBytecodeArray()->parameter_count()),
-      stack_slot_count_(
-          InterpreterFrameConstants::RegisterStackSlotCount(
-              info->shared_info()->GetBytecodeArray()->register_count()) +
-          InterpreterFrameConstants::kExtraSlotCount) {}
+    : parameter_count_(info->bytecode_array()->parameter_count()),
+      stack_slot_count_(InterpreterFrameConstants::RegisterStackSlotCount(
+                            info->bytecode_array()->register_count()) +
+                        InterpreterFrameConstants::kExtraSlotCount) {}
 
 void OsrHelper::SetupFrame(Frame* frame) {
   // The optimized frame will subsume the unoptimized frame. Do so by reserving
......
......
@@ -883,7 +883,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
 
 PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
     Isolate* isolate) {
-  if (compilation_info()->shared_info()->GetBytecodeArray()->length() >
+  if (compilation_info()->bytecode_array()->length() >
       kMaxBytecodeSizeForTurbofan) {
     return AbortOptimization(BailoutReason::kFunctionTooBig);
   }
......
@@ -1024,7 +1024,7 @@ struct GraphBuilderPhase {
     }
     CallFrequency frequency = CallFrequency(1.0f);
     BytecodeGraphBuilder graph_builder(
-        temp_zone, data->info()->shared_info(),
+        temp_zone, data->info()->bytecode_array(), data->info()->shared_info(),
         handle(data->info()->closure()->feedback_vector(), data->isolate()),
         data->info()->osr_offset(), data->jsgraph(), frequency,
         data->source_positions(), data->native_context(),
......
......
@@ -18,6 +18,8 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
     Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
     Handle<JSFunction> closure)
     : OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
+  DCHECK(shared->is_compiled());
+  bytecode_array_ = handle(shared->GetBytecodeArray(), isolate);
   shared_info_ = shared;
   closure_ = closure;
   optimization_id_ = isolate->NextOptimizationId();
......
@@ -107,6 +109,9 @@ void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
   if (!shared_info_.is_null()) {
     shared_info_ = Handle<SharedFunctionInfo>(*shared_info_, isolate);
   }
+  if (!bytecode_array_.is_null()) {
+    bytecode_array_ = Handle<BytecodeArray>(*bytecode_array_, isolate);
+  }
   if (!closure_.is_null()) {
     closure_ = Handle<JSFunction>(*closure_, isolate);
   }
......
......
@@ -68,6 +68,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   bool is_osr() const { return !osr_offset_.IsNone(); }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   bool has_shared_info() const { return !shared_info().is_null(); }
+  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+  bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
   Handle<JSFunction> closure() const { return closure_; }
   Handle<Code> code() const { return code_; }
   Code::Kind code_kind() const { return code_kind_; }
......
@@ -278,6 +280,10 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   uint32_t stub_key_ = 0;
   int32_t builtin_index_ = -1;
 
+  // We retain a reference to the bytecode array specifically to ensure it
+  // doesn't get flushed while we are optimizing the code.
+  Handle<BytecodeArray> bytecode_array_;
+
   Handle<SharedFunctionInfo> shared_info_;
 
   Handle<JSFunction> closure_;
......
......
@@ -142,6 +142,9 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
 Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
   Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+  CHECK(function->is_compiled() ||
+        Compiler::Compile(function, Compiler::CLEAR_EXCEPTION));
+
   Zone zone(isolate->allocator(), ZONE_NAME);
   OptimizedCompilationInfo info(&zone, isolate, shared, function);
......
@@ -149,8 +152,6 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
     info.MarkAsInliningEnabled();
   }
 
-  CHECK(function->is_compiled() ||
-        Compiler::Compile(function, Compiler::CLEAR_EXCEPTION));
   CHECK(info.shared_info()->HasBytecodeArray());
   JSFunction::EnsureFeedbackVector(function);
......
......
@@ -73,6 +73,7 @@ TEST_F(OptimizingCompileDispatcherTest, Construct) {
 TEST_F(OptimizingCompileDispatcherTest, NonBlockingFlush) {
   Handle<JSFunction> fun =
       RunJS<JSFunction>("function f() { function g() {}; return g;}; f();");
+  ASSERT_TRUE(Compiler::Compile(fun, Compiler::CLEAR_EXCEPTION));
   BlockingCompilationJob* job = new BlockingCompilationJob(i_isolate(), fun);
 
   OptimizingCompileDispatcher dispatcher(i_isolate());
......