Commit fed1364a authored by Ross McIlroy, committed by Commit Bot

[Compiler] Ensure TurboFan holds onto BytecodeArray to keep it alive.

With Bytecode flushing, a SharedFunctionInfo's bytecode might be flushed
while the compiler is expecting it to still exist. Rather than continually
getting the bytecode from the SFI, instead bottleneck the points where we get
BytecodeArray from SFIs and maintain an explicit strong reference to the
BytecodeArray from that point onwards to prevent flushing.

BUG=v8:8395

Change-Id: I6a18adec99402838690971eb37ee0617cdc15920
Reviewed-on: https://chromium-review.googlesource.com/c/1309763
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57536}
parent a2f7867d
...@@ -515,7 +515,8 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint( ...@@ -515,7 +515,8 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
} }
BytecodeGraphBuilder::BytecodeGraphBuilder( BytecodeGraphBuilder::BytecodeGraphBuilder(
Zone* local_zone, Handle<SharedFunctionInfo> shared_info, Zone* local_zone, Handle<BytecodeArray> bytecode_array,
Handle<SharedFunctionInfo> shared_info,
Handle<FeedbackVector> feedback_vector, BailoutId osr_offset, Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
JSGraph* jsgraph, CallFrequency& invocation_frequency, JSGraph* jsgraph, CallFrequency& invocation_frequency,
SourcePositionTable* source_positions, Handle<Context> native_context, SourcePositionTable* source_positions, Handle<Context> native_context,
...@@ -524,14 +525,13 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( ...@@ -524,14 +525,13 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
: local_zone_(local_zone), : local_zone_(local_zone),
jsgraph_(jsgraph), jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency), invocation_frequency_(invocation_frequency),
bytecode_array_( bytecode_array_(bytecode_array),
handle(shared_info->GetBytecodeArray(), jsgraph->isolate())),
feedback_vector_(feedback_vector), feedback_vector_(feedback_vector),
type_hint_lowering_(jsgraph, feedback_vector, flags), type_hint_lowering_(jsgraph, feedback_vector, flags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo( frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction, FrameStateType::kInterpretedFunction,
bytecode_array()->parameter_count(), bytecode_array->parameter_count(), bytecode_array->register_count(),
bytecode_array()->register_count(), shared_info)), shared_info)),
bytecode_iterator_(nullptr), bytecode_iterator_(nullptr),
bytecode_analysis_(nullptr), bytecode_analysis_(nullptr),
environment_(nullptr), environment_(nullptr),
......
...@@ -29,9 +29,10 @@ class SourcePositionTable; ...@@ -29,9 +29,10 @@ class SourcePositionTable;
class BytecodeGraphBuilder { class BytecodeGraphBuilder {
public: public:
BytecodeGraphBuilder( BytecodeGraphBuilder(
Zone* local_zone, Handle<SharedFunctionInfo> shared, Zone* local_zone, Handle<BytecodeArray> bytecode_array,
Handle<FeedbackVector> feedback_vector, BailoutId osr_offset, Handle<SharedFunctionInfo> shared, Handle<FeedbackVector> feedback_vector,
JSGraph* jsgraph, CallFrequency& invocation_frequency, BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency& invocation_frequency,
SourcePositionTable* source_positions, Handle<Context> native_context, SourcePositionTable* source_positions, Handle<Context> native_context,
int inlining_id = SourcePosition::kNotInlined, int inlining_id = SourcePosition::kNotInlined,
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags, JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags,
......
...@@ -23,11 +23,15 @@ namespace compiler { ...@@ -23,11 +23,15 @@ namespace compiler {
namespace { namespace {
int CollectFunctions(Node* node, Handle<JSFunction>* functions, int CollectFunctions(Node* node, Handle<JSFunction>* functions,
int functions_size, Handle<SharedFunctionInfo>& shared) { Handle<BytecodeArray>* bytecode, int functions_size,
Handle<SharedFunctionInfo>& shared, Isolate* isolate) {
DCHECK_NE(0, functions_size); DCHECK_NE(0, functions_size);
HeapObjectMatcher m(node); HeapObjectMatcher m(node);
if (m.HasValue() && m.Value()->IsJSFunction()) { if (m.HasValue() && m.Value()->IsJSFunction()) {
functions[0] = Handle<JSFunction>::cast(m.Value()); functions[0] = Handle<JSFunction>::cast(m.Value());
if (functions[0]->shared()->HasBytecodeArray()) {
bytecode[0] = handle(functions[0]->shared()->GetBytecodeArray(), isolate);
}
return 1; return 1;
} }
if (m.IsPhi()) { if (m.IsPhi()) {
...@@ -37,6 +41,10 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions, ...@@ -37,6 +41,10 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions,
HeapObjectMatcher m(node->InputAt(n)); HeapObjectMatcher m(node->InputAt(n));
if (!m.HasValue() || !m.Value()->IsJSFunction()) return 0; if (!m.HasValue() || !m.Value()->IsJSFunction()) return 0;
functions[n] = Handle<JSFunction>::cast(m.Value()); functions[n] = Handle<JSFunction>::cast(m.Value());
if (functions[n]->shared()->HasBytecodeArray()) {
bytecode[n] =
handle(functions[n]->shared()->GetBytecodeArray(), isolate);
}
} }
return value_input_count; return value_input_count;
} }
...@@ -44,12 +52,16 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions, ...@@ -44,12 +52,16 @@ int CollectFunctions(Node* node, Handle<JSFunction>* functions,
CreateClosureParameters const& p = CreateClosureParametersOf(m.op()); CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
functions[0] = Handle<JSFunction>::null(); functions[0] = Handle<JSFunction>::null();
shared = p.shared_info(); shared = p.shared_info();
if (shared->HasBytecodeArray()) {
bytecode[0] = handle(shared->GetBytecodeArray(), isolate);
}
return 1; return 1;
} }
return 0; return 0;
} }
bool CanInlineFunction(Handle<SharedFunctionInfo> shared) { bool CanInlineFunction(Handle<SharedFunctionInfo> shared,
Handle<BytecodeArray> bytecode) {
// Built-in functions are handled by the JSCallReducer. // Built-in functions are handled by the JSCallReducer.
if (shared->HasBuiltinFunctionId()) return false; if (shared->HasBuiltinFunctionId()) return false;
...@@ -59,21 +71,21 @@ bool CanInlineFunction(Handle<SharedFunctionInfo> shared) { ...@@ -59,21 +71,21 @@ bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
// If there is no bytecode array, it is either not compiled or it is compiled // If there is no bytecode array, it is either not compiled or it is compiled
// with WebAssembly for the asm.js pipeline. In either case we don't want to // with WebAssembly for the asm.js pipeline. In either case we don't want to
// inline. // inline.
if (!shared->HasBytecodeArray()) return false; if (bytecode.is_null()) return false;
// Quick check on the size of the bytecode to avoid inlining large functions. // Quick check on the size of the bytecode to avoid inlining large functions.
if (shared->GetBytecodeArray()->length() > FLAG_max_inlined_bytecode_size) { if (bytecode->length() > FLAG_max_inlined_bytecode_size) {
return false; return false;
} }
return true; return true;
} }
bool IsSmallInlineFunction(Handle<SharedFunctionInfo> shared) { bool IsSmallInlineFunction(Handle<BytecodeArray> bytecode) {
// Forcibly inline small functions. // Forcibly inline small functions.
// Don't forcibly inline functions that weren't compiled yet. // Don't forcibly inline functions that weren't compiled yet.
if (shared->HasBytecodeArray() && shared->GetBytecodeArray()->length() <= if (!bytecode.is_null() &&
FLAG_max_inlined_bytecode_size_small) { bytecode->length() <= FLAG_max_inlined_bytecode_size_small) {
return true; return true;
} }
return false; return false;
...@@ -92,8 +104,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { ...@@ -92,8 +104,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
Node* callee = node->InputAt(0); Node* callee = node->InputAt(0);
Candidate candidate; Candidate candidate;
candidate.node = node; candidate.node = node;
candidate.num_functions = CollectFunctions( candidate.num_functions =
callee, candidate.functions, kMaxCallPolymorphism, candidate.shared_info); CollectFunctions(callee, candidate.functions, candidate.bytecode,
kMaxCallPolymorphism, candidate.shared_info, isolate());
if (candidate.num_functions == 0) { if (candidate.num_functions == 0) {
return NoChange(); return NoChange();
} else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) { } else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
...@@ -114,7 +127,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { ...@@ -114,7 +127,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
candidate.functions[i].is_null() candidate.functions[i].is_null()
? candidate.shared_info ? candidate.shared_info
: handle(candidate.functions[i]->shared(), isolate()); : handle(candidate.functions[i]->shared(), isolate());
candidate.can_inline_function[i] = CanInlineFunction(shared); Handle<BytecodeArray> bytecode = candidate.bytecode[i];
candidate.can_inline_function[i] = CanInlineFunction(shared, bytecode);
// Do not allow direct recursion i.e. f() -> f(). We still allow indirect // Do not allow direct recursion i.e. f() -> f(). We still allow indirect
// recursion like f() -> g() -> f(). The indirect recursion is helpful in // recursion like f() -> g() -> f(). The indirect recursion is helpful in
// cases where f() is a small dispatch function that calls the appropriate // cases where f() is a small dispatch function that calls the appropriate
...@@ -132,9 +146,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { ...@@ -132,9 +146,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
} }
if (candidate.can_inline_function[i]) { if (candidate.can_inline_function[i]) {
can_inline = true; can_inline = true;
candidate.total_size += shared->GetBytecodeArray()->length(); candidate.total_size += bytecode->length();
} }
if (!IsSmallInlineFunction(shared)) { if (!IsSmallInlineFunction(bytecode)) {
small_inline = false; small_inline = false;
} }
} }
...@@ -604,13 +618,9 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate, ...@@ -604,13 +618,9 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
int const num_calls = candidate.num_functions; int const num_calls = candidate.num_functions;
Node* const node = candidate.node; Node* const node = candidate.node;
if (num_calls == 1) { if (num_calls == 1) {
Handle<SharedFunctionInfo> shared =
candidate.functions[0].is_null()
? candidate.shared_info
: handle(candidate.functions[0]->shared(), isolate());
Reduction const reduction = inliner_.ReduceJSCall(node); Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) { if (reduction.Changed()) {
cumulative_count_ += shared->GetBytecodeArray()->length(); cumulative_count_ += candidate.bytecode[0]->length();
} }
return reduction; return reduction;
} }
...@@ -669,7 +679,6 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate, ...@@ -669,7 +679,6 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
// Inline the individual, cloned call sites. // Inline the individual, cloned call sites.
for (int i = 0; i < num_calls; ++i) { for (int i = 0; i < num_calls; ++i) {
Handle<JSFunction> function = candidate.functions[i];
Node* node = calls[i]; Node* node = calls[i];
if (small_function || if (small_function ||
(candidate.can_inline_function[i] && (candidate.can_inline_function[i] &&
...@@ -679,7 +688,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate, ...@@ -679,7 +688,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
// Killing the call node is not strictly necessary, but it is safer to // Killing the call node is not strictly necessary, but it is safer to
// make sure we do not resurrect the node. // make sure we do not resurrect the node.
node->Kill(); node->Kill();
cumulative_count_ += function->shared()->GetBytecodeArray()->length(); cumulative_count_ += candidate.bytecode[i]->length();
} }
} }
} }
...@@ -720,7 +729,7 @@ void JSInliningHeuristic::PrintCandidates() { ...@@ -720,7 +729,7 @@ void JSInliningHeuristic::PrintCandidates() {
candidate.functions[i].is_null() candidate.functions[i].is_null()
? candidate.shared_info ? candidate.shared_info
: handle(candidate.functions[i]->shared(), isolate()); : handle(candidate.functions[i]->shared(), isolate());
PrintF(" - size:%d, name: %s\n", shared->GetBytecodeArray()->length(), PrintF(" - size:%d, name: %s\n", candidate.bytecode[i]->length(),
shared->DebugName()->ToCString().get()); shared->DebugName()->ToCString().get());
} }
} }
......
...@@ -44,6 +44,9 @@ class JSInliningHeuristic final : public AdvancedReducer { ...@@ -44,6 +44,9 @@ class JSInliningHeuristic final : public AdvancedReducer {
// In the case of polymorphic inlining, this tells if each of the // In the case of polymorphic inlining, this tells if each of the
// functions could be inlined. // functions could be inlined.
bool can_inline_function[kMaxCallPolymorphism]; bool can_inline_function[kMaxCallPolymorphism];
// Strong references to bytecode to ensure it is not flushed from SFI
// while choosing inlining candidates.
Handle<BytecodeArray> bytecode[kMaxCallPolymorphism];
// TODO(2206): For now polymorphic inlining is treated orthogonally to // TODO(2206): For now polymorphic inlining is treated orthogonally to
// inlining based on SharedFunctionInfo. This should be unified and the // inlining based on SharedFunctionInfo. This should be unified and the
// above array should be switched to SharedFunctionInfo instead. Currently // above array should be switched to SharedFunctionInfo instead. Currently
......
...@@ -469,6 +469,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) { ...@@ -469,6 +469,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
info_->shared_info()->DebugName()->ToCString().get(), info_->shared_info()->DebugName()->ToCString().get(),
(exception_target != nullptr) ? " (inside try-block)" : ""); (exception_target != nullptr) ? " (inside try-block)" : "");
// Get the bytecode array.
Handle<BytecodeArray> bytecode_array =
handle(shared_info->GetBytecodeArray(), isolate());
// Determine the targets feedback vector and its context. // Determine the targets feedback vector and its context.
Node* context; Node* context;
Handle<FeedbackVector> feedback_vector; Handle<FeedbackVector> feedback_vector;
...@@ -490,8 +494,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) { ...@@ -490,8 +494,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
} }
CallFrequency frequency = call.frequency(); CallFrequency frequency = call.frequency();
BytecodeGraphBuilder graph_builder( BytecodeGraphBuilder graph_builder(
zone(), shared_info, feedback_vector, BailoutId::None(), jsgraph(), zone(), bytecode_array, shared_info, feedback_vector, BailoutId::None(),
frequency, source_positions_, native_context(), inlining_id, jsgraph(), frequency, source_positions_, native_context(), inlining_id,
flags, false, info_->is_analyze_environment_liveness()); flags, false, info_->is_analyze_environment_liveness());
graph_builder.CreateGraph(); graph_builder.CreateGraph();
......
...@@ -232,6 +232,7 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter( ...@@ -232,6 +232,7 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
// extracted from the top-most frame in {frame_state}. // extracted from the top-most frame in {frame_state}.
Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo> shared =
FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked(); FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked();
DCHECK(shared->is_compiled());
int register_count = shared->internal_formal_parameter_count() + int register_count = shared->internal_formal_parameter_count() +
shared->GetBytecodeArray()->register_count(); shared->GetBytecodeArray()->register_count();
Node* value = effect = Node* value = effect =
......
...@@ -15,12 +15,10 @@ namespace internal { ...@@ -15,12 +15,10 @@ namespace internal {
namespace compiler { namespace compiler {
OsrHelper::OsrHelper(OptimizedCompilationInfo* info) OsrHelper::OsrHelper(OptimizedCompilationInfo* info)
: parameter_count_( : parameter_count_(info->bytecode_array()->parameter_count()),
info->shared_info()->GetBytecodeArray()->parameter_count()), stack_slot_count_(InterpreterFrameConstants::RegisterStackSlotCount(
stack_slot_count_( info->bytecode_array()->register_count()) +
InterpreterFrameConstants::RegisterStackSlotCount( InterpreterFrameConstants::kExtraSlotCount) {}
info->shared_info()->GetBytecodeArray()->register_count()) +
InterpreterFrameConstants::kExtraSlotCount) {}
void OsrHelper::SetupFrame(Frame* frame) { void OsrHelper::SetupFrame(Frame* frame) {
// The optimized frame will subsume the unoptimized frame. Do so by reserving // The optimized frame will subsume the unoptimized frame. Do so by reserving
......
...@@ -883,7 +883,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob { ...@@ -883,7 +883,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl( PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
Isolate* isolate) { Isolate* isolate) {
if (compilation_info()->shared_info()->GetBytecodeArray()->length() > if (compilation_info()->bytecode_array()->length() >
kMaxBytecodeSizeForTurbofan) { kMaxBytecodeSizeForTurbofan) {
return AbortOptimization(BailoutReason::kFunctionTooBig); return AbortOptimization(BailoutReason::kFunctionTooBig);
} }
...@@ -1024,7 +1024,7 @@ struct GraphBuilderPhase { ...@@ -1024,7 +1024,7 @@ struct GraphBuilderPhase {
} }
CallFrequency frequency = CallFrequency(1.0f); CallFrequency frequency = CallFrequency(1.0f);
BytecodeGraphBuilder graph_builder( BytecodeGraphBuilder graph_builder(
temp_zone, data->info()->shared_info(), temp_zone, data->info()->bytecode_array(), data->info()->shared_info(),
handle(data->info()->closure()->feedback_vector(), data->isolate()), handle(data->info()->closure()->feedback_vector(), data->isolate()),
data->info()->osr_offset(), data->jsgraph(), frequency, data->info()->osr_offset(), data->jsgraph(), frequency,
data->source_positions(), data->native_context(), data->source_positions(), data->native_context(),
......
...@@ -18,6 +18,8 @@ OptimizedCompilationInfo::OptimizedCompilationInfo( ...@@ -18,6 +18,8 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared, Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure) Handle<JSFunction> closure)
: OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) { : OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
DCHECK(shared->is_compiled());
bytecode_array_ = handle(shared->GetBytecodeArray(), isolate);
shared_info_ = shared; shared_info_ = shared;
closure_ = closure; closure_ = closure;
optimization_id_ = isolate->NextOptimizationId(); optimization_id_ = isolate->NextOptimizationId();
...@@ -107,6 +109,9 @@ void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) { ...@@ -107,6 +109,9 @@ void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
if (!shared_info_.is_null()) { if (!shared_info_.is_null()) {
shared_info_ = Handle<SharedFunctionInfo>(*shared_info_, isolate); shared_info_ = Handle<SharedFunctionInfo>(*shared_info_, isolate);
} }
if (!bytecode_array_.is_null()) {
bytecode_array_ = Handle<BytecodeArray>(*bytecode_array_, isolate);
}
if (!closure_.is_null()) { if (!closure_.is_null()) {
closure_ = Handle<JSFunction>(*closure_, isolate); closure_ = Handle<JSFunction>(*closure_, isolate);
} }
......
...@@ -68,6 +68,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { ...@@ -68,6 +68,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool is_osr() const { return !osr_offset_.IsNone(); } bool is_osr() const { return !osr_offset_.IsNone(); }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; } Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
bool has_shared_info() const { return !shared_info().is_null(); } bool has_shared_info() const { return !shared_info().is_null(); }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<JSFunction> closure() const { return closure_; } Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; } Handle<Code> code() const { return code_; }
Code::Kind code_kind() const { return code_kind_; } Code::Kind code_kind() const { return code_kind_; }
...@@ -278,6 +280,10 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { ...@@ -278,6 +280,10 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
uint32_t stub_key_ = 0; uint32_t stub_key_ = 0;
int32_t builtin_index_ = -1; int32_t builtin_index_ = -1;
// We retain a reference the bytecode array specifically to ensure it doesn't
// get flushed while we are optimizing the code.
Handle<BytecodeArray> bytecode_array_;
Handle<SharedFunctionInfo> shared_info_; Handle<SharedFunctionInfo> shared_info_;
Handle<JSFunction> closure_; Handle<JSFunction> closure_;
......
...@@ -142,6 +142,9 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph, ...@@ -142,6 +142,9 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) { Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate); Handle<SharedFunctionInfo> shared(function->shared(), isolate);
CHECK(function->is_compiled() ||
Compiler::Compile(function, Compiler::CLEAR_EXCEPTION));
Zone zone(isolate->allocator(), ZONE_NAME); Zone zone(isolate->allocator(), ZONE_NAME);
OptimizedCompilationInfo info(&zone, isolate, shared, function); OptimizedCompilationInfo info(&zone, isolate, shared, function);
...@@ -149,8 +152,6 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) { ...@@ -149,8 +152,6 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
info.MarkAsInliningEnabled(); info.MarkAsInliningEnabled();
} }
CHECK(function->is_compiled() ||
Compiler::Compile(function, Compiler::CLEAR_EXCEPTION));
CHECK(info.shared_info()->HasBytecodeArray()); CHECK(info.shared_info()->HasBytecodeArray());
JSFunction::EnsureFeedbackVector(function); JSFunction::EnsureFeedbackVector(function);
......
...@@ -73,6 +73,7 @@ TEST_F(OptimizingCompileDispatcherTest, Construct) { ...@@ -73,6 +73,7 @@ TEST_F(OptimizingCompileDispatcherTest, Construct) {
TEST_F(OptimizingCompileDispatcherTest, NonBlockingFlush) { TEST_F(OptimizingCompileDispatcherTest, NonBlockingFlush) {
Handle<JSFunction> fun = Handle<JSFunction> fun =
RunJS<JSFunction>("function f() { function g() {}; return g;}; f();"); RunJS<JSFunction>("function f() { function g() {}; return g;}; f();");
ASSERT_TRUE(Compiler::Compile(fun, Compiler::CLEAR_EXCEPTION));
BlockingCompilationJob* job = new BlockingCompilationJob(i_isolate(), fun); BlockingCompilationJob* job = new BlockingCompilationJob(i_isolate(), fun);
OptimizingCompileDispatcher dispatcher(i_isolate()); OptimizingCompileDispatcher dispatcher(i_isolate());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment