Commit 0996cea5 authored by rmcilroy, committed by Commit bot

[Interpreter] Introduce InterpreterCompilationJob

Adds InterpreterCompilationJob as a subclass of
CompilationJob to enable off-thread bytecode
generation. It is currently only used in
Interpreter::MakeBytecode.

As part of this change, CompilationJob is modified
to make it less specific to optimized compilation,
renaming the phases as follows:
 - CreateGraph -> PrepareJob
 - OptimizeGraph -> ExecuteJob
 - GenerateCode -> FinalizeJob

RegisterWeakObjectsInOptimizedCode is also moved out
of CompilationJob and instead becomes a static function
on Compiler.
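
For reference, callers drive a job through the renamed phases in
order, as the updated GetOptimizedCodeNow and Interpreter::MakeBytecode
in this change do. A minimal sketch of that call sequence (the RunJob
helper name is illustrative only, not part of this change):

  bool RunJob(CompilationJob* job) {
    // PrepareJob and FinalizeJob must run on the main thread;
    // ExecuteJob may run on a background thread.
    if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
    if (job->ExecuteJob() != CompilationJob::SUCCEEDED) return false;
    return job->FinalizeJob() == CompilationJob::SUCCEEDED;
  }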

BUG=v8:5203

Committed: https://crrev.com/1fb6a7e697e8bc5b4af51647553741f966e00cdc
Committed: https://crrev.com/785990e9fc0dd9a9d963d25d0bed2909165e4ca9
Committed: https://crrev.com/d7c6195c4c5cdc080caa74dfe2ae9ecab69bea73
Review-Url: https://codereview.chromium.org/2240463002
Cr-Original-Original-Original-Commit-Position: refs/heads/master@{#38662}
Cr-Original-Original-Commit-Position: refs/heads/master@{#38668}
Cr-Original-Commit-Position: refs/heads/master@{#38725}
Cr-Commit-Position: refs/heads/master@{#38778}
parent f9d60761
......@@ -107,7 +107,7 @@ void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
if (!job) return;
// The function may have already been optimized by OSR. Simply continue.
CompilationJob::Status status = job->OptimizeGraph();
CompilationJob::Status status = job->ExecuteJob();
USE(status); // Prevent an unused-variable error.
// The function may have already been optimized by OSR. Simply continue.
......
......@@ -241,11 +241,11 @@ bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
// ----------------------------------------------------------------------------
// Implementation of CompilationJob
CompilationJob::Status CompilationJob::CreateGraph() {
CompilationJob::Status CompilationJob::PrepareJob() {
DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
DisallowJavascriptExecution no_js(isolate());
DCHECK(info()->IsOptimizing());
if (FLAG_trace_opt) {
if (FLAG_trace_opt && info()->IsOptimizing()) {
OFStream os(stdout);
os << "[compiling method " << Brief(*info()->closure()) << " using "
<< compiler_name_;
......@@ -254,35 +254,35 @@ CompilationJob::Status CompilationJob::CreateGraph() {
}
// Delegate to the underlying implementation.
DCHECK_EQ(SUCCEEDED, last_status());
ScopedTimer t(&time_taken_to_create_graph_);
return SetLastStatus(CreateGraphImpl());
DCHECK(state() == State::kReadyToPrepare);
ScopedTimer t(&time_taken_to_prepare_);
return UpdateState(PrepareJobImpl(), State::kReadyToExecute);
}
CompilationJob::Status CompilationJob::OptimizeGraph() {
CompilationJob::Status CompilationJob::ExecuteJob() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
DisallowCodeDependencyChange no_dependency_change;
// Delegate to the underlying implementation.
DCHECK_EQ(SUCCEEDED, last_status());
ScopedTimer t(&time_taken_to_optimize_);
return SetLastStatus(OptimizeGraphImpl());
DCHECK(state() == State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_);
return UpdateState(ExecuteJobImpl(), State::kReadyToFinalize);
}
CompilationJob::Status CompilationJob::GenerateCode() {
CompilationJob::Status CompilationJob::FinalizeJob() {
DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
DisallowCodeDependencyChange no_dependency_change;
DisallowJavascriptExecution no_js(isolate());
DCHECK(!info()->dependencies()->HasAborted());
// Delegate to the underlying implementation.
DCHECK_EQ(SUCCEEDED, last_status());
ScopedTimer t(&time_taken_to_codegen_);
return SetLastStatus(GenerateCodeImpl());
DCHECK(state() == State::kReadyToFinalize);
ScopedTimer t(&time_taken_to_finalize_);
return UpdateState(FinalizeJobImpl(), State::kSucceeded);
}
namespace {
void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
......@@ -342,15 +342,16 @@ void CompilationJob::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
}
void CompilationJob::RecordOptimizationStats() {
DCHECK(info()->IsOptimizing());
Handle<JSFunction> function = info()->closure();
if (!function->IsOptimized()) {
// Concurrent recompilation and OSR may race. Increment only once.
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
}
double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
double ms_optimize = time_taken_to_execute_.InMillisecondsF();
double ms_codegen = time_taken_to_finalize_.InMillisecondsF();
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
......@@ -366,14 +367,12 @@ void CompilationJob::RecordOptimizationStats() {
compiled_functions++;
code_size += function->shared()->SourceSize();
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
compiled_functions,
code_size,
compilation_time);
compiled_functions, code_size, compilation_time);
}
if (FLAG_hydrogen_stats) {
isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
time_taken_to_optimize_,
time_taken_to_codegen_);
isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_,
time_taken_to_execute_,
time_taken_to_finalize_);
}
}
......@@ -673,9 +672,9 @@ bool GetOptimizedCodeNow(CompilationJob* job) {
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
if (job->CreateGraph() != CompilationJob::SUCCEEDED ||
job->OptimizeGraph() != CompilationJob::SUCCEEDED ||
job->GenerateCode() != CompilationJob::SUCCEEDED) {
if (job->PrepareJob() != CompilationJob::SUCCEEDED ||
job->ExecuteJob() != CompilationJob::SUCCEEDED ||
job->FinalizeJob() != CompilationJob::SUCCEEDED) {
if (FLAG_trace_opt) {
PrintF("[aborted optimizing ");
info->closure()->ShortPrint();
......@@ -736,7 +735,7 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
if (job->CreateGraph() != CompilationJob::SUCCEEDED) return false;
if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
if (FLAG_trace_concurrent_recompilation) {
......@@ -1915,12 +1914,12 @@ void Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
// Except when OSR already disabled optimization for some reason.
// 3) The code may have already been invalidated due to dependency change.
// 4) Code generation may have failed.
if (job->last_status() == CompilationJob::SUCCEEDED) {
if (job->state() == CompilationJob::State::kReadyToFinalize) {
if (shared->optimization_disabled()) {
job->RetryOptimization(kOptimizationDisabled);
} else if (info->dependencies()->HasAborted()) {
job->RetryOptimization(kBailedOutDueToDependencyChange);
} else if (job->GenerateCode() == CompilationJob::SUCCEEDED) {
} else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
job->RecordOptimizationStats();
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
......@@ -1937,7 +1936,7 @@ void Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
}
}
DCHECK(job->last_status() != CompilationJob::SUCCEEDED);
DCHECK(job->state() == CompilationJob::State::kFailed);
if (FLAG_trace_opt) {
PrintF("[aborted optimizing ");
info->closure()->ShortPrint();
......
......@@ -540,62 +540,86 @@ class CompilationInfo final {
// A base class for compilation jobs intended to run concurrent to the main
// thread. The job is split into three phases which are called in sequence on
// different threads and with different limitations:
// 1) CreateGraph: Runs on main thread. No major limitations.
// 2) OptimizeGraph: Runs concurrently. No heap allocation or handle derefs.
// 3) GenerateCode: Runs on main thread. No dependency changes.
// 1) PrepareJob: Runs on main thread. No major limitations.
// 2) ExecuteJob: Runs concurrently. No heap allocation or handle derefs.
// 3) FinalizeJob: Runs on main thread. No dependency changes.
//
// Each of the three phases can either fail or succeed. Apart from their return
// value, the status of the phase last run can be checked using {last_status()}
// as well. When failing we distinguish between the following levels:
// a) AbortOptimization: Persistent failure, disable future optimization.
// b) RetryOptimzation: Transient failure, try again next time.
// Each of the three phases can either fail or succeed. The current state of
// the job can be checked using {state()}.
class CompilationJob {
public:
explicit CompilationJob(CompilationInfo* info, const char* compiler_name)
: info_(info), compiler_name_(compiler_name), last_status_(SUCCEEDED) {}
enum Status { SUCCEEDED, FAILED };
enum class State {
kReadyToPrepare,
kReadyToExecute,
kReadyToFinalize,
kSucceeded,
kFailed,
};
explicit CompilationJob(CompilationInfo* info, const char* compiler_name,
State initial_state = State::kReadyToPrepare)
: info_(info), compiler_name_(compiler_name), state_(initial_state) {}
virtual ~CompilationJob() {}
enum Status { FAILED, SUCCEEDED };
// Prepare the compile job. Must be called on the main thread.
MUST_USE_RESULT Status PrepareJob();
MUST_USE_RESULT Status CreateGraph();
MUST_USE_RESULT Status OptimizeGraph();
MUST_USE_RESULT Status GenerateCode();
// Executes the compile job. Can be called off the main thread.
MUST_USE_RESULT Status ExecuteJob();
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
// Finalizes the compile job. Must be called on the main thread.
MUST_USE_RESULT Status FinalizeJob();
// Report a transient failure, try again next time. Should only be called on
// optimization compilation jobs.
Status RetryOptimization(BailoutReason reason) {
DCHECK(info_->IsOptimizing());
info_->RetryOptimization(reason);
return SetLastStatus(FAILED);
state_ = State::kFailed;
return FAILED;
}
// Report a persistent failure, disable future optimization on the function.
// Should only be called on optimization compilation jobs.
Status AbortOptimization(BailoutReason reason) {
DCHECK(info_->IsOptimizing());
info_->AbortOptimization(reason);
return SetLastStatus(FAILED);
state_ = State::kFailed;
return FAILED;
}
void RecordOptimizationStats();
protected:
void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
State state() const { return state_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
protected:
// Overridden by the actual implementation.
virtual Status CreateGraphImpl() = 0;
virtual Status OptimizeGraphImpl() = 0;
virtual Status GenerateCodeImpl() = 0;
virtual Status PrepareJobImpl() = 0;
virtual Status ExecuteJobImpl() = 0;
virtual Status FinalizeJobImpl() = 0;
// Registers weak object to optimized code dependencies.
// TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
private:
CompilationInfo* info_;
base::TimeDelta time_taken_to_create_graph_;
base::TimeDelta time_taken_to_optimize_;
base::TimeDelta time_taken_to_codegen_;
base::TimeDelta time_taken_to_prepare_;
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
const char* compiler_name_;
Status last_status_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
return last_status_;
State state_;
MUST_USE_RESULT Status UpdateState(Status status, State next_state) {
if (status == SUCCEEDED) {
state_ = next_state;
} else {
state_ = State::kFailed;
}
return status;
}
};
......
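For illustration, a skeleton subclass written against the new interface. The
class name ExampleCompilationJob is hypothetical; the real subclasses touched
by this change are PipelineCompilationJob, PipelineWasmCompilationJob,
HCompilationJob and the new InterpreterCompilationJob.

  // Hypothetical sketch only: shows which hooks a subclass overrides and
  // how UpdateState() advances the job when each phase succeeds.
  class ExampleCompilationJob final : public CompilationJob {
   public:
    explicit ExampleCompilationJob(CompilationInfo* info)
        : CompilationJob(info, "Example") {}  // starts in State::kReadyToPrepare

   protected:
    // Main thread; success advances the job to State::kReadyToExecute.
    Status PrepareJobImpl() final { return SUCCEEDED; }
    // May run concurrently; no heap allocation or handle dereferences.
    Status ExecuteJobImpl() final { return SUCCEEDED; }
    // Main thread; success advances the job to State::kSucceeded.
    Status FinalizeJobImpl() final { return SUCCEEDED; }
  };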
......@@ -573,9 +573,9 @@ class PipelineCompilationJob final : public CompilationJob {
linkage_(nullptr) {}
protected:
Status CreateGraphImpl() final;
Status OptimizeGraphImpl() final;
Status GenerateCodeImpl() final;
Status PrepareJobImpl() final;
Status ExecuteJobImpl() final;
Status FinalizeJobImpl() final;
private:
Zone zone_;
......@@ -590,7 +590,7 @@ class PipelineCompilationJob final : public CompilationJob {
DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
};
PipelineCompilationJob::Status PipelineCompilationJob::CreateGraphImpl() {
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
if (info()->shared_info()->asm_function()) {
if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
info()->MarkAsFunctionContextSpecializing();
......@@ -633,12 +633,12 @@ PipelineCompilationJob::Status PipelineCompilationJob::CreateGraphImpl() {
return SUCCEEDED;
}
PipelineCompilationJob::Status PipelineCompilationJob::OptimizeGraphImpl() {
PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
return SUCCEEDED;
}
PipelineCompilationJob::Status PipelineCompilationJob::GenerateCodeImpl() {
PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
Handle<Code> code = pipeline_.GenerateCode(linkage_);
if (code.is_null()) {
if (info()->bailout_reason() == kNoReason) {
......@@ -660,16 +660,16 @@ class PipelineWasmCompilationJob final : public CompilationJob {
explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
CallDescriptor* descriptor,
SourcePositionTable* source_positions)
: CompilationJob(info, "TurboFan"),
: CompilationJob(info, "TurboFan", State::kReadyToExecute),
zone_pool_(info->isolate()->allocator()),
data_(&zone_pool_, info, graph, source_positions),
pipeline_(&data_),
linkage_(descriptor) {}
protected:
Status CreateGraphImpl() final;
Status OptimizeGraphImpl() final;
Status GenerateCodeImpl() final;
Status PrepareJobImpl() final;
Status ExecuteJobImpl() final;
Status FinalizeJobImpl() final;
private:
ZonePool zone_pool_;
......@@ -679,12 +679,13 @@ class PipelineWasmCompilationJob final : public CompilationJob {
};
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::CreateGraphImpl() {
PipelineWasmCompilationJob::PrepareJobImpl() {
UNREACHABLE(); // Prepare should always be skipped for WasmCompilationJob.
return SUCCEEDED;
}
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::OptimizeGraphImpl() {
PipelineWasmCompilationJob::ExecuteJobImpl() {
if (FLAG_trace_turbo) {
TurboJsonFile json_of(info(), std::ios_base::trunc);
json_of << "{\"function\":\"" << info()->GetDebugName().get()
......@@ -698,7 +699,7 @@ PipelineWasmCompilationJob::OptimizeGraphImpl() {
}
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::GenerateCodeImpl() {
PipelineWasmCompilationJob::FinalizeJobImpl() {
pipeline_.GenerateCode(&linkage_);
return SUCCEEDED;
}
......
......@@ -3208,11 +3208,7 @@ void WasmCompilationUnit::ExecuteCompilation() {
}
job_.reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
descriptor, source_positions));
// The function name {OptimizeGraph()} is misleading but necessary because we
// want to use the CompilationJob interface. A better name would be
// ScheduleGraphAndSelectInstructions.
ok_ = job_->OptimizeGraph() == CompilationJob::SUCCEEDED;
ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(ahaas): The counters are not thread-safe at the moment.
// isolate_->counters()->wasm_compile_function_peak_memory_bytes()
......@@ -3244,7 +3240,7 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
return Handle<Code>::null();
}
if (job_->GenerateCode() != CompilationJob::SUCCEEDED) {
if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
return Handle<Code>::null();
}
base::ElapsedTimer compile_timer;
......
......@@ -114,7 +114,7 @@ class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
#undef DEF_VISIT
};
HCompilationJob::Status HCompilationJob::CreateGraphImpl() {
HCompilationJob::Status HCompilationJob::PrepareJobImpl() {
if (!isolate()->use_crankshaft() ||
info()->shared_info()->dont_crankshaft()) {
// Crankshaft is entirely disabled.
......@@ -203,7 +203,7 @@ HCompilationJob::Status HCompilationJob::CreateGraphImpl() {
return SUCCEEDED;
}
HCompilationJob::Status HCompilationJob::OptimizeGraphImpl() {
HCompilationJob::Status HCompilationJob::ExecuteJobImpl() {
DCHECK(graph_ != NULL);
BailoutReason bailout_reason = kNoReason;
......@@ -217,7 +217,7 @@ HCompilationJob::Status HCompilationJob::OptimizeGraphImpl() {
return FAILED;
}
HCompilationJob::Status HCompilationJob::GenerateCodeImpl() {
HCompilationJob::Status HCompilationJob::FinalizeJobImpl() {
DCHECK(chunk_ != NULL);
DCHECK(graph_ != NULL);
{
......
......@@ -44,9 +44,9 @@ class HCompilationJob final : public CompilationJob {
chunk_(nullptr) {}
protected:
virtual Status CreateGraphImpl();
virtual Status OptimizeGraphImpl();
virtual Status GenerateCodeImpl();
virtual Status PrepareJobImpl();
virtual Status ExecuteJobImpl();
virtual Status FinalizeJobImpl();
private:
Zone zone_;
......
......@@ -54,8 +54,6 @@ class LCodeGenBase BASE_EMBEDDED {
int GetNextEmittedBlock() const;
void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
void WriteTranslationFrame(LEnvironment* environment,
Translation* translation);
int DefineDeoptimizationLiteral(Handle<Object> literal);
......
......@@ -686,20 +686,16 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
InitializeAstVisitor(info->isolate()->stack_guard()->real_climit());
}
Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(Isolate* isolate) {
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
// Create an inner HandleScope to avoid unnecessarily canonicalizing handles
// created as part of bytecode finalization.
HandleScope scope(isolate);
GenerateBytecode();
FinalizeBytecode(isolate);
AllocateDeferredConstants();
if (HasStackOverflow()) return Handle<BytecodeArray>();
return scope.CloseAndEscape(builder()->ToBytecodeArray(isolate));
}
void BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
void BytecodeGenerator::AllocateDeferredConstants() {
// Build global declaration pair arrays.
for (GlobalDeclarationsBuilder* globals_builder : global_declarations_) {
Handle<FixedArray> declarations =
......
......@@ -24,7 +24,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
explicit BytecodeGenerator(CompilationInfo* info);
Handle<BytecodeArray> MakeBytecode(Isolate* isolate);
void GenerateBytecode();
Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate);
#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
......@@ -52,9 +53,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
enum class TestFallthrough { kThen, kElse, kNone };
void GenerateBytecode();
void GenerateBytecodeBody();
void FinalizeBytecode(Isolate* isolate);
void AllocateDeferredConstants();
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
......
......@@ -30,6 +30,23 @@ typedef InterpreterAssembler::Arg Arg;
#define __ assembler->
class InterpreterCompilationJob final : public CompilationJob {
public:
explicit InterpreterCompilationJob(CompilationInfo* info);
protected:
Status PrepareJobImpl() final;
Status ExecuteJobImpl() final;
Status FinalizeJobImpl() final;
private:
BytecodeGenerator* generator() { return &generator_; }
BytecodeGenerator generator_;
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
memset(dispatch_table_, 0, sizeof(dispatch_table_));
}
......@@ -132,6 +149,39 @@ int Interpreter::InterruptBudget() {
return FLAG_interrupt_budget * kCodeSizeMultiplier;
}
InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
: CompilationJob(info, "Ignition"), generator_(info) {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
return SUCCEEDED;
}
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
generator()->GenerateBytecode();
if (generator()->HasStackOverflow()) {
return FAILED;
}
return SUCCEEDED;
}
InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
if (generator()->HasStackOverflow()) {
return FAILED;
}
if (FLAG_print_bytecode) {
OFStream os(stdout);
bytecodes->Print(os);
os << std::flush;
}
info()->SetBytecodeArray(bytecodes);
info()->SetCode(info()->isolate()->builtins()->InterpreterEntryTrampoline());
return SUCCEEDED;
}
bool Interpreter::MakeBytecode(CompilationInfo* info) {
RuntimeCallTimerScope runtimeTimer(info->isolate(),
&RuntimeCallStats::CompileIgnition);
......@@ -156,20 +206,10 @@ bool Interpreter::MakeBytecode(CompilationInfo* info) {
}
#endif // DEBUG
BytecodeGenerator generator(info);
Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info->isolate());
if (generator.HasStackOverflow()) return false;
if (FLAG_print_bytecode) {
OFStream os(stdout);
bytecodes->Print(os);
os << std::flush;
}
info->SetBytecodeArray(bytecodes);
info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
return true;
InterpreterCompilationJob job(info);
if (job.PrepareJob() != CompilationJob::SUCCEEDED) return false;
if (job.ExecuteJob() != CompilationJob::SUCCEEDED) return false;
return job.FinalizeJob() == CompilationJob::SUCCEEDED;
}
bool Interpreter::IsDispatchTableInitialized() {
......
......@@ -549,8 +549,8 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
Code::ComputeFlags(Code::WASM_FUNCTION));
std::unique_ptr<CompilationJob> job(Pipeline::NewWasmCompilationJob(
&info, graph(), desc, &source_position_table_));
if (job->OptimizeGraph() != CompilationJob::SUCCEEDED ||
job->GenerateCode() != CompilationJob::SUCCEEDED)
if (job->ExecuteJob() != CompilationJob::SUCCEEDED ||
job->FinalizeJob() != CompilationJob::SUCCEEDED)
return Handle<Code>::null();
Handle<Code> code = info.code();
......