Commit 3b9091c8 authored by Leszek Swirski, committed by V8 LUCI CQ

[compiler-dispatcher] Move Job pointer to SFI

Reduce the enqueuing cost of compiler-dispatcher jobs by getting rid of
the sets and hashmaps, and instead:

  1. Turning the pending job set into a queue, and
  2. Making the SharedFunctionInfo's UncompiledData hold a pointer to
     the LazyCompileDispatcher::Job, instead of maintaining an
     IdentityMap from one to the other (see the sketch below).
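
As a rough illustration only (stand-in types, not the real V8 classes
or method signatures), the lookup and enqueue paths now amount to a
field load and a queue push:

  #include <cstdint>
  #include <vector>

  struct Job {};  // Stand-in for LazyCompileDispatcher::Job.

  // Stand-in for an UncompiledData...WithJob object: the job slot is a
  // raw address (0 playing the role of kNullAddress), not a tagged
  // heap pointer.
  struct UncompiledData {
    uintptr_t job = 0;
  };

  struct SharedFunctionInfo {
    UncompiledData* uncompiled_data = nullptr;
  };

  struct Dispatcher {
    // Previously an unordered set of pending jobs; now a FIFO queue.
    std::vector<Job*> pending_background_jobs_;

    // Previously an IdentityMap lookup keyed on the SFI; now a load.
    Job* GetJobFor(const SharedFunctionInfo& sfi) const {
      return reinterpret_cast<Job*>(sfi.uncompiled_data->job);
    }

    void Enqueue(SharedFunctionInfo& sfi, Job* job) {
      sfi.uncompiled_data->job = reinterpret_cast<uintptr_t>(job);
      pending_background_jobs_.push_back(job);
    }
  };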

To avoid bloating all UncompiledData, this adds two new UncompiledData
subclasses, making four subclasses in total, covering the combinations
of with/without preparse data and with/without a Job pointer.
FunctionLiterals marked "should_parallel_compile" are allocated an
UncompiledData with a job pointer up front; otherwise, enqueueing an
SFI whose UncompiledData lacks a job pointer triggers a reallocation
of that UncompiledData to add one.
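
Roughly, and with a helper name invented just for this sketch, the
allocation choice made in SharedFunctionInfo::InitFromFunctionLiteral
(see the shared-function-info.cc hunk in the diff) reduces to:

  enum class UncompiledDataKind {
    kWithoutPreparseData,
    kWithPreparseData,
    kWithoutPreparseDataWithJob,
    kWithPreparseDataAndJob,
  };

  // The real code calls the matching FactoryBase::NewUncompiledData*
  // methods instead of returning an enum value.
  UncompiledDataKind ChooseUncompiledDataKind(
      bool has_preparse_data, bool should_parallel_compile) {
    if (has_preparse_data) {
      return should_parallel_compile
                 ? UncompiledDataKind::kWithPreparseDataAndJob
                 : UncompiledDataKind::kWithPreparseData;
    }
    return should_parallel_compile
               ? UncompiledDataKind::kWithoutPreparseDataWithJob
               : UncompiledDataKind::kWithoutPreparseData;
  }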

Since there is no longer a set of all Jobs (aside from a debug-only
one), we need to be careful to manually clear the Job pointer from the
UncompiledData whenever we finish a Job (whether successfully or by
aborting), and we have to make sure that every Job stays reachable via
the pending/finalizable lists or the set of currently running jobs.
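
The clean-up invariant, again as a stand-alone sketch with stand-in
types rather than the real API, is simply that the back-pointer is
nulled before the Job's memory goes away:

  #include <cstdint>
  #include <memory>

  struct Job {};  // Stand-in for LazyCompileDispatcher::Job.

  struct UncompiledData {
    uintptr_t job = 0;  // 0 plays the role of kNullAddress.
  };

  // Mirrors SharedFunctionInfo::ClearUncompiledDataJobPointer below.
  void ClearUncompiledDataJobPointer(UncompiledData& data) {
    data.job = 0;
  }

  void FinishOrAbortJob(UncompiledData& data, std::unique_ptr<Job> job) {
    // Null the back-pointer first, whether the job finished or was
    // aborted, so the SharedFunctionInfo never sees a dangling address.
    ClearUncompiledDataJobPointer(data);
    // The Job itself is destroyed when |job| goes out of scope here.
  }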

Change-Id: I3aae78e6dfbdc74f5f7c1411de398433907b2705
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3314833
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78302}
parent 657e5dc1
......@@ -14458,6 +14458,8 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
CODET_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
#if V8_ENABLE_WEBASSEMBLY
WASM_CAPI_FUNCTION_DATA_TYPE,
......@@ -14469,16 +14471,17 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
Label check_is_bytecode_array(this);
Label check_is_baseline_data(this);
Label check_is_asm_wasm_data(this);
Label check_is_uncompiled_data_without_preparse_data(this);
Label check_is_uncompiled_data_with_preparse_data(this);
Label check_is_uncompiled_data(this);
Label check_is_function_template_info(this);
Label check_is_interpreter_data(this);
Label check_is_wasm_function_data(this);
Label* case_labels[] = {
&check_is_bytecode_array,
&check_is_baseline_data,
&check_is_uncompiled_data_without_preparse_data,
&check_is_uncompiled_data_with_preparse_data,
&check_is_uncompiled_data,
&check_is_uncompiled_data,
&check_is_uncompiled_data,
&check_is_uncompiled_data,
&check_is_function_template_info,
#if V8_ENABLE_WEBASSEMBLY
&check_is_wasm_function_data,
......@@ -14506,9 +14509,7 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsUncompiledDataWithPreparseData | IsUncompiledDataWithoutPreparseData:
// Compile lazy
BIND(&check_is_uncompiled_data_with_preparse_data);
Goto(&check_is_uncompiled_data_without_preparse_data);
BIND(&check_is_uncompiled_data_without_preparse_data);
BIND(&check_is_uncompiled_data);
sfi_code = HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
Goto(if_compile_lazy ? if_compile_lazy : &done);
......
......@@ -1648,6 +1648,12 @@ bool BackgroundCompileTask::FinalizeFunction(
Handle<SharedFunctionInfo> input_shared_info =
input_shared_info_.ToHandleChecked();
// The UncompiledData on the input SharedFunctionInfo will have a pointer to
// the LazyCompileDispatcher Job that launched this task, which will now be
// considered complete, so clear that regardless of whether the finalize
// succeeds or not.
input_shared_info->ClearUncompiledDataJobPointer();
// We might not have been able to finalize all jobs on the background
// thread (e.g. asm.js jobs), so finalize those deferred jobs now.
if (FinalizeDeferredUnoptimizedCompilationJobs(
......@@ -1675,6 +1681,14 @@ bool BackgroundCompileTask::FinalizeFunction(
return true;
}
void BackgroundCompileTask::AbortFunction() {
// The UncompiledData on the input SharedFunctionInfo will have a pointer to
// the LazyCompileDispatcher Job that launched this task, which is about to be
deleted, so clear that to prevent the SharedFunctionInfo from pointing to
// deallocated memory.
input_shared_info_.ToHandleChecked()->ClearUncompiledDataJobPointer();
}
void BackgroundCompileTask::ReportStatistics(Isolate* isolate) {
// Update use-counts.
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
......
......@@ -525,12 +525,16 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
bool FinalizeFunction(Isolate* isolate, Compiler::ClearExceptionFlag flag);
void AbortFunction();
UnoptimizedCompileFlags flags() const { return flags_; }
LanguageMode language_mode() const { return language_mode_; }
private:
void ReportStatistics(Isolate* isolate);
void ClearFunctionJobPointer();
// Data needed for parsing and compilation. These need to be initialized
// before the compilation starts.
Isolate* isolate_for_local_isolate_;
......
......@@ -115,13 +115,29 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
struct Job {
enum class State {
// Background thread states (Enqueue + DoBackgroundWork)
// ---
// In the pending task queue.
kPending,
// Currently running on a background thread.
kRunning,
kAbortRequested, // ... but we want to drop the result.
// In the finalizable task queue.
kReadyToFinalize,
kFinalized,
kAbortRequested,
kAborted,
kPendingToRunOnForeground
// Main thread states (FinishNow and FinalizeSingleJob)
// ---
// Popped off the pending task queue.
kPendingToRunOnForeground,
// Popped off the finalizable task queue.
kFinalizingNow,
kAbortingNow, // ... and we want to abort
// Finished finalizing, ready for deletion.
kFinalized,
};
explicit Job(std::unique_ptr<BackgroundCompileTask> task);
......@@ -140,13 +156,26 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
void WaitForJobIfRunningOnBackground(Job* job, const base::MutexGuard&);
Job* GetJobFor(Handle<SharedFunctionInfo> shared,
const base::MutexGuard&) const;
std::tuple<SharedFunctionInfo, Job*> GetSingleFinalizableJob(
const base::MutexGuard&);
Job* PopSingleFinalizeJob();
void ScheduleIdleTaskFromAnyThread(const base::MutexGuard&);
bool FinalizeSingleJob();
void DoBackgroundWork(JobDelegate* delegate);
void DoIdleWork(double deadline_in_seconds);
// DeleteJob without the mutex held.
void DeleteJob(Job* job);
// DeleteJob with the mutex already held.
void DeleteJob(Job* job, const base::MutexGuard&);
void NotifyAddedBackgroundJob(const base::MutexGuard& lock) {
++num_jobs_for_background_;
VerifyBackgroundTaskCount(lock);
}
void NotifyRemovedBackgroundJob(const base::MutexGuard& lock) {
--num_jobs_for_background_;
VerifyBackgroundTaskCount(lock);
}
#ifdef DEBUG
void VerifyBackgroundTaskCount(const base::MutexGuard&);
#else
......@@ -171,20 +200,25 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
// the mutex |mutex_| while accessing them.
mutable base::Mutex mutex_;
// Mapping from SharedFunctionInfo to the corresponding unoptimized
// compilation job.
SharedToJobMap shared_to_unoptimized_job_;
// True if an idle task is scheduled to be run.
bool idle_task_scheduled_;
// The set of jobs that can be run on a background thread.
std::unordered_set<Job*> pending_background_jobs_;
std::vector<Job*> pending_background_jobs_;
// The set of jobs that can be finalized on the main thread.
std::vector<Job*> finalizable_jobs_;
// The total number of jobs ready to execute on background, both those pending
// and those currently running.
std::atomic<size_t> num_jobs_for_background_;
#ifdef DEBUG
// The set of all allocated jobs, used for verification of the various queues
// and counts.
std::unordered_set<Job*> all_jobs_;
#endif
// If not nullptr, then the main thread waits for the task processing
// this job, and blocks on the ConditionVariable main_thread_blocking_signal_.
Job* main_thread_blocking_on_job_;
......
......@@ -357,6 +357,29 @@ FactoryBase<Impl>::NewUncompiledDataWithPreparseData(
AllocationType::kOld);
}
template <typename Impl>
Handle<UncompiledDataWithoutPreparseDataWithJob>
FactoryBase<Impl>::NewUncompiledDataWithoutPreparseDataWithJob(
Handle<String> inferred_name, int32_t start_position,
int32_t end_position) {
return TorqueGeneratedFactory<
Impl>::NewUncompiledDataWithoutPreparseDataWithJob(inferred_name,
start_position,
end_position,
kNullAddress,
AllocationType::kOld);
}
template <typename Impl>
Handle<UncompiledDataWithPreparseDataAndJob>
FactoryBase<Impl>::NewUncompiledDataWithPreparseDataAndJob(
Handle<String> inferred_name, int32_t start_position, int32_t end_position,
Handle<PreparseData> preparse_data) {
return TorqueGeneratedFactory<Impl>::NewUncompiledDataWithPreparseDataAndJob(
inferred_name, start_position, end_position, preparse_data, kNullAddress,
AllocationType::kOld);
}
template <typename Impl>
Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
MaybeHandle<String> maybe_name, MaybeHandle<HeapObject> maybe_function_data,
......
......@@ -178,6 +178,17 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<String> inferred_name, int32_t start_position,
int32_t end_position, Handle<PreparseData>);
Handle<UncompiledDataWithoutPreparseDataWithJob>
NewUncompiledDataWithoutPreparseDataWithJob(Handle<String> inferred_name,
int32_t start_position,
int32_t end_position);
Handle<UncompiledDataWithPreparseDataAndJob>
NewUncompiledDataWithPreparseDataAndJob(Handle<String> inferred_name,
int32_t start_position,
int32_t end_position,
Handle<PreparseData>);
// Allocates a FeedbackMetadata object and zeroes the data section.
Handle<FeedbackMetadata> NewFeedbackMetadata(
int slot_count, int create_closure_slot_count,
......
......@@ -230,6 +230,8 @@ class ZoneForwardList;
V(UncompiledData) \
V(UncompiledDataWithPreparseData) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseDataAndJob) \
V(UncompiledDataWithoutPreparseDataWithJob) \
V(Undetectable) \
V(UniqueName) \
IF_WASM(V, WasmApiFunctionRef) \
......
......@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
......@@ -92,6 +93,8 @@ void PreparseData::set_child(int index, PreparseData value,
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseDataWithJob)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseDataAndJob)
TQ_OBJECT_CONSTRUCTORS_IMPL(InterpreterData)
TQ_OBJECT_CONSTRUCTORS_IMPL(SharedFunctionInfo)
......@@ -791,6 +794,17 @@ bool SharedFunctionInfo::HasUncompiledDataWithoutPreparseData() const {
return function_data(kAcquireLoad).IsUncompiledDataWithoutPreparseData();
}
void SharedFunctionInfo::ClearUncompiledDataJobPointer() {
UncompiledData uncompiled_data = this->uncompiled_data();
if (uncompiled_data.IsUncompiledDataWithPreparseDataAndJob()) {
UncompiledDataWithPreparseDataAndJob::cast(uncompiled_data)
.set_job(kNullAddress);
} else if (uncompiled_data.IsUncompiledDataWithoutPreparseDataWithJob()) {
UncompiledDataWithoutPreparseDataWithJob::cast(uncompiled_data)
.set_job(kNullAddress);
}
}
void SharedFunctionInfo::ClearPreparseData() {
DCHECK(HasUncompiledDataWithPreparseData());
UncompiledDataWithPreparseData data = uncompiled_data_with_preparse_data();
......
......@@ -549,14 +549,26 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (scope_data != nullptr) {
Handle<PreparseData> preparse_data = scope_data->Serialize(isolate);
if (lit->should_parallel_compile()) {
data = isolate->factory()->NewUncompiledDataWithPreparseDataAndJob(
lit->GetInferredName(isolate), lit->start_position(),
lit->end_position(), preparse_data);
} else {
data = isolate->factory()->NewUncompiledDataWithPreparseData(
lit->GetInferredName(isolate), lit->start_position(),
lit->end_position(), preparse_data);
}
} else {
if (lit->should_parallel_compile()) {
data = isolate->factory()->NewUncompiledDataWithoutPreparseDataWithJob(
lit->GetInferredName(isolate), lit->start_position(),
lit->end_position());
} else {
data = isolate->factory()->NewUncompiledDataWithoutPreparseData(
lit->GetInferredName(isolate), lit->start_position(),
lit->end_position());
}
}
shared_info->set_uncompiled_data(*data);
}
......
......@@ -145,6 +145,31 @@ class UncompiledDataWithPreparseData
TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData)
};
// Class representing data for an uncompiled function that does not have any
// data from the pre-parser, either because it's a leaf function or because the
// pre-parser bailed out, but has a job pointer.
class UncompiledDataWithoutPreparseDataWithJob
: public TorqueGeneratedUncompiledDataWithoutPreparseDataWithJob<
UncompiledDataWithoutPreparseDataWithJob,
UncompiledDataWithoutPreparseData> {
public:
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithoutPreparseDataWithJob)
};
// Class representing data for an uncompiled function that has pre-parsed scope
// data and a job pointer.
class UncompiledDataWithPreparseDataAndJob
: public TorqueGeneratedUncompiledDataWithPreparseDataAndJob<
UncompiledDataWithPreparseDataAndJob,
UncompiledDataWithPreparseData> {
public:
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseDataAndJob)
};
class InterpreterData
: public TorqueGeneratedInterpreterData<InterpreterData, Struct> {
public:
......@@ -351,6 +376,7 @@ class SharedFunctionInfo
inline void set_uncompiled_data_with_preparse_data(
UncompiledDataWithPreparseData data);
inline bool HasUncompiledDataWithoutPreparseData() const;
inline void ClearUncompiledDataJobPointer();
// Clear out pre-parsed scope data from UncompiledDataWithPreparseData,
// turning it into UncompiledDataWithoutPreparseData.
......
......@@ -135,6 +135,24 @@ extern class UncompiledDataWithPreparseData extends UncompiledData {
preparse_data: PreparseData;
}
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class UncompiledDataWithoutPreparseDataWithJob extends
UncompiledDataWithoutPreparseData {
// TODO(v8:10391): Define the field as ExternalPointer or move jobs into cage.
job: RawPtr;
}
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class UncompiledDataWithPreparseDataAndJob extends
UncompiledDataWithPreparseData {
// TODO(v8:10391): Define the field as ExternalPointer or move jobs into cage.
job: RawPtr;
}
@export
class OnHeapBasicBlockProfilerData extends HeapObject {
block_ids: ByteArray; // Stored as 4-byte ints
......
......@@ -201,6 +201,25 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
return;
}
if (obj->IsUncompiledDataWithoutPreparseDataWithJob()) {
Handle<UncompiledDataWithoutPreparseDataWithJob> data =
Handle<UncompiledDataWithoutPreparseDataWithJob>::cast(obj);
Address job = data->job();
data->set_job(kNullAddress);
SerializeGeneric(data);
data->set_job(job);
return;
}
if (obj->IsUncompiledDataWithPreparseDataAndJob()) {
Handle<UncompiledDataWithPreparseDataAndJob> data =
Handle<UncompiledDataWithPreparseDataAndJob>::cast(obj);
Address job = data->job();
data->set_job(kNullAddress);
SerializeGeneric(data);
data->set_job(job);
return;
}
// NOTE(mmarchini): If we try to serialize an InterpreterData our process
// will crash since it stores a code object. Instead, we serialize the
// bytecode array stored within the InterpreterData, which is the important
......
......@@ -25,6 +25,12 @@
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
#ifdef DEBUG
#define DEBUG_ASSERT_EQ ASSERT_EQ
#else
#define DEBUG_ASSERT_EQ(...)
#endif
namespace v8 {
namespace internal {
......@@ -397,11 +403,17 @@ TEST_F(LazyCompileDispatcherTest, IdleTaskNoIdleTime) {
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 0u);
// Run compile steps.
platform.RunJobTasksAndBlock(V8::GetCurrentPlatform());
// Job should be ready to finalize.
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 0u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 1u);
ASSERT_EQ(
dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
LazyCompileDispatcher::Job::State::kReadyToFinalize);
......@@ -415,7 +427,8 @@ TEST_F(LazyCompileDispatcherTest, IdleTaskNoIdleTime) {
ASSERT_TRUE(platform.IdleTaskPending());
// Job should be ready to finalize.
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 0u);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 1u);
ASSERT_EQ(
dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
LazyCompileDispatcher::Job::State::kReadyToFinalize);
......@@ -444,11 +457,17 @@ TEST_F(LazyCompileDispatcherTest, IdleTaskSmallIdleTime) {
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_1);
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 2u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 2u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 0u);
// Run compile steps.
platform.RunJobTasksAndBlock(V8::GetCurrentPlatform());
// Both jobs should be ready to finalize.
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 2);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 2u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 0u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 2u);
ASSERT_EQ(
dispatcher.GetJobFor(shared_1, base::MutexGuard(&dispatcher.mutex_))
->state,
......@@ -464,7 +483,9 @@ TEST_F(LazyCompileDispatcherTest, IdleTaskSmallIdleTime) {
platform.RunIdleTask(2.0, 1.0);
// Only one of the jobs should be finalized.
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 0u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 1u);
if (dispatcher.IsEnqueued(shared_1)) {
ASSERT_EQ(
dispatcher.GetJobFor(shared_1, base::MutexGuard(&dispatcher.mutex_))
......@@ -532,7 +553,9 @@ TEST_F(LazyCompileDispatcherTest, FinishNowWithWorkerTask) {
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 0u);
ASSERT_NE(
dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
LazyCompileDispatcher::Job::State::kReadyToFinalize);
......@@ -544,6 +567,7 @@ TEST_F(LazyCompileDispatcherTest, FinishNowWithWorkerTask) {
ASSERT_TRUE(dispatcher.FinishNow(shared));
// Finishing removes the SFI from the queue.
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 0u);
ASSERT_TRUE(shared->is_compiled());
if (platform.IdleTaskPending()) platform.ClearIdleTask();
ASSERT_FALSE(platform.JobTaskPending());
......@@ -620,7 +644,9 @@ TEST_F(LazyCompileDispatcherTest, AbortJobNotStarted) {
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 0u);
ASSERT_NE(
dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
LazyCompileDispatcher::Job::State::kReadyToFinalize);
......@@ -645,7 +671,9 @@ TEST_F(LazyCompileDispatcherTest, AbortJobAlreadyStarted) {
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 0u);
ASSERT_NE(
dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
LazyCompileDispatcher::Job::State::kReadyToFinalize);
......@@ -674,7 +702,9 @@ TEST_F(LazyCompileDispatcherTest, AbortJobAlreadyStarted) {
// Job should have finished running and then been aborted.
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 1u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 0u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 1u);
ASSERT_EQ(
dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
LazyCompileDispatcher::Job::State::kAborted);
......@@ -760,7 +790,9 @@ TEST_F(LazyCompileDispatcherTest, CompileMultipleOnBackgroundThread) {
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 2);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 2u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 2u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 0u);
ASSERT_NE(
dispatcher.GetJobFor(shared_1, base::MutexGuard(&dispatcher.mutex_))
->state,
......@@ -781,7 +813,9 @@ TEST_F(LazyCompileDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_FALSE(platform.JobTaskPending());
ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 2);
DEBUG_ASSERT_EQ(dispatcher.all_jobs_.size(), 2u);
ASSERT_EQ(dispatcher.pending_background_jobs_.size(), 0u);
ASSERT_EQ(dispatcher.finalizable_jobs_.size(), 2u);
ASSERT_EQ(
dispatcher.GetJobFor(shared_1, base::MutexGuard(&dispatcher.mutex_))
->state,
......
......@@ -43,7 +43,7 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
shared->set_function_literal_id(function_literal_id);
// Ensure that the function can be compiled lazily.
shared->set_uncompiled_data(
*isolate->factory()->NewUncompiledDataWithoutPreparseData(
*isolate->factory()->NewUncompiledDataWithoutPreparseDataWithJob(
ReadOnlyRoots(isolate).empty_string_handle(), 0, source->length()));
// Make sure we have an outer scope info, even though it's empty
shared->set_raw_outer_scope_info_or_feedback_metadata(
......