Commit 2df876ce authored by Etienne Pierre-doray, committed by Commit Bot

[Jobs]: Delete item-parallel-job.

Now that all users have been migrated to the Jobs API, the ItemParallelJob implementation and its unit tests can be removed.
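
For reference, the pattern ItemParallelJob implemented maps onto the Jobs API
roughly as follows. This is an illustrative sketch only; WorkItem and
ProcessItem() are placeholder names, not part of this CL:

  // A JobTask claims items through an atomic cursor instead of the per-task
  // start indices that ItemParallelJob::Run() computed.
  class ItemJob : public v8::JobTask {
   public:
    explicit ItemJob(std::vector<WorkItem*> items) : items_(std::move(items)) {}

    void Run(v8::JobDelegate* delegate) override {
      size_t index;
      while ((index = next_.fetch_add(1)) < items_.size()) {
        ProcessItem(items_[index]);
        if (delegate->ShouldYield()) return;  // Cooperate with the scheduler.
      }
    }

    size_t GetMaxConcurrency(size_t worker_count) const override {
      // Concurrency tracks the remaining items, so no additional workers are
      // spawned once all items have been claimed.
      return items_.size() - std::min(next_.load(), items_.size());
    }

   private:
    std::vector<WorkItem*> items_;
    std::atomic<size_t> next_{0};
  };

  // Posting and joining the job replaces ItemParallelJob::Run():
  //   V8::GetCurrentPlatform()
  //       ->PostJob(v8::TaskPriority::kUserBlocking,
  //                 std::make_unique<ItemJob>(std::move(items)))
  //       ->Join();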

Bug: chromium:1196703
Change-Id: Ic48cce441c1793b1b33f0fc3d6a60847f2eefb2f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2810156
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73838}
parent 3dcb0d77
@@ -2588,7 +2588,6 @@ v8_header_set("v8_internal_headers") {
"src/heap/index-generator.h",
"src/heap/invalidated-slots-inl.h",
"src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.h",
"src/heap/large-spaces.h",
"src/heap/list.h",
"src/heap/local-allocator-inl.h",
@@ -3700,7 +3699,6 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/incremental-marking.cc",
"src/heap/index-generator.cc",
"src/heap/invalidated-slots.cc",
"src/heap/item-parallel-job.cc",
"src/heap/large-spaces.cc",
"src/heap/local-factory.cc",
"src/heap/local-heap.cc",
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/item-parallel-job.h"
#include "src/base/platform/semaphore.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
namespace v8 {
namespace internal {
ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {}
void ItemParallelJob::Task::SetupInternal(base::Semaphore* on_finish,
std::vector<Item*>* items,
size_t start_index) {
on_finish_ = on_finish;
items_ = items;
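// If this task was not assigned any items (there were more tasks than
// items), mark all items as already considered so that GetItem() returns
// nullptr immediately.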
if (start_index < items->size()) {
cur_index_ = start_index;
} else {
items_considered_ = items_->size();
}
}
void ItemParallelJob::Task::WillRunOnForeground() {
runner_ = Runner::kForeground;
}
void ItemParallelJob::Task::RunInternal() {
RunInParallel(runner_);
on_finish_->Signal();
}
ItemParallelJob::ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
base::Semaphore* pending_tasks)
: cancelable_task_manager_(cancelable_task_manager),
pending_tasks_(pending_tasks) {}
ItemParallelJob::~ItemParallelJob() {
for (size_t i = 0; i < items_.size(); i++) {
Item* item = items_[i];
CHECK(item->IsFinished());
delete item;
}
}
void ItemParallelJob::Run() {
DCHECK_GT(tasks_.size(), 0);
const size_t num_items = items_.size();
const size_t num_tasks = tasks_.size();
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ItemParallelJob::Run", TRACE_EVENT_SCOPE_THREAD,
"num_tasks", static_cast<int>(num_tasks), "num_items",
static_cast<int>(num_items));
// Some jobs have more tasks than items (when the items are merely
// coarse-grained units that generate work dynamically for a second phase in
// which all tasks participate). Some jobs even have 0 items to preprocess but
// still have multiple tasks.
// TODO(gab): Figure out a cleaner scheme for this.
const size_t num_tasks_processing_items = std::min(num_items, tasks_.size());
// In the event of an uneven workload, distribute an extra item to the first
// |items_remainder| tasks.
const size_t items_remainder = num_tasks_processing_items > 0
? num_items % num_tasks_processing_items
: 0;
// Base |items_per_task|; it is bumped by 1 for the first
// |items_remainder| tasks.
const size_t items_per_task = num_tasks_processing_items > 0
? num_items / num_tasks_processing_items
: 0;
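// Example: with 10 items and 4 tasks, |items_per_task| is 2 and
// |items_remainder| is 2, so the tasks are assigned 3, 3, 2 and 2 items.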
CancelableTaskManager::Id* task_ids =
new CancelableTaskManager::Id[num_tasks];
std::unique_ptr<Task> main_task;
for (size_t i = 0, start_index = 0; i < num_tasks;
i++, start_index += items_per_task + (i < items_remainder ? 1 : 0)) {
auto task = std::move(tasks_[i]);
DCHECK(task);
// By definition, |items_remainder| is smaller than the number of tasks
// processing items, so this cannot overflow while we are assigning work
// items.
DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
task->SetupInternal(pending_tasks_, &items_, start_index);
task_ids[i] = task->id();
if (i > 0) {
V8::GetCurrentPlatform()->CallBlockingTaskOnWorkerThread(std::move(task));
} else {
main_task = std::move(task);
}
}
// Contribute on main thread.
DCHECK(main_task);
main_task->WillRunOnForeground();
main_task->Run();
// Wait for background tasks.
for (size_t i = 0; i < num_tasks; i++) {
if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
TryAbortResult::kTaskAborted) {
pending_tasks_->Wait();
}
}
delete[] task_ids;
}
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_ITEM_PARALLEL_JOB_H_
#define V8_HEAP_ITEM_PARALLEL_JOB_H_
#include <memory>
#include <vector>
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace base {
class Semaphore;
} // namespace base
namespace internal {
class Counters;
class Isolate;
// This class manages background tasks that process a set of items in parallel.
// The first task added is executed on the thread that calls |job.Run()|.
// All other tasks are scheduled in the background.
//
// - Items need to inherit from ItemParallelJob::Item.
// - Tasks need to inherit from ItemParallelJob::Task.
//
// Items need to be marked as finished after processing them. Task and Item
// ownership is transferred to the job.
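//
// A minimal usage sketch (illustrative only; MyItem and MyTask are
// placeholder subclasses, not types defined here):
//
//   ItemParallelJob job(isolate->cancelable_task_manager(), &semaphore);
//   for (auto* work : work_items) job.AddItem(new MyItem(work));
//   for (int i = 0; i < num_tasks; i++) job.AddTask(new MyTask(isolate));
//   job.Run();  // Blocks until every task has finished.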
class V8_EXPORT_PRIVATE ItemParallelJob {
public:
class Task;
class V8_EXPORT_PRIVATE Item {
public:
Item() = default;
virtual ~Item() = default;
Item(const Item&) = delete;
Item& operator=(const Item&) = delete;
// Marks an item as being finished.
void MarkFinished() { CHECK_EQ(kProcessing, state_.exchange(kFinished)); }
private:
enum ProcessingState : uintptr_t { kAvailable, kProcessing, kFinished };
bool TryMarkingAsProcessing() {
ProcessingState available = kAvailable;
return state_.compare_exchange_strong(available, kProcessing);
}
bool IsFinished() { return state_ == kFinished; }
std::atomic<ProcessingState> state_{kAvailable};
friend class ItemParallelJob;
friend class ItemParallelJob::Task;
};
class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
enum class Runner { kForeground, kBackground };
explicit Task(Isolate* isolate);
~Task() override = default;
Task(const Task&) = delete;
Task& operator=(const Task&) = delete;
virtual void RunInParallel(Runner runner) = 0;
protected:
// Retrieves a new item that needs to be processed. Returns |nullptr| if
// all items are processed. Upon returning an item, the task is required
// to process the item and mark the item as finished after doing so.
template <class ItemType>
ItemType* GetItem() {
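// Starting from the |start_index| assigned in SetupInternal(), scan the
// item vector at most once (wrapping around at the end) and claim items
// with a compare-and-swap so that no two tasks receive the same item.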
while (items_considered_++ != items_->size()) {
// Wrap around.
if (cur_index_ == items_->size()) {
cur_index_ = 0;
}
Item* item = (*items_)[cur_index_++];
if (item->TryMarkingAsProcessing()) {
return static_cast<ItemType*>(item);
}
}
return nullptr;
}
private:
friend class ItemParallelJob;
friend class Item;
// Sets up state required before invoking Run(). If
// |start_index| >= |items_.size()|, this task will not process work items
// (some jobs have more tasks than work items in order to parallelize
// post-processing, e.g. scavenging).
void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
size_t start_index);
void WillRunOnForeground();
// We don't allow overriding this method any further.
void RunInternal() final;
std::vector<Item*>* items_ = nullptr;
size_t cur_index_ = 0;
size_t items_considered_ = 0;
Runner runner_ = Runner::kBackground;
base::Semaphore* on_finish_ = nullptr;
};
ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
base::Semaphore* pending_tasks);
~ItemParallelJob();
ItemParallelJob(const ItemParallelJob&) = delete;
ItemParallelJob& operator=(const ItemParallelJob&) = delete;
// Adds a task to the job. Transfers ownership to the job.
void AddTask(Task* task) { tasks_.push_back(std::unique_ptr<Task>(task)); }
// Adds an item to the job. Transfers ownership to the job.
void AddItem(Item* item) { items_.push_back(item); }
int NumberOfItems() const { return static_cast<int>(items_.size()); }
int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
// Runs this job.
void Run();
private:
std::vector<Item*> items_;
std::vector<std::unique_ptr<Task>> tasks_;
CancelableTaskManager* cancelable_task_manager_;
base::Semaphore* pending_tasks_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_ITEM_PARALLEL_JOB_H_
@@ -23,7 +23,6 @@
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
......
@@ -9,7 +9,6 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
......
@@ -311,7 +311,6 @@ v8_source_set("unittests_sources") {
"heap/heap-utils.cc",
"heap/heap-utils.h",
"heap/index-generator-unittest.cc",
"heap/item-parallel-job-unittest.cc",
"heap/list-unittest.cc",
"heap/local-factory-unittest.cc",
"heap/local-heap-unittest.cc",
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/item-parallel-job.h"
#include "src/execution/isolate.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
class ItemParallelJobTest : public TestWithIsolate {
public:
ItemParallelJobTest() : parallel_job_semaphore_(0) {}
ItemParallelJobTest(const ItemParallelJobTest&) = delete;
ItemParallelJobTest& operator=(const ItemParallelJobTest&) = delete;
base::Semaphore* parallel_job_semaphore() { return &parallel_job_semaphore_; }
private:
base::Semaphore parallel_job_semaphore_;
};
namespace {
class SimpleTask : public ItemParallelJob::Task {
public:
SimpleTask(Isolate* isolate, bool* did_run)
: ItemParallelJob::Task(isolate), did_run_(did_run) {}
void RunInParallel(Runner runner) override {
ItemParallelJob::Item* item = nullptr;
while ((item = GetItem<ItemParallelJob::Item>()) != nullptr) {
item->MarkFinished();
}
*did_run_ = true;
}
private:
bool* did_run_;
};
// A simple work item which sets |was_processed| to true, if non-null, when it
// is processed.
class SimpleItem : public ItemParallelJob::Item {
public:
explicit SimpleItem(bool* was_processed = nullptr)
: ItemParallelJob::Item(), was_processed_(was_processed) {}
void Process() {
if (was_processed_) *was_processed_ = true;
}
private:
bool* was_processed_;
};
class EagerTask : public ItemParallelJob::Task {
public:
explicit EagerTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
void RunInParallel(Runner runner) override {
SimpleItem* item = nullptr;
while ((item = GetItem<SimpleItem>()) != nullptr) {
item->Process();
item->MarkFinished();
}
}
};
// A OneShotBarrier is meant to be passed to |counter| users. Users should
// either Signal() or Wait() when done (based on whether they want to return
// immediately or wait until others are also done).
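// In these tests, the task running on the main thread Wait()s while the
// background tasks Signal(), which keeps the main thread from returning from
// Run() and cancelling tasks that have not been scheduled yet.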
class OneShotBarrier {
public:
explicit OneShotBarrier(size_t counter) : counter_(counter) {
DCHECK_GE(counter_, 0);
}
void Wait() {
DCHECK_NE(counter_, 0);
mutex_.Lock();
counter_--;
if (counter_ == 0) {
condition_.NotifyAll();
} else {
while (counter_ > 0) {
condition_.Wait(&mutex_);
}
}
mutex_.Unlock();
}
void Signal() {
mutex_.Lock();
counter_--;
if (counter_ == 0) {
condition_.NotifyAll();
}
mutex_.Unlock();
}
private:
base::Mutex mutex_;
base::ConditionVariable condition_;
size_t counter_;
};
// A task that only processes a single item. Signals |barrier| when done; if
// |wait_when_done|, blocks until all other tasks have signaled |barrier|.
// If |did_process_an_item| is non-null, will set it to true if it does process
// an item. Otherwise, it will expect to get an item to process (and will report
// a failure if it doesn't).
class TaskProcessingOneItem : public ItemParallelJob::Task {
public:
TaskProcessingOneItem(Isolate* isolate, OneShotBarrier* barrier,
bool wait_when_done,
bool* did_process_an_item = nullptr)
: ItemParallelJob::Task(isolate),
barrier_(barrier),
wait_when_done_(wait_when_done),
did_process_an_item_(did_process_an_item) {}
void RunInParallel(Runner runner) override {
SimpleItem* item = GetItem<SimpleItem>();
if (did_process_an_item_) {
*did_process_an_item_ = item != nullptr;
} else {
EXPECT_NE(nullptr, item);
}
if (item) {
item->Process();
item->MarkFinished();
}
if (wait_when_done_) {
barrier_->Wait();
} else {
barrier_->Signal();
}
}
private:
OneShotBarrier* barrier_;
bool wait_when_done_;
bool* did_process_an_item_;
};
class TaskForDifferentItems;
class BaseItem : public ItemParallelJob::Item {
public:
~BaseItem() override = default;
virtual void ProcessItem(TaskForDifferentItems* task) = 0;
};
class TaskForDifferentItems : public ItemParallelJob::Task {
public:
explicit TaskForDifferentItems(Isolate* isolate, bool* processed_a,
bool* processed_b)
: ItemParallelJob::Task(isolate),
processed_a_(processed_a),
processed_b_(processed_b) {}
~TaskForDifferentItems() override = default;
void RunInParallel(Runner runner) override {
BaseItem* item = nullptr;
while ((item = GetItem<BaseItem>()) != nullptr) {
item->ProcessItem(this);
item->MarkFinished();
}
}
void ProcessA() { *processed_a_ = true; }
void ProcessB() { *processed_b_ = true; }
private:
bool* processed_a_;
bool* processed_b_;
};
class ItemA : public BaseItem {
public:
~ItemA() override = default;
void ProcessItem(TaskForDifferentItems* task) override { task->ProcessA(); }
};
class ItemB : public BaseItem {
public:
~ItemB() override = default;
void ProcessItem(TaskForDifferentItems* task) override { task->ProcessB(); }
};
} // namespace
// ItemParallelJob runs tasks even without work items (as requested tasks may be
// responsible for post-processing).
TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) {
bool did_run = false;
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
job.AddTask(new SimpleTask(i_isolate(), &did_run));
job.Run();
EXPECT_TRUE(did_run);
}
TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) {
bool did_run = false;
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
job.AddTask(new SimpleTask(i_isolate(), &did_run));
job.AddItem(new ItemParallelJob::Item);
job.Run();
EXPECT_TRUE(did_run);
}
TEST_F(ItemParallelJobTest, MoreTasksThanItems) {
const int kNumTasks = 128;
const int kNumItems = kNumTasks - 4;
TaskProcessingOneItem* tasks[kNumTasks] = {};
bool did_process_an_item[kNumTasks] = {};
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
// The barrier ensures that all tasks run. But only the first kNumItems tasks
// should be assigned an item to execute.
OneShotBarrier barrier(kNumTasks);
for (int i = 0; i < kNumTasks; i++) {
// Block the main thread when done to prevent it from returning control to
// the job (which could cancel tasks that have yet to be scheduled).
const bool wait_when_done = i == 0;
tasks[i] = new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done,
&did_process_an_item[i]);
job.AddTask(tasks[i]);
}
for (int i = 0; i < kNumItems; i++) {
job.AddItem(new SimpleItem);
}
job.Run();
for (int i = 0; i < kNumTasks; i++) {
// Only the first kNumItems tasks should have been assigned a work item.
EXPECT_EQ(i < kNumItems, did_process_an_item[i]);
}
}
TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
const int kItems = 111;
bool was_processed[kItems] = {};
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
job.AddTask(new EagerTask(i_isolate()));
for (int i = 0; i < kItems; i++) {
job.AddItem(new SimpleItem(&was_processed[i]));
}
job.Run();
for (int i = 0; i < kItems; i++) {
EXPECT_TRUE(was_processed[i]);
}
}
TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
const int kItemsAndTasks = 256;
bool was_processed[kItemsAndTasks] = {};
OneShotBarrier barrier(kItemsAndTasks);
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
for (int i = 0; i < kItemsAndTasks; i++) {
job.AddItem(new SimpleItem(&was_processed[i]));
// Block the main thread when done to prevent it from returning control to
// the job (which could cancel tasks that have yet to be scheduled).
const bool wait_when_done = i == 0;
job.AddTask(
new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done));
}
job.Run();
for (int i = 0; i < kItemsAndTasks; i++) {
EXPECT_TRUE(was_processed[i]);
}
}
TEST_F(ItemParallelJobTest, DifferentItems) {
bool item_a = false;
bool item_b = false;
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
job.AddItem(new ItemA());
job.AddItem(new ItemB());
job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
job.Run();
EXPECT_TRUE(item_a);
EXPECT_TRUE(item_b);
}
} // namespace internal
} // namespace v8