Commit 63c4be59 authored by Michael Lippautz, committed by Commit Bot

Remove V8.GC.ParallelTaskLatencyMicroSeconds histogram

The histogram is not used anymore. Remove to save resources.

Bug: chromium:926072
Change-Id: I929f34f7ab0613431eaf9740f3342b6b2cec6cbd
Reviewed-on: https://chromium-review.googlesource.com/c/1477672
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59684}
parent d74840b7
...@@ -1328,9 +1328,7 @@ class RuntimeCallTimerScope { ...@@ -1328,9 +1328,7 @@ class RuntimeCallTimerScope {
HT(compile_script_on_background, \ HT(compile_script_on_background, \
V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \ V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
HT(compile_function_on_background, \ HT(compile_function_on_background, \
V8.CompileFunctionMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \ V8.CompileFunctionMicroSeconds.BackgroundThread, 1000000, MICROSECOND)
HT(gc_parallel_task_latency, V8.GC.ParallelTaskLatencyMicroSeconds, 1000000, \
MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \ #define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds) AHT(compile_lazy, V8.CompileLazyMicroSeconds)
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "src/heap/item-parallel-job.h" #include "src/heap/item-parallel-job.h"
#include "src/base/platform/semaphore.h" #include "src/base/platform/semaphore.h"
#include "src/counters.h"
#include "src/v8.h" #include "src/v8.h"
namespace v8 { namespace v8 {
...@@ -12,16 +13,9 @@ namespace internal { ...@@ -12,16 +13,9 @@ namespace internal {
ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {} ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {}
ItemParallelJob::Task::~Task() { void ItemParallelJob::Task::SetupInternal(base::Semaphore* on_finish,
// The histogram is reset in RunInternal(). If it's still around it means std::vector<Item*>* items,
// this task was cancelled before being scheduled. size_t start_index) {
if (gc_parallel_task_latency_histogram_)
gc_parallel_task_latency_histogram_->RecordAbandon();
}
void ItemParallelJob::Task::SetupInternal(
base::Semaphore* on_finish, std::vector<Item*>* items, size_t start_index,
base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram) {
on_finish_ = on_finish; on_finish_ = on_finish;
items_ = items; items_ = items;
...@@ -30,17 +24,9 @@ void ItemParallelJob::Task::SetupInternal( ...@@ -30,17 +24,9 @@ void ItemParallelJob::Task::SetupInternal(
} else { } else {
items_considered_ = items_->size(); items_considered_ = items_->size();
} }
gc_parallel_task_latency_histogram_ =
std::move(gc_parallel_task_latency_histogram);
} }
void ItemParallelJob::Task::RunInternal() { void ItemParallelJob::Task::RunInternal() {
if (gc_parallel_task_latency_histogram_) {
gc_parallel_task_latency_histogram_->RecordDone();
gc_parallel_task_latency_histogram_.reset();
}
RunInParallel(); RunInParallel();
on_finish_->Signal(); on_finish_->Signal();
} }
...@@ -58,7 +44,7 @@ ItemParallelJob::~ItemParallelJob() { ...@@ -58,7 +44,7 @@ ItemParallelJob::~ItemParallelJob() {
} }
} }
void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) { void ItemParallelJob::Run() {
DCHECK_GT(tasks_.size(), 0); DCHECK_GT(tasks_.size(), 0);
const size_t num_items = items_.size(); const size_t num_items = items_.size();
const size_t num_tasks = tasks_.size(); const size_t num_tasks = tasks_.size();
...@@ -68,9 +54,6 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) { ...@@ -68,9 +54,6 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
"num_tasks", static_cast<int>(num_tasks), "num_items", "num_tasks", static_cast<int>(num_tasks), "num_items",
static_cast<int>(num_items)); static_cast<int>(num_items));
AsyncTimedHistogram gc_parallel_task_latency_histogram(
async_counters->gc_parallel_task_latency(), async_counters);
// Some jobs have more tasks than items (when the items are mere coarse // Some jobs have more tasks than items (when the items are mere coarse
// grain tasks that generate work dynamically for a second phase which all // grain tasks that generate work dynamically for a second phase which all
// tasks participate in). Some jobs even have 0 items to preprocess but // tasks participate in). Some jobs even have 0 items to preprocess but
...@@ -101,9 +84,7 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) { ...@@ -101,9 +84,7 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
// assigning work items. // assigning work items.
DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items); DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
task->SetupInternal(pending_tasks_, &items_, start_index, task->SetupInternal(pending_tasks_, &items_, start_index);
i > 0 ? gc_parallel_task_latency_histogram
: base::Optional<AsyncTimedHistogram>());
task_ids[i] = task->id(); task_ids[i] = task->id();
if (i > 0) { if (i > 0) {
V8::GetCurrentPlatform()->CallBlockingTaskOnWorkerThread(std::move(task)); V8::GetCurrentPlatform()->CallBlockingTaskOnWorkerThread(std::move(task));
......
...@@ -11,9 +11,7 @@ ...@@ -11,9 +11,7 @@
#include "src/base/atomic-utils.h" #include "src/base/atomic-utils.h"
#include "src/base/logging.h" #include "src/base/logging.h"
#include "src/base/macros.h" #include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/cancelable-task.h" #include "src/cancelable-task.h"
#include "src/counters.h"
#include "src/globals.h" #include "src/globals.h"
namespace v8 { namespace v8 {
...@@ -36,9 +34,6 @@ class Isolate; ...@@ -36,9 +34,6 @@ class Isolate;
// //
// Items need to be marked as finished after processing them. Task and Item // Items need to be marked as finished after processing them. Task and Item
// ownership is transferred to the job. // ownership is transferred to the job.
//
// Each parallel (non-main thread) task will report the time between the job
// being created and it being scheduled to |gc_parallel_task_latency_histogram|.
class V8_EXPORT_PRIVATE ItemParallelJob { class V8_EXPORT_PRIVATE ItemParallelJob {
public: public:
class Task; class Task;
...@@ -71,7 +66,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob { ...@@ -71,7 +66,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
class V8_EXPORT_PRIVATE Task : public CancelableTask { class V8_EXPORT_PRIVATE Task : public CancelableTask {
public: public:
explicit Task(Isolate* isolate); explicit Task(Isolate* isolate);
~Task() override; ~Task() override = default;
virtual void RunInParallel() = 0; virtual void RunInParallel() = 0;
...@@ -101,13 +96,9 @@ class V8_EXPORT_PRIVATE ItemParallelJob { ...@@ -101,13 +96,9 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
// Sets up state required before invoking Run(). If // Sets up state required before invoking Run(). If
// |start_index is >= items_.size()|, this task will not process work items // |start_index is >= items_.size()|, this task will not process work items
// (some jobs have more tasks than work items in order to parallelize post- // (some jobs have more tasks than work items in order to parallelize post-
// processing, e.g. scavenging). If |gc_parallel_task_latency_histogram| is // processing, e.g. scavenging).
// provided, it will be used to report histograms on the latency between void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
// posting the task and it being scheduled. size_t start_index);
void SetupInternal(
base::Semaphore* on_finish, std::vector<Item*>* items,
size_t start_index,
base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram);
// We don't allow overriding this method any further. // We don't allow overriding this method any further.
void RunInternal() final; void RunInternal() final;
...@@ -116,7 +107,6 @@ class V8_EXPORT_PRIVATE ItemParallelJob { ...@@ -116,7 +107,6 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
size_t cur_index_ = 0; size_t cur_index_ = 0;
size_t items_considered_ = 0; size_t items_considered_ = 0;
base::Semaphore* on_finish_ = nullptr; base::Semaphore* on_finish_ = nullptr;
base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram_;
DISALLOW_COPY_AND_ASSIGN(Task); DISALLOW_COPY_AND_ASSIGN(Task);
}; };
...@@ -135,15 +125,15 @@ class V8_EXPORT_PRIVATE ItemParallelJob { ...@@ -135,15 +125,15 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
int NumberOfItems() const { return static_cast<int>(items_.size()); } int NumberOfItems() const { return static_cast<int>(items_.size()); }
int NumberOfTasks() const { return static_cast<int>(tasks_.size()); } int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
// Runs this job. Reporting metrics in a thread-safe manner to // Runs this job.
// |async_counters|. void Run();
void Run(const std::shared_ptr<Counters>& async_counters);
private: private:
std::vector<Item*> items_; std::vector<Item*> items_;
std::vector<std::unique_ptr<Task>> tasks_; std::vector<std::unique_ptr<Task>> tasks_;
CancelableTaskManager* cancelable_task_manager_; CancelableTaskManager* cancelable_task_manager_;
base::Semaphore* pending_tasks_; base::Semaphore* pending_tasks_;
DISALLOW_COPY_AND_ASSIGN(ItemParallelJob); DISALLOW_COPY_AND_ASSIGN(ItemParallelJob);
}; };
......
...@@ -2880,7 +2880,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( ...@@ -2880,7 +2880,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
evacuators[i]->AddObserver(migration_observer); evacuators[i]->AddObserver(migration_observer);
job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i])); job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
} }
job->Run(isolate()->async_counters()); job->Run();
for (int i = 0; i < wanted_num_tasks; i++) { for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize(); evacuators[i]->Finalize();
delete evacuators[i]; delete evacuators[i];
...@@ -3540,7 +3540,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() { ...@@ -3540,7 +3540,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(), isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS)); GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
} }
updating_job.Run(isolate()->async_counters()); updating_job.Run();
} }
{ {
...@@ -3572,7 +3572,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() { ...@@ -3572,7 +3572,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(), isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS)); GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
} }
updating_job.Run(isolate()->async_counters()); updating_job.Run();
heap()->array_buffer_collector()->FreeAllocations(); heap()->array_buffer_collector()->FreeAllocations();
} }
} }
...@@ -4104,7 +4104,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { ...@@ -4104,7 +4104,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{ {
TRACE_GC(heap()->tracer(), TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS); GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
updating_job.Run(isolate()->async_counters()); updating_job.Run();
heap()->array_buffer_collector()->FreeAllocations(); heap()->array_buffer_collector()->FreeAllocations();
} }
...@@ -4554,7 +4554,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel( ...@@ -4554,7 +4554,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
job.AddTask( job.AddTask(
new YoungGenerationMarkingTask(isolate(), this, worklist(), i)); new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
} }
job.Run(isolate()->async_counters()); job.Run();
DCHECK(worklist()->IsEmpty()); DCHECK(worklist()->IsEmpty());
} }
} }
......
...@@ -224,7 +224,7 @@ void ScavengerCollector::CollectGarbage() { ...@@ -224,7 +224,7 @@ void ScavengerCollector::CollectGarbage() {
{ {
// Parallel phase scavenging all copied and promoted objects. // Parallel phase scavenging all copied and promoted objects.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL); TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
job.Run(isolate_->async_counters()); job.Run();
DCHECK(copied_list.IsEmpty()); DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty()); DCHECK(promotion_list.IsEmpty());
} }
......
...@@ -202,7 +202,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) { ...@@ -202,7 +202,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) {
parallel_job_semaphore()); parallel_job_semaphore());
job.AddTask(new SimpleTask(i_isolate(), &did_run)); job.AddTask(new SimpleTask(i_isolate(), &did_run));
job.Run(i_isolate()->async_counters()); job.Run();
EXPECT_TRUE(did_run); EXPECT_TRUE(did_run);
} }
...@@ -214,7 +214,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) { ...@@ -214,7 +214,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) {
job.AddItem(new ItemParallelJob::Item); job.AddItem(new ItemParallelJob::Item);
job.Run(i_isolate()->async_counters()); job.Run();
EXPECT_TRUE(did_run); EXPECT_TRUE(did_run);
} }
...@@ -244,7 +244,7 @@ TEST_F(ItemParallelJobTest, MoreTasksThanItems) { ...@@ -244,7 +244,7 @@ TEST_F(ItemParallelJobTest, MoreTasksThanItems) {
job.AddItem(new SimpleItem); job.AddItem(new SimpleItem);
} }
job.Run(i_isolate()->async_counters()); job.Run();
for (int i = 0; i < kNumTasks; i++) { for (int i = 0; i < kNumTasks; i++) {
// Only the first kNumItems tasks should have been assigned a work item. // Only the first kNumItems tasks should have been assigned a work item.
...@@ -261,7 +261,7 @@ TEST_F(ItemParallelJobTest, SingleThreadProcessing) { ...@@ -261,7 +261,7 @@ TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
for (int i = 0; i < kItems; i++) { for (int i = 0; i < kItems; i++) {
job.AddItem(new SimpleItem(&was_processed[i])); job.AddItem(new SimpleItem(&was_processed[i]));
} }
job.Run(i_isolate()->async_counters()); job.Run();
for (int i = 0; i < kItems; i++) { for (int i = 0; i < kItems; i++) {
EXPECT_TRUE(was_processed[i]); EXPECT_TRUE(was_processed[i]);
} }
...@@ -282,7 +282,7 @@ TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) { ...@@ -282,7 +282,7 @@ TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
job.AddTask( job.AddTask(
new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done)); new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done));
} }
job.Run(i_isolate()->async_counters()); job.Run();
for (int i = 0; i < kItemsAndTasks; i++) { for (int i = 0; i < kItemsAndTasks; i++) {
EXPECT_TRUE(was_processed[i]); EXPECT_TRUE(was_processed[i]);
} }
...@@ -296,7 +296,7 @@ TEST_F(ItemParallelJobTest, DifferentItems) { ...@@ -296,7 +296,7 @@ TEST_F(ItemParallelJobTest, DifferentItems) {
job.AddItem(new ItemA()); job.AddItem(new ItemA());
job.AddItem(new ItemB()); job.AddItem(new ItemB());
job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b)); job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
job.Run(i_isolate()->async_counters()); job.Run();
EXPECT_TRUE(item_a); EXPECT_TRUE(item_a);
EXPECT_TRUE(item_b); EXPECT_TRUE(item_b);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment