Commit 63c4be59 authored by Michael Lippautz, committed by Commit Bot

Remove V8.GC.ParallelTaskLatencyMicroSeconds histogram

The histogram is not used anymore. Remove to save resources.

Bug: chromium:926072
Change-Id: I929f34f7ab0613431eaf9740f3342b6b2cec6cbd
Reviewed-on: https://chromium-review.googlesource.com/c/1477672
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59684}
parent d74840b7
......@@ -1328,9 +1328,7 @@ class RuntimeCallTimerScope {
HT(compile_script_on_background, \
V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
HT(compile_function_on_background, \
V8.CompileFunctionMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
HT(gc_parallel_task_latency, V8.GC.ParallelTaskLatencyMicroSeconds, 1000000, \
MICROSECOND)
V8.CompileFunctionMicroSeconds.BackgroundThread, 1000000, MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
......
......@@ -5,6 +5,7 @@
#include "src/heap/item-parallel-job.h"
#include "src/base/platform/semaphore.h"
#include "src/counters.h"
#include "src/v8.h"
namespace v8 {
......@@ -12,16 +13,9 @@ namespace internal {
ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {}
ItemParallelJob::Task::~Task() {
// The histogram is reset in RunInternal(). If it's still around it means
// this task was cancelled before being scheduled.
if (gc_parallel_task_latency_histogram_)
gc_parallel_task_latency_histogram_->RecordAbandon();
}
void ItemParallelJob::Task::SetupInternal(
base::Semaphore* on_finish, std::vector<Item*>* items, size_t start_index,
base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram) {
void ItemParallelJob::Task::SetupInternal(base::Semaphore* on_finish,
std::vector<Item*>* items,
size_t start_index) {
on_finish_ = on_finish;
items_ = items;
......@@ -30,17 +24,9 @@ void ItemParallelJob::Task::SetupInternal(
} else {
items_considered_ = items_->size();
}
gc_parallel_task_latency_histogram_ =
std::move(gc_parallel_task_latency_histogram);
}
void ItemParallelJob::Task::RunInternal() {
if (gc_parallel_task_latency_histogram_) {
gc_parallel_task_latency_histogram_->RecordDone();
gc_parallel_task_latency_histogram_.reset();
}
RunInParallel();
on_finish_->Signal();
}
......@@ -58,7 +44,7 @@ ItemParallelJob::~ItemParallelJob() {
}
}
void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
void ItemParallelJob::Run() {
DCHECK_GT(tasks_.size(), 0);
const size_t num_items = items_.size();
const size_t num_tasks = tasks_.size();
......@@ -68,9 +54,6 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
"num_tasks", static_cast<int>(num_tasks), "num_items",
static_cast<int>(num_items));
AsyncTimedHistogram gc_parallel_task_latency_histogram(
async_counters->gc_parallel_task_latency(), async_counters);
// Some jobs have more tasks than items (when the items are mere coarse
// grain tasks that generate work dynamically for a second phase which all
// tasks participate in). Some jobs even have 0 items to preprocess but
......@@ -101,9 +84,7 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
// assigning work items.
DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
task->SetupInternal(pending_tasks_, &items_, start_index,
i > 0 ? gc_parallel_task_latency_histogram
: base::Optional<AsyncTimedHistogram>());
task->SetupInternal(pending_tasks_, &items_, start_index);
task_ids[i] = task->id();
if (i > 0) {
V8::GetCurrentPlatform()->CallBlockingTaskOnWorkerThread(std::move(task));
......
......@@ -11,9 +11,7 @@
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/cancelable-task.h"
#include "src/counters.h"
#include "src/globals.h"
namespace v8 {
......@@ -36,9 +34,6 @@ class Isolate;
//
// Items need to be marked as finished after processing them. Task and Item
// ownership is transferred to the job.
//
// Each parallel (non-main thread) task will report the time between the job
// being created and it being scheduled to |gc_parallel_task_latency_histogram|.
class V8_EXPORT_PRIVATE ItemParallelJob {
public:
class Task;
......@@ -71,7 +66,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
explicit Task(Isolate* isolate);
~Task() override;
~Task() override = default;
virtual void RunInParallel() = 0;
......@@ -101,13 +96,9 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
// Sets up state required before invoking Run(). If
// |start_index is >= items_.size()|, this task will not process work items
// (some jobs have more tasks than work items in order to parallelize post-
// processing, e.g. scavenging). If |gc_parallel_task_latency_histogram| is
// provided, it will be used to report histograms on the latency between
// posting the task and it being scheduled.
void SetupInternal(
base::Semaphore* on_finish, std::vector<Item*>* items,
size_t start_index,
base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram);
// processing, e.g. scavenging).
void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
size_t start_index);
// We don't allow overriding this method any further.
void RunInternal() final;
......@@ -116,7 +107,6 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
size_t cur_index_ = 0;
size_t items_considered_ = 0;
base::Semaphore* on_finish_ = nullptr;
base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
......@@ -135,15 +125,15 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
int NumberOfItems() const { return static_cast<int>(items_.size()); }
int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
// Runs this job. Reporting metrics in a thread-safe manner to
// |async_counters|.
void Run(const std::shared_ptr<Counters>& async_counters);
// Runs this job.
void Run();
private:
std::vector<Item*> items_;
std::vector<std::unique_ptr<Task>> tasks_;
CancelableTaskManager* cancelable_task_manager_;
base::Semaphore* pending_tasks_;
DISALLOW_COPY_AND_ASSIGN(ItemParallelJob);
};
......
......@@ -2880,7 +2880,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
evacuators[i]->AddObserver(migration_observer);
job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
}
job->Run(isolate()->async_counters());
job->Run();
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
delete evacuators[i];
......@@ -3540,7 +3540,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run(isolate()->async_counters());
updating_job.Run();
}
{
......@@ -3572,7 +3572,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run(isolate()->async_counters());
updating_job.Run();
heap()->array_buffer_collector()->FreeAllocations();
}
}
......@@ -4104,7 +4104,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
updating_job.Run(isolate()->async_counters());
updating_job.Run();
heap()->array_buffer_collector()->FreeAllocations();
}
......@@ -4554,7 +4554,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
job.AddTask(
new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
}
job.Run(isolate()->async_counters());
job.Run();
DCHECK(worklist()->IsEmpty());
}
}
......
......@@ -224,7 +224,7 @@ void ScavengerCollector::CollectGarbage() {
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
job.Run(isolate_->async_counters());
job.Run();
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
......
......@@ -202,7 +202,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) {
parallel_job_semaphore());
job.AddTask(new SimpleTask(i_isolate(), &did_run));
job.Run(i_isolate()->async_counters());
job.Run();
EXPECT_TRUE(did_run);
}
......@@ -214,7 +214,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) {
job.AddItem(new ItemParallelJob::Item);
job.Run(i_isolate()->async_counters());
job.Run();
EXPECT_TRUE(did_run);
}
......@@ -244,7 +244,7 @@ TEST_F(ItemParallelJobTest, MoreTasksThanItems) {
job.AddItem(new SimpleItem);
}
job.Run(i_isolate()->async_counters());
job.Run();
for (int i = 0; i < kNumTasks; i++) {
// Only the first kNumItems tasks should have been assigned a work item.
......@@ -261,7 +261,7 @@ TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
for (int i = 0; i < kItems; i++) {
job.AddItem(new SimpleItem(&was_processed[i]));
}
job.Run(i_isolate()->async_counters());
job.Run();
for (int i = 0; i < kItems; i++) {
EXPECT_TRUE(was_processed[i]);
}
......@@ -282,7 +282,7 @@ TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
job.AddTask(
new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done));
}
job.Run(i_isolate()->async_counters());
job.Run();
for (int i = 0; i < kItemsAndTasks; i++) {
EXPECT_TRUE(was_processed[i]);
}
......@@ -296,7 +296,7 @@ TEST_F(ItemParallelJobTest, DifferentItems) {
job.AddItem(new ItemA());
job.AddItem(new ItemB());
job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
job.Run(i_isolate()->async_counters());
job.Run();
EXPECT_TRUE(item_a);
EXPECT_TRUE(item_b);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment