Commit ff27dca5 authored by Nico Hartmann, committed by Commit Bot

Revert "[Heap]: Marking use Jobs."

This reverts commit 4a2b2b2e.

Reason for revert: Speculative revert due to https://ci.chromium.org/p/chromium/builders/try/linux-rel/495075?

Original change's description:
> [Heap]: Marking use Jobs.
> 
> StopRequest is removed in favor of:
> COMPLETE_TASKS_FOR_TESTING -> JoinForTesting()
> PREEMPT_TASKS -> Pause()
> COMPLETE_ONGOING_TASKS now has the same behavior as PREEMPT_TASKS
> - we should avoid waiting on the main thread as much as possible.
> 
> Change-Id: Icceeb4f0c0fda2ed234b2f26fe308b11410fcfb7
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2376166
> Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#70037}
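
For reference, the mapping above corresponds to call sites like the following sketch (illustrative only; the individual calls are the ones visible in the diff below, but this snippet itself appears in neither CL):

    // Jobs-based API (removed by this revert):
    concurrent_marking->ScheduleJob();
    concurrent_marking->Pause();           // preempts workers ASAP
    concurrent_marking->JoinForTesting();  // waits for the job to finish

    // StopRequest-based API (restored by this revert):
    concurrent_marking->ScheduleTasks();
    concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
    concurrent_marking->Stop(
        ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);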

TBR=ulan@chromium.org,etiennep@chromium.org

Change-Id: I63f24bffa0f56c6ffa1d1977fc4fb8a76b6f3ba2
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2423722
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70049}
parent 27a8684a
@@ -54,6 +54,17 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
     const bool resume_on_exit_;
   };
 
+  enum class StopRequest {
+    // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
+    PREEMPT_TASKS,
+    // Wait for ongoing tasks to complete (and cancels unstarted tasks).
+    COMPLETE_ONGOING_TASKS,
+    // Wait for all scheduled tasks to complete (only use this in tests that
+    // control the full stack -- otherwise tasks cancelled by the platform can
+    // make this call hang).
+    COMPLETE_TASKS_FOR_TESTING,
+  };
+
   // TODO(gab): The only thing that prevents this being above 7 is
   // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
   // task 0, reserved for the main thread).
@@ -65,14 +76,13 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
   // heap should not be moved while these are active (can be stopped safely via
   // Stop() or PauseScope).
-  void ScheduleJob();
-  void RescheduleJobIfNeeded();
+  void ScheduleTasks();
 
   // Stops concurrent marking per |stop_request|'s semantics. Returns true
   // if concurrent marking was in progress, false otherwise.
-  bool Pause();
-  void JoinForTesting();
+  bool Stop(StopRequest stop_request);
+  void RescheduleTasksIfNeeded();
 
   // Flushes native context sizes to the given table of the main thread.
   void FlushNativeContexts(NativeContextStats* main_stats);
   // Flushes memory chunk data using the given marking state.
@@ -93,24 +103,31 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
  private:
   struct TaskState {
+    // The main thread sets this flag to true when it wants the concurrent
+    // marker to give up the worker thread.
+    std::atomic<bool> preemption_request;
     size_t marked_bytes = 0;
+    unsigned mark_compact_epoch;
+    bool is_forced_gc;
     MemoryChunkDataMap memory_chunk_data;
     NativeContextInferrer native_context_inferrer;
     NativeContextStats native_context_stats;
     char cache_line_padding[64];
   };
-  class JobTask;
-  void Run(JobDelegate* delegate, unsigned mark_compact_epoch,
-           bool is_forced_gc);
-  size_t GetMaxConcurrency(size_t worker_count);
-  std::unique_ptr<JobHandle> job_handle_;
+  class Task;
+  void Run(int task_id, TaskState* task_state);
   Heap* const heap_;
   MarkingWorklists* const marking_worklists_;
   WeakObjects* const weak_objects_;
   TaskState task_state_[kMaxTasks + 1];
   std::atomic<size_t> total_marked_bytes_{0};
   std::atomic<bool> ephemeron_marked_{false};
+  base::Mutex pending_lock_;
+  base::ConditionVariable pending_condition_;
+  int pending_task_count_ = 0;
+  bool is_pending_[kMaxTasks + 1] = {};
+  CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
+  int total_task_count_ = 0;
 };
 
 }  // namespace internal
@@ -5570,10 +5570,6 @@ void Heap::TearDown() {
     stress_scavenge_observer_ = nullptr;
   }
 
-  if (FLAG_concurrent_marking || FLAG_parallel_marking) {
-    concurrent_marking_->Pause();
-  }
-
   if (mark_compact_collector_) {
     mark_compact_collector_->TearDown();
     mark_compact_collector_.reset();
@@ -5590,7 +5586,6 @@ void Heap::TearDown() {
   scavenger_collector_.reset();
   array_buffer_sweeper_.reset();
   incremental_marking_.reset();
-  concurrent_marking_.reset();
   gc_idle_time_handler_.reset();
@@ -246,7 +246,7 @@ void IncrementalMarking::StartMarking() {
   MarkRoots();
 
   if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
-    heap_->concurrent_marking()->ScheduleJob();
+    heap_->concurrent_marking()->ScheduleTasks();
   }
 
   // Ready to start incremental marking.
@@ -1104,7 +1104,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
     }
     if (FLAG_concurrent_marking) {
       local_marking_worklists()->ShareWork();
-      heap_->concurrent_marking()->RescheduleJobIfNeeded();
+      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
     }
   }
   if (state_ == MARKING) {
@@ -904,11 +904,12 @@ void MarkCompactCollector::Prepare() {
   }
 }
 
-void MarkCompactCollector::FinishConcurrentMarking() {
+void MarkCompactCollector::FinishConcurrentMarking(
+    ConcurrentMarking::StopRequest stop_request) {
   // FinishConcurrentMarking is called for both, concurrent and parallel,
   // marking. It is safe to call this function when tasks are already finished.
   if (FLAG_parallel_marking || FLAG_concurrent_marking) {
-    heap()->concurrent_marking()->Pause();
+    heap()->concurrent_marking()->Stop(stop_request);
     heap()->concurrent_marking()->FlushMemoryChunkData(
         non_atomic_marking_state());
     heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
@@ -1663,11 +1664,12 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
         GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
 
     if (FLAG_parallel_marking) {
-      heap_->concurrent_marking()->RescheduleJobIfNeeded();
+      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
     }
 
     work_to_do = ProcessEphemerons();
-    FinishConcurrentMarking();
+    FinishConcurrentMarking(
+        ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
   }
 
   CHECK(weak_objects_.current_ephemerons.IsEmpty());
@@ -1982,11 +1984,12 @@ void MarkCompactCollector::MarkLiveObjects() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
 
     if (FLAG_parallel_marking) {
-      heap_->concurrent_marking()->RescheduleJobIfNeeded();
+      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
     }
 
     DrainMarkingWorklist();
-    FinishConcurrentMarking();
+    FinishConcurrentMarking(
+        ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
     DrainMarkingWorklist();
   }
@@ -479,7 +479,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Stop concurrent marking (either by preempting it right away or waiting for
   // it to complete as requested by |stop_request|).
-  void FinishConcurrentMarking();
+  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
 
   bool StartCompaction();
@@ -44,8 +44,9 @@ TEST(ConcurrentMarking) {
       new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
-  concurrent_marking->JoinForTesting();
+  concurrent_marking->ScheduleTasks();
+  concurrent_marking->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
   delete concurrent_marking;
 }
@@ -66,12 +67,14 @@ TEST(ConcurrentMarkingReschedule) {
       new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
-  concurrent_marking->Pause();
+  concurrent_marking->ScheduleTasks();
+  concurrent_marking->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleJobIfNeeded();
-  concurrent_marking->JoinForTesting();
+  concurrent_marking->RescheduleTasksIfNeeded();
+  concurrent_marking->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
   delete concurrent_marking;
 }
@@ -93,13 +96,14 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
   for (int i = 0; i < 5000; i++)
     PublishSegment(marking_worklists.shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
-  concurrent_marking->Pause();
+  concurrent_marking->ScheduleTasks();
+  concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
   for (int i = 0; i < 5000; i++)
     PublishSegment(marking_worklists.shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleJobIfNeeded();
-  concurrent_marking->JoinForTesting();
+  concurrent_marking->RescheduleTasksIfNeeded();
+  concurrent_marking->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
   delete concurrent_marking;
 }
@@ -113,7 +117,8 @@ TEST(ConcurrentMarkingMarkedBytes) {
   CcTest::CollectAllGarbage();
   if (!heap->incremental_marking()->IsStopped()) return;
   heap::SimulateIncrementalMarking(heap, false);
-  heap->concurrent_marking()->JoinForTesting();
+  heap->concurrent_marking()->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
   CHECK_GE(heap->concurrent_marking()->TotalMarkedBytes(), root->Size());
 }
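
Background: the "Jobs" referred to above is V8's platform job API (v8::JobTask / v8::JobHandle in include/v8-platform.h). A minimal sketch of the pattern the reverted CL adopted, written against the public platform interface; MarkingSketchJob and ProcessOneItem are hypothetical stand-ins for the real marking loop, not code from either CL:

    #include <memory>
    #include "include/v8-platform.h"

    // Hypothetical job that drains a worklist until told to yield.
    class MarkingSketchJob : public v8::JobTask {
     public:
      void Run(v8::JobDelegate* delegate) override {
        // Workers yield cooperatively via ShouldYield() instead of polling a
        // per-task preemption_request flag, as the restored code does.
        while (!delegate->ShouldYield()) {
          if (!ProcessOneItem()) return;  // Worklist drained.
        }
      }
      size_t GetMaxConcurrency(size_t worker_count) const override {
        return 4;  // Real code would size this from the remaining work.
      }

     private:
      bool ProcessOneItem() { return false; }  // Placeholder.
    };

    void Sketch(v8::Platform* platform) {
      std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
          v8::TaskPriority::kUserVisible, std::make_unique<MarkingSketchJob>());
      handle->Join();  // Waits for completion: what JoinForTesting() wrapped.
      // handle->Cancel() preempts instead, which is what Pause() built on.
    }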