Commit 56d576d4 authored by Michael Lippautz, committed by Commit Bot

[heap] Untangle parallel and concurrent marking flag

Callers of the ConcurrentMarking API should check the flags, which they were
already mostly doing.

Change-Id: Ie273bb740fe6d2b36dfb924ce813670ebd47bf3f
Reviewed-on: https://chromium-review.googlesource.com/c/1280307
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56638}
parent 27e5c0b3
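
The change moves the flag checks to the call sites: the ConcurrentMarking methods in the hunks below now DCHECK that at least one of the two flags is set, while callers guard their calls with FLAG_concurrent_marking || FLAG_parallel_marking. A minimal standalone sketch of that caller-checks/callee-asserts pattern (the class and flag names here are illustrative stand-ins, not the V8 API):

#include <cassert>
#include <cstdio>

// Illustrative stand-ins for the runtime flags (not V8's FLAG_* definitions).
static bool flag_concurrent_marking = true;
static bool flag_parallel_marking = true;

// Simplified stand-in for the ConcurrentMarking API: instead of silently
// returning when marking is disabled, the callee asserts the precondition.
class MarkerTasks {
 public:
  void ScheduleTasks() {
    // Callee: assume the caller already performed the flag check.
    assert(flag_parallel_marking || flag_concurrent_marking);
    std::printf("scheduling marking tasks\n");
  }
};

int main() {
  MarkerTasks tasks;
  // Caller's responsibility: check the flags before touching the API.
  if (flag_concurrent_marking || flag_parallel_marking) {
    tasks.ScheduleTasks();
  }
  return 0;
}
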
@@ -736,7 +736,6 @@ DEFINE_BOOL(write_protect_code_memory, V8_WRITE_PROTECT_CODE_MEMORY_BOOL,
 DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
             "use concurrent marking")
 DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
-DEFINE_IMPLICATION(parallel_marking, concurrent_marking)
 DEFINE_INT(ephemeron_fixpoint_iterations, 10,
            "number of fixpoint iterations it takes to switch to linear "
            "ephemeron algorithm")
@@ -565,7 +565,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
       embedder_objects_(embedder_objects) {
   // The runtime flag should be set only if the compile time flag was set.
 #ifndef V8_CONCURRENT_MARKING
-  CHECK(!FLAG_concurrent_marking);
+  CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
 #endif
 }
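
The strengthened CHECK ties both runtime flags to the compile-time switch. A self-contained sketch of that kind of consistency check, using stand-in names rather than V8's V8_CONCURRENT_MARKING and FLAG_* symbols:

#include <cassert>

// Stand-ins for the runtime flags; in a real build these would come from the
// command line.
static bool flag_concurrent_marking = false;
static bool flag_parallel_marking = false;

void CheckFlagConsistency() {
#ifndef DEMO_CONCURRENT_MARKING  // stand-in for the compile-time switch
  // Without compile-time support, neither runtime mode may be enabled.
  assert(!flag_concurrent_marking && !flag_parallel_marking);
#endif
}

int main() {
  CheckFlagConsistency();
  return 0;
}
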
@@ -674,8 +674,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
 }
 
 void ConcurrentMarking::ScheduleTasks() {
+  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   DCHECK(!heap_->IsTearingDown());
-  if (!FLAG_concurrent_marking) return;
   base::MutexGuard guard(&pending_lock_);
   DCHECK_EQ(0, pending_task_count_);
   if (task_count_ == 0) {
@@ -713,7 +713,8 @@ void ConcurrentMarking::ScheduleTasks() {
 }
 
 void ConcurrentMarking::RescheduleTasksIfNeeded() {
-  if (!FLAG_concurrent_marking || heap_->IsTearingDown()) return;
+  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+  if (heap_->IsTearingDown()) return;
   {
     base::MutexGuard guard(&pending_lock_);
     if (pending_task_count_ > 0) return;
@@ -726,7 +727,7 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
 }
 
 bool ConcurrentMarking::Stop(StopRequest stop_request) {
-  if (!FLAG_concurrent_marking) return false;
+  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   base::MutexGuard guard(&pending_lock_);
   if (pending_task_count_ == 0) return false;
@@ -800,8 +801,9 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
 ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     : concurrent_marking_(concurrent_marking),
-      resume_on_exit_(concurrent_marking_->Stop(
-          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+      resume_on_exit_(FLAG_concurrent_marking &&
+                      concurrent_marking_->Stop(
+                          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
   DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
 }
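
With Stop() no longer returning early on !FLAG_concurrent_marking, PauseScope itself gates resume_on_exit_ on the flag. A simplified, self-contained sketch of that flag-gated RAII shape (the Marker and PauseScope types here are stand-ins, not the V8 classes):

#include <cstdio>

// Stand-in for the runtime flag.
static bool flag_concurrent_marking = true;

// Stand-in for ConcurrentMarking with stop/resume entry points.
class Marker {
 public:
  bool Stop() {  // Returns true if running tasks were actually stopped.
    std::printf("stopping tasks\n");
    return true;
  }
  void Resume() { std::printf("resuming tasks\n"); }
};

// RAII scope: stop tasks on entry, and resume on exit only if the flag is on
// and something was actually stopped.
class PauseScope {
 public:
  explicit PauseScope(Marker* marker)
      : marker_(marker),
        resume_on_exit_(flag_concurrent_marking && marker_->Stop()) {}
  ~PauseScope() {
    if (resume_on_exit_) marker_->Resume();
  }

 private:
  Marker* const marker_;
  const bool resume_on_exit_;
};

int main() {
  Marker marker;
  {
    PauseScope pause(&marker);  // tasks paused within this scope
  }                             // tasks resumed here, if anything was stopped
  return 0;
}
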
@@ -4310,7 +4310,7 @@ void Heap::SetUp() {
       new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
                              mark_compact_collector_->weak_objects());
-  if (FLAG_concurrent_marking) {
+  if (FLAG_concurrent_marking || FLAG_parallel_marking) {
     MarkCompactCollector::MarkingWorklist* marking_worklist =
         mark_compact_collector_->marking_worklist();
     concurrent_marking_ = new ConcurrentMarking(
@@ -331,7 +331,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
   // Ensure that the object is either grey or black before pushing it
   // into marking worklist.
   marking_state()->WhiteToGrey(object);
-  if (FLAG_concurrent_marking) {
+  if (FLAG_concurrent_marking || FLAG_parallel_marking) {
     marking_worklist()->PushBailout(object);
   } else {
     marking_worklist()->Push(object);
@@ -780,7 +780,9 @@ void MarkCompactCollector::Prepare() {
 void MarkCompactCollector::FinishConcurrentMarking(
     ConcurrentMarking::StopRequest stop_request) {
-  if (FLAG_concurrent_marking) {
+  // FinishConcurrentMarking is called for both, concurrent and parallel,
+  // marking. It is safe to call this function when tasks are already finished.
+  if (FLAG_parallel_marking || FLAG_concurrent_marking) {
     heap()->concurrent_marking()->Stop(stop_request);
     heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
   }
@@ -1458,7 +1460,6 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
       if (FLAG_parallel_marking) {
-        DCHECK(FLAG_concurrent_marking);
         heap_->concurrent_marking()->RescheduleTasksIfNeeded();
       }
@@ -1738,7 +1739,6 @@ void MarkCompactCollector::MarkLiveObjects() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
     if (FLAG_parallel_marking) {
-      DCHECK(FLAG_concurrent_marking);
       heap_->concurrent_marking()->RescheduleTasksIfNeeded();
     }
     ProcessMarkingWorklist();