Commit 065c47de authored by Ulan Degenbaev, committed by Commit Bot

Revert "[heap] Keep concurrent marking tasks running until marking is completed."

This reverts commit 20d5048a.

Revert "[heap] Ensure that concurrent marking tasks exit before heap tear down."

This reverts commit 387f65d4.

Reason: having concurrent marking tasks wait for a signal from the main thread
is susceptible to deadlocks. We should instead re-schedule concurrent marking
tasks once they exit.

BUG=chromium:694255

Change-Id: I20db2f26b42e960f4cc04506d9598c1187b8a003
Reviewed-on: https://chromium-review.googlesource.com/571800
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46671}
parent afdb0b45
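
The re-scheduling approach proposed in the commit message can be illustrated with a minimal, self-contained sketch. This is not V8 code: Scheduler, Push, Process, and every other name below are assumptions invented for this example. The point is the shape of the protocol: a worker task simply returns when the shared worklist drains, and the producer schedules a fresh task when new work arrives, so no task ever blocks waiting for a signal from the main thread.

#include <deque>
#include <mutex>
#include <thread>
#include <vector>

class Scheduler {
 public:
  // Add a work item and make sure a worker task is running to drain it.
  void Push(int item) {
    std::lock_guard<std::mutex> guard(mutex_);
    worklist_.push_back(item);
    if (!task_running_) {
      task_running_ = true;  // Re-schedule a task instead of signaling one.
      workers_.emplace_back([this] { Run(); });
    }
  }

  // Wait for all worker tasks started so far to finish.
  void EnsureCompleted() {
    std::vector<std::thread> workers;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      workers.swap(workers_);
    }
    for (std::thread& worker : workers) worker.join();
  }

 private:
  void Run() {
    while (true) {
      int item;
      {
        std::lock_guard<std::mutex> guard(mutex_);
        if (worklist_.empty()) {
          // Exit instead of waiting. Clearing the flag under the same lock
          // that guards the worklist closes the race with Push(): an item
          // pushed after this point sees the flag cleared and schedules a
          // fresh task, so nothing is stranded and nothing deadlocks.
          task_running_ = false;
          return;
        }
        item = worklist_.front();
        worklist_.pop_front();
      }
      Process(item);  // Do the actual work outside the lock.
    }
  }

  void Process(int) {}  // Stand-in for the marking work.

  std::mutex mutex_;
  std::deque<int> worklist_;          // Guarded by mutex_.
  bool task_running_ = false;         // Guarded by mutex_.
  std::vector<std::thread> workers_;  // Guarded by mutex_.
};

By contrast, the condition-variable design being reverted parks idle tasks until the main thread calls NotifyWaitingTasks() or RequestTaskExit(); if that call is missed on any path (for example during tear-down), the parked tasks wait forever, which is the deadlock risk the commit message refers to.
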
--- a/src/heap/concurrent-marking.cc
+++ b/src/heap/concurrent-marking.cc
@@ -284,9 +284,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
       shared_(shared),
       bailout_(bailout),
       pending_task_semaphore_(0),
-      pending_task_count_(0),
-      waiting_task_count_(0),
-      task_exit_requested_(false) {
+      pending_task_count_(0) {
   // The runtime flag should be set only if the compile time flag was set.
 #ifndef V8_CONCURRENT_MARKING
   CHECK(!FLAG_concurrent_marking);
@@ -300,15 +298,9 @@ void ConcurrentMarking::Run(int task_id, base::Mutex* lock) {
   {
     TimedScope scope(&time_ms);
     while (true) {
-      bool worklist_is_empty;
-      {
-        // Process the worklist.
       base::LockGuard<base::Mutex> guard(lock);
       HeapObject* object;
-      if (!shared_->Pop(task_id, &object)) {
-        worklist_is_empty = true;
-      } else {
-        worklist_is_empty = false;
+      if (!shared_->Pop(task_id, &object)) break;
       Address new_space_top = heap_->new_space()->original_top();
       Address new_space_limit = heap_->new_space()->original_limit();
       Address addr = object->address();
@@ -319,16 +311,6 @@ void ConcurrentMarking::Run(int task_id, base::Mutex* lock) {
         bytes_marked += visitor.Visit(map, object);
       }
     }
-      }
-      if (worklist_is_empty) {
-        base::LockGuard<base::Mutex> guard(&wait_lock_);
-        if (task_exit_requested_) break;
-        waiting_task_count_++;
-        wait_condition_.Wait(&wait_lock_);
-        waiting_task_count_--;
-      }
-    }
     {
       // Take the lock to synchronize with worklist update after
       // young generation GC.
@@ -349,8 +331,6 @@ void ConcurrentMarking::Start() {
     heap_->isolate()->PrintWithTimestamp("Starting concurrent marking\n");
   }
   pending_task_count_ = kTasks;
-  waiting_task_count_ = 0;
-  task_exit_requested_ = false;
   for (int i = 0; i < kTasks; i++) {
     int task_id = i + 1;
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -362,35 +342,12 @@
 void ConcurrentMarking::EnsureCompleted() {
   if (!FLAG_concurrent_marking) return;
-  RequestTaskExit();
   while (pending_task_count_ > 0) {
     pending_task_semaphore_.Wait();
     pending_task_count_--;
   }
 }
 
-void ConcurrentMarking::RequestTaskExit() {
-  if (!FLAG_concurrent_marking) return;
-  base::LockGuard<base::Mutex> guard(&wait_lock_);
-  task_exit_requested_ = true;
-  wait_condition_.NotifyAll();
-}
-
-void ConcurrentMarking::NotifyWaitingTasks() {
-  if (!FLAG_concurrent_marking) return;
-  base::LockGuard<base::Mutex> guard(&wait_lock_);
-  if (waiting_task_count_ > 0) {
-    if (!shared_->IsGlobalPoolEmpty()) {
-      wait_condition_.NotifyAll();
-    }
-  }
-}
-
-bool ConcurrentMarking::AllTasksWaitingForTesting() {
-  base::LockGuard<base::Mutex> guard(&wait_lock_);
-  return waiting_task_count_ == kTasks;
-}
-
 ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     : concurrent_marking_(concurrent_marking) {
   if (!FLAG_concurrent_marking) return;
--- a/src/heap/concurrent-marking.h
+++ b/src/heap/concurrent-marking.h
@@ -39,12 +39,6 @@ class ConcurrentMarking {
   void Start();
   bool IsRunning() { return pending_task_count_ > 0; }
   void EnsureCompleted();
-  // Wake up waiting tasks if the shared global pool is not empty.
-  void NotifyWaitingTasks();
-  // Set task exit request flag and wake up waiting tasks.
-  void RequestTaskExit();
-  // Returns true if all tasks are waiting. For testing only.
-  bool AllTasksWaitingForTesting();
 
  private:
   struct TaskLock {
@@ -57,16 +51,8 @@ class ConcurrentMarking {
   MarkingWorklist* shared_;
   MarkingWorklist* bailout_;
   TaskLock task_lock_[kTasks];
   // Used by the main thread to wait for tasks to exit.
   base::Semaphore pending_task_semaphore_;
   int pending_task_count_;
-  // Used by the tasks to wait for
-  // - more work from the main thread
-  // - or for the exit request.
-  base::Mutex wait_lock_;
-  base::ConditionVariable wait_condition_;
-  int waiting_task_count_;
-  bool task_exit_requested_;
 };
 
 }  // namespace internal
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -5818,11 +5818,6 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
   }
 }
 
-void Heap::CompleteTasksBeforeTearDown() {
-  mark_compact_collector()->EnsureSweepingCompleted();
-  concurrent_marking()->EnsureCompleted();
-}
-
 void Heap::TearDown() {
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -942,8 +942,6 @@ class Heap {
   // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
   V8_INLINE void CreateObjectStats();
 
-  void CompleteTasksBeforeTearDown();
-
   // Destroys all memory allocated by the heap.
   void TearDown();
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -592,7 +592,7 @@ void IncrementalMarking::StartMarking() {
   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
 
   if (FLAG_concurrent_marking) {
-    heap()->concurrent_marking()->Start();
+    heap_->concurrent_marking()->Start();
   }
 
   // Ready to start incremental marking.
@@ -1056,9 +1056,6 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
   // that shouldn't make us do a scavenge and keep being incremental, so we set
   // the should-hurry flag to indicate that there can't be much work left to do.
   set_should_hurry(true);
-  if (FLAG_concurrent_marking) {
-    heap()->concurrent_marking()->RequestTaskExit();
-  }
   if (FLAG_trace_incremental_marking) {
     heap()->isolate()->PrintWithTimestamp(
         "[IncrementalMarking] Complete (normal).\n");
@@ -1239,9 +1236,6 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
         heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
       }
     }
-    if (FLAG_concurrent_marking) {
-      heap()->concurrent_marking()->NotifyWaitingTasks();
-    }
   }
 
   double end = heap_->MonotonicallyIncreasingTimeInMs();
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -2454,7 +2454,7 @@ void Isolate::Deinit() {
   wasm_compilation_manager_->TearDown();
 
-  heap_.CompleteTasksBeforeTearDown();
+  heap_.mark_compact_collector()->EnsureSweepingCompleted();
 
   DumpAndResetStats();
--- a/test/cctest/heap/test-concurrent-marking.cc
+++ b/test/cctest/heap/test-concurrent-marking.cc
@@ -15,43 +15,20 @@
 namespace v8 {
 namespace internal {
 
-void PublishSegment(ConcurrentMarking::MarkingWorklist* worklist,
-                    HeapObject* object) {
-  for (int i = 0; i <= ConcurrentMarking::MarkingWorklist::kSegmentCapacity;
-       i++) {
-    worklist->Push(0, object);
-  }
-  CHECK(worklist->Pop(0, &object));
-}
-
 TEST(ConcurrentMarking) {
   if (!i::FLAG_concurrent_marking) return;
   CcTest::InitializeVM();
   Heap* heap = CcTest::heap();
   ConcurrentMarking::MarkingWorklist shared, bailout;
+  for (int i = 0; i <= ConcurrentMarking::MarkingWorklist::kSegmentCapacity;
+       i++) {
+    shared.Push(0, heap->undefined_value());
+  }
+  HeapObject* object;
+  CHECK(shared.Pop(0, &object));
   ConcurrentMarking* concurrent_marking =
       new ConcurrentMarking(heap, &shared, &bailout);
-  PublishSegment(&shared, heap->undefined_value());
   concurrent_marking->Start();
   concurrent_marking->EnsureCompleted();
   delete concurrent_marking;
 }
-
-TEST(ConcurrentMarkingWaiting) {
-  if (!i::FLAG_concurrent_marking) return;
-  CcTest::InitializeVM();
-  Heap* heap = CcTest::heap();
-  ConcurrentMarking::MarkingWorklist shared, bailout;
-  ConcurrentMarking* concurrent_marking =
-      new ConcurrentMarking(heap, &shared, &bailout);
-  PublishSegment(&shared, heap->undefined_value());
-  concurrent_marking->Start();
-  while (!concurrent_marking->AllTasksWaitingForTesting()) {
-    // Busy wait.
-  }
-  PublishSegment(&shared, heap->undefined_value());
-  concurrent_marking->NotifyWaitingTasks();
-  concurrent_marking->EnsureCompleted();
-  delete concurrent_marking;
-}