Commit b7e6eb92 authored by Michael Lippautz, committed by Commit Bot

[heap] Introduce on-hold concurrent marking work list

When hitting objects that are allocated in the most recent linear
allocation area, the concurrent marker currently has to bail out to the
main thread.

However, we only have to delay processing those objects until we are at
a safepoint, e.g. IM::Step(). With this change we flush those
on-hold-objects back to the shared queue upon performing an incremental
marking step.

Bug: chromium:694255
Change-Id: I25647d0fc581a5c4de0346bc394dc51062f65f70
Reviewed-on: https://chromium-review.googlesource.com/707315
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48424}
parent 9f0bdf04
......@@ -326,10 +326,12 @@ class ConcurrentMarking::Task : public CancelableTask {
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout,
MarkingWorklist* on_hold,
WeakObjects* weak_objects)
: heap_(heap),
shared_(shared),
bailout_(bailout),
on_hold_(on_hold),
weak_objects_(weak_objects),
pending_task_count_(0),
task_count_(0) {
......@@ -377,7 +379,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Address new_space_limit = heap_->new_space()->original_limit();
Address addr = object->address();
if (new_space_top <= addr && addr < new_space_limit) {
bailout_->Push(task_id, object);
on_hold_->Push(task_id, object);
} else {
Map* map = object->synchronized_map();
current_marked_bytes += visitor.Visit(map, object);
......@@ -395,6 +397,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
// young generation GC.
base::LockGuard<base::Mutex> guard(&task_state->lock);
bailout_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
}
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
......
......@@ -40,7 +40,8 @@ class ConcurrentMarking {
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
// NOTE(review): the diff scrape interleaved the removed and the added
// declaration lines, leaving two conflicting signatures; this is the
// post-commit declaration, which adds the |on_hold| worklist parameter.
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                  MarkingWorklist* bailout, MarkingWorklist* on_hold,
                  WeakObjects* weak_objects);
void ScheduleTasks();
void WaitForTasks();
......@@ -76,6 +77,7 @@ class ConcurrentMarking {
Heap* heap_;
MarkingWorklist* shared_;
MarkingWorklist* bailout_;
MarkingWorklist* on_hold_;
WeakObjects* weak_objects_;
TaskState task_state_[kMaxTasks + 1];
base::AtomicNumber<size_t> total_marked_bytes_;
......
......@@ -5414,10 +5414,10 @@ bool Heap::SetUp() {
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->bailout(),
mark_compact_collector_->weak_objects());
marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
} else {
concurrent_marking_ =
new ConcurrentMarking(this, nullptr, nullptr, nullptr);
new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
}
for (int i = 0; i <= LAST_SPACE; i++) {
......
......@@ -1137,6 +1137,14 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
size_t bytes_processed = 0;
if (state_ == MARKING) {
if (FLAG_concurrent_marking) {
heap_->new_space()->ResetOriginalTop();
// It is safe to merge back all objects that were on hold to the shared
// work list at Step because we are at a safepoint where all objects
// are properly initialized.
marking_worklist()->shared()->MergeGlobalPool(
marking_worklist()->on_hold());
}
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
marking_worklist()->Print();
......
......@@ -506,12 +506,18 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
if (bailout_.Pop(kMainThread, &result)) return result;
#endif
if (shared_.Pop(kMainThread, &result)) return result;
#ifdef V8_CONCURRENT_MARKING
// The expectation is that this work list is empty almost all the time
// and we can thus avoid the emptiness checks by putting it last.
if (on_hold_.Pop(kMainThread, &result)) return result;
#endif
return nullptr;
}
// Drops all entries from the bailout, shared, and on-hold marking
// work lists.
void Clear() {
bailout_.Clear();
shared_.Clear();
on_hold_.Clear();
}
// Returns true when the main thread's local view of the bailout work
// list has no entries. NOTE(review): this does not inspect the global
// pool — confirm callers only need the local check.
bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
......@@ -519,12 +525,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Returns true only when all three work lists (bailout, shared, on_hold)
// are empty, both in the main thread's local views and in the shared
// global pools.
//
// NOTE(review): the scraped diff interleaved the removed return line with
// the added ones, duplicating the Global-pool sub-expression; this is the
// reconstructed post-commit body.
bool IsEmpty() {
  return bailout_.IsLocalEmpty(kMainThread) &&
         shared_.IsLocalEmpty(kMainThread) &&
         on_hold_.IsLocalEmpty(kMainThread) &&
         bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
         on_hold_.IsGlobalPoolEmpty();
}
// Returns the combined entry count of the main thread's local views of
// the bailout, shared, and on-hold work lists. Global pool segments are
// intentionally not counted here.
//
// NOTE(review): the scraped diff left both the removed line and its
// replacement in place (duplicate shared_.LocalSize term); this is the
// reconstructed post-commit body.
int Size() {
  return static_cast<int>(bailout_.LocalSize(kMainThread) +
                          shared_.LocalSize(kMainThread) +
                          on_hold_.LocalSize(kMainThread));
}
// Calls the specified callback on each element of the deques and replaces
......@@ -535,14 +544,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Applies |callback| to every element of the bailout, shared, and
// on-hold work lists (see Worklist::Update for the replace/remove
// semantics). The template header for |Callback| is above this hunk.
void Update(Callback callback) {
bailout_.Update(callback);
shared_.Update(callback);
on_hold_.Update(callback);
}
// Accessors handing out the individual work lists, e.g. for wiring them
// into ConcurrentMarking (see Heap::SetUp in this change).
ConcurrentMarkingWorklist* shared() { return &shared_; }
ConcurrentMarkingWorklist* bailout() { return &bailout_; }
ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
// Debug-prints each work list with a distinguishing label.
void Print() {
PrintWorklist("shared", &shared_);
PrintWorklist("bailout", &bailout_);
PrintWorklist("on_hold", &on_hold_);
}
private:
......@@ -572,6 +584,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
}
ConcurrentMarkingWorklist shared_;
ConcurrentMarkingWorklist bailout_;
ConcurrentMarkingWorklist on_hold_;
};
class RootMarkingVisitor;
......
......@@ -2616,8 +2616,13 @@ class NewSpace : public Space {
return allocation_info_.limit();
}
// Advances the sampled "original top" of the space up to the current
// allocation top. Invoked from IncrementalMarking::Step (a safepoint in
// this change), so the concurrent marker's [original_top, original_limit)
// bailout window shrinks to cover only allocations newer than the step.
//
// NOTE(review): the scraped diff duplicated the original_top() accessor
// (old and new position both rendered); this is the reconstructed
// post-commit ordering with a single definition.
void ResetOriginalTop() {
  DCHECK_GE(top(), original_top());
  DCHECK_LE(top(), original_limit());
  original_top_.SetValue(top());
}

Address original_top() { return original_top_.Value(); }
Address original_limit() { return original_limit_.Value(); }
// Return the address of the first object in the active semispace.
......
......@@ -6,6 +6,7 @@
#define V8_HEAP_WORKLIST_
#include <cstddef>
#include <utility>
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
......@@ -168,6 +169,11 @@ class Worklist {
PublishPopSegmentToGlobal(task_id);
}
// Steals every segment from |other|'s global pool and splices the whole
// chain into this worklist's global pool in a single operation.
void MergeGlobalPool(Worklist* other) {
  std::pair<Segment*, Segment*> segment_range = other->global_pool_.Extract();
  global_pool_.MergeList(segment_range.first, segment_range.second);
}
private:
FRIEND_TEST(WorkListTest, SegmentCreate);
FRIEND_TEST(WorkListTest, SegmentPush);
......@@ -305,6 +311,28 @@ class Worklist {
}
}
// Detaches the entire global segment list and returns its (head, tail)
// pair; returns (nullptr, nullptr) when the pool is empty. Intended to
// feed MergeList on another pool (see MergeGlobalPool).
std::pair<Segment*, Segment*> Extract() {
Segment* top = nullptr;
{
base::LockGuard<base::Mutex> guard(&lock_);
if (top_ == nullptr) return std::make_pair(nullptr, nullptr);
top = top_;
set_top(nullptr);
}
// Walking to the tail happens deliberately outside the lock: once
// top_ was reset to nullptr under the lock, the detached chain is
// private to this caller.
Segment* end = top;
while (end->next() != nullptr) end = end->next();
return std::make_pair(top, end);
}
// Splices the segment chain [start, end] (as produced by Extract) onto
// the front of this pool's global list. No-op for an empty chain.
void MergeList(Segment* start, Segment* end) {
if (start == nullptr) return;
{
base::LockGuard<base::Mutex> guard(&lock_);
// Prepend: the incoming chain's tail now points at the previous top.
end->set_next(top_);
set_top(start);
}
}
private:
void set_top(Segment* segment) {
base::AsAtomicPointer::Relaxed_Store(&top_, segment);
......
......@@ -37,10 +37,11 @@ TEST(ConcurrentMarking) {
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
}
ConcurrentMarking::MarkingWorklist shared, bailout;
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout, &weak_objects);
new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->WaitForTasks();
......@@ -59,10 +60,10 @@ TEST(ConcurrentMarkingReschedule) {
collector->EnsureSweepingCompleted();
}
ConcurrentMarking::MarkingWorklist shared, bailout;
ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
WeakObjects weak_objects;
ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout, &weak_objects);
new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
concurrent_marking->WaitForTasks();
......
......@@ -301,5 +301,31 @@ TEST(WorkListTest, MultipleSegmentsStolen) {
EXPECT_TRUE(worklist.IsGlobalEmpty());
}
// Verifies that MergeGlobalPool moves a published segment from one
// worklist's global pool into another's, leaving the source empty.
TEST(WorkListTest, MergeGlobalPool) {
TestWorklist worklist1;
TestWorklist::View worklist_view1(&worklist1, 0);
SomeObject dummy;
// Fill exactly one segment in worklist1's local view.
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view1.Push(&dummy));
}
SomeObject* retrieved = nullptr;
// One more push/pop to publish the full segment.
EXPECT_TRUE(worklist_view1.Push(nullptr));
EXPECT_TRUE(worklist_view1.Pop(&retrieved));
EXPECT_EQ(nullptr, retrieved);
// Merging global pool into a new Worklist.
TestWorklist worklist2;
TestWorklist::View worklist_view2(&worklist2, 0);
worklist2.MergeGlobalPool(&worklist1);
EXPECT_FALSE(worklist2.IsGlobalEmpty());
// All entries must now come out of worklist2, and worklist1 must be
// drained — its segment was moved, not copied.
for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
EXPECT_TRUE(worklist_view2.Pop(&retrieved));
EXPECT_EQ(&dummy, retrieved);
EXPECT_FALSE(worklist_view1.Pop(&retrieved));
}
EXPECT_TRUE(worklist1.IsGlobalEmpty());
EXPECT_TRUE(worklist2.IsGlobalEmpty());
}
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment