Commit 9aba0159 authored by Dominik Inführ, committed by Commit Bot

[heap] Visit individual ephemerons instead of collections

When marking ephemerons, visit individual ephemerons whose key and value
are unreachable instead of simply iterating all ephemerons in all weak
collections. Also visit ephemerons at the end of concurrent marking to do
work that would otherwise have to happen in the atomic pause.

Bug: chromium:844008
Change-Id: I3400ad1f81c0cdc0fe6506a1f1146a6743a7fcd7
Reviewed-on: https://chromium-review.googlesource.com/1113934
Commit-Queue: Dominik Inführ <dinfuehr@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54039}
parent ec1e3ced
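
Background for reviewers: an ephemeron is a key/value pair with weak-key
semantics, i.e. the value is kept alive only while the key is otherwise
reachable. The per-pair rule this CL applies during marking, as a minimal
sketch (simplified pseudo-C++; IsMarked, MarkAndPush and Defer are
hypothetical helpers, not the V8 API):

    // If the key is already reachable, the value becomes reachable too and
    // goes onto the marking worklist. Otherwise the pair is deferred and
    // revisited once more keys have been marked.
    bool VisitEphemeron(HeapObject* key, HeapObject* value) {
      if (IsMarked(key)) {          // key is black or grey
        return MarkAndPush(value);  // true iff value was newly marked
      }
      Defer(key, value);  // retry in a later fixpoint iteration
      return false;
    }

Marking reaches a fixpoint once a whole iteration marks no new value.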
@@ -335,7 +335,6 @@
   F(MC_MARK_ROOTS)                            \
   F(MC_MARK_WEAK_CLOSURE)                     \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON)           \
-  F(MC_MARK_WEAK_CLOSURE_EPHEMERON_VISITING)  \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING)   \
   F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES)        \
   F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS)          \
......
@@ -375,11 +375,18 @@ class ConcurrentMarkingVisitor final
         VisitPointer(table, value_slot);
       } else {
-        Object* value = table->ValueAt(i);
-        if (value->IsHeapObject()) {
-          MarkCompactCollector::RecordSlot(table, value_slot,
-                                           HeapObject::cast(value));
+        Object* value_obj = table->ValueAt(i);
+        if (value_obj->IsHeapObject()) {
+          HeapObject* value = HeapObject::cast(value_obj);
+          MarkCompactCollector::RecordSlot(table, value_slot, value);
+
+          // Revisit ephemerons with both key and value unreachable at end
+          // of concurrent marking cycle.
+          if (marking_state_.IsWhite(value)) {
+            weak_objects_->discovered_ephemerons.Push(task_id_,
+                                                      Ephemeron{key, value});
+          }
         }
       }
     }
@@ -388,6 +395,22 @@ class ConcurrentMarkingVisitor final
     return table->SizeFromMap(map);
   }
 
+  // Implements ephemeron semantics: Marks value if key is already reachable.
+  // Returns true if value was actually marked.
+  bool VisitEphemeron(HeapObject* key, HeapObject* value) {
+    if (marking_state_.IsBlackOrGrey(key)) {
+      if (marking_state_.WhiteToGrey(value)) {
+        shared_.Push(value);
+        return true;
+      }
+    } else if (marking_state_.IsWhite(value)) {
+      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
+    }
+
+    return false;
+  }
+
   void MarkObject(HeapObject* object) {
 #ifdef THREAD_SANITIZER
     // Perform a dummy acquire load to tell TSAN that there is no data race
@@ -566,9 +589,21 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     heap_->isolate()->PrintWithTimestamp(
         "Starting concurrent marking task %d\n", task_id);
   }
+  bool ephemeron_marked = false;
+
   {
     TimedScope scope(&time_ms);
+    {
+      Ephemeron ephemeron;
+
+      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
+        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
+          ephemeron_marked = true;
+        }
+      }
+    }
+
     bool done = false;
     while (!done) {
       size_t current_marked_bytes = 0;
@@ -600,6 +635,17 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
         break;
       }
     }
+
+    if (done) {
+      Ephemeron ephemeron;
+
+      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
+        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
+          ephemeron_marked = true;
+        }
+      }
+    }
+
     shared_->FlushToGlobal(task_id);
     bailout_->FlushToGlobal(task_id);
     on_hold_->FlushToGlobal(task_id);
@@ -607,9 +653,17 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     weak_objects_->weak_cells.FlushToGlobal(task_id);
     weak_objects_->transition_arrays.FlushToGlobal(task_id);
     weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
+    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
+    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
+    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
     weak_objects_->weak_references.FlushToGlobal(task_id);
     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     total_marked_bytes_ += marked_bytes;
+
+    if (ephemeron_marked) {
+      set_ephemeron_marked(true);
+    }
+
     {
       base::LockGuard<base::Mutex> guard(&pending_lock_);
       is_pending_[task_id] = false;
@@ -669,7 +723,9 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
     base::LockGuard<base::Mutex> guard(&pending_lock_);
     if (pending_task_count_ > 0) return;
   }
-  if (!shared_->IsGlobalPoolEmpty()) {
+  if (!shared_->IsGlobalPoolEmpty() ||
+      !weak_objects_->current_ephemerons.IsGlobalEmpty() ||
+      !weak_objects_->discovered_ephemerons.IsGlobalEmpty()) {
     ScheduleTasks();
   }
 }
......
@@ -86,6 +86,11 @@ class ConcurrentMarking {
   size_t TotalMarkedBytes();
 
+  void set_ephemeron_marked(bool ephemeron_marked) {
+    ephemeron_marked_.store(ephemeron_marked);
+  }
+
+  bool ephemeron_marked() { return ephemeron_marked_.load(); }
+
  private:
   struct TaskState {
     // The main thread sets this flag to true when it wants the concurrent
@@ -105,6 +110,7 @@ class ConcurrentMarking {
   WeakObjects* const weak_objects_;
   TaskState task_state_[kMaxTasks + 1];
   std::atomic<size_t> total_marked_bytes_{0};
+  std::atomic<bool> ephemeron_marked_{false};
   base::Mutex pending_lock_;
   base::ConditionVariable pending_condition_;
   int pending_task_count_ = 0;
......
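
The new ephemeron_marked_ flag is how concurrent tasks report ephemeron
progress back to the main thread. A hedged sketch of the intended protocol
(hypothetical main-thread driver fragment; concurrent_marking and
ProcessEphemerons stand in for the real members):

    // Reset before an iteration; any concurrent task whose VisitEphemeron
    // marks a value calls set_ephemeron_marked(true), as in Run() above.
    concurrent_marking->set_ephemeron_marked(false);
    bool main_thread_marked = ProcessEphemerons();  // main-thread iteration
    // Progress from either side means the fixpoint is not reached yet.
    bool work_to_do =
        main_thread_marked || concurrent_marking->ephemeron_marked();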
@@ -698,7 +698,6 @@ void GCTracer::PrintNVP() const {
         "mark.main=%.1f "
         "mark.weak_closure=%.1f "
         "mark.weak_closure.ephemeron=%.1f "
-        "mark.weak_closure.ephemeron.visiting=%.1f "
         "mark.weak_closure.ephemeron.marking=%.1f "
         "mark.weak_closure.weak_handles=%.1f "
         "mark.weak_closure.weak_roots=%.1f "
@@ -794,7 +793,6 @@ void GCTracer::PrintNVP() const {
         current_.scopes[Scope::MC_MARK_MAIN],
         current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
         current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON],
-        current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_VISITING],
         current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING],
         current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
         current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
......
@@ -653,70 +653,87 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
   UpdateWeakReferencesAfterScavenge();
 }
 
+namespace {
+template <typename T>
+T* ForwardingAddress(Heap* heap, T* heap_obj) {
+  MapWord map_word = heap_obj->map_word();
+
+  if (map_word.IsForwardingAddress()) {
+    return T::cast(map_word.ToForwardingAddress());
+  } else if (heap->InNewSpace(heap_obj)) {
+    return nullptr;
+  } else {
+    return heap_obj;
+  }
+}
+}  // namespace
+
 void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
   Heap* heap = heap_;
   weak_objects_->weak_references.Update(
       [heap](std::pair<HeapObject*, HeapObjectReference**> slot_in,
              std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
         HeapObject* heap_obj = slot_in.first;
-        MapWord map_word = heap_obj->map_word();
-        if (map_word.IsForwardingAddress()) {
+        HeapObject* forwarded = ForwardingAddress(heap, heap_obj);
+
+        if (forwarded) {
           ptrdiff_t distance_to_slot =
               reinterpret_cast<Address>(slot_in.second) -
               reinterpret_cast<Address>(slot_in.first);
           Address new_slot =
-              reinterpret_cast<Address>(map_word.ToForwardingAddress()) +
-              distance_to_slot;
-          slot_out->first = map_word.ToForwardingAddress();
+              reinterpret_cast<Address>(forwarded) + distance_to_slot;
+          slot_out->first = forwarded;
           slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
           return true;
         }
-        if (heap->InNewSpace(heap_obj)) {
-          // The new space object containing the weak reference died.
-          return false;
-        }
-        *slot_out = slot_in;
-        return true;
+
+        return false;
       });
   weak_objects_->weak_objects_in_code.Update(
       [heap](std::pair<HeapObject*, Code*> slot_in,
              std::pair<HeapObject*, Code*>* slot_out) -> bool {
         HeapObject* heap_obj = slot_in.first;
-        MapWord map_word = heap_obj->map_word();
-        if (map_word.IsForwardingAddress()) {
-          slot_out->first = map_word.ToForwardingAddress();
+        HeapObject* forwarded = ForwardingAddress(heap, heap_obj);
+
+        if (forwarded) {
+          slot_out->first = forwarded;
           slot_out->second = slot_in.second;
           return true;
         }
-        if (heap->InNewSpace(heap_obj)) {
-          // The new space object which is referred weakly is dead (i.e., didn't
-          // get scavenged). Drop references to it.
-          return false;
-        }
-        *slot_out = slot_in;
-        return true;
+
+        return false;
       });
   weak_objects_->ephemeron_hash_tables.Update(
       [heap](EphemeronHashTable* slot_in,
              EphemeronHashTable** slot_out) -> bool {
-        HeapObject* heap_obj = slot_in;
-        MapWord map_word = heap_obj->map_word();
-        if (map_word.IsForwardingAddress()) {
-          *slot_out = EphemeronHashTable::cast(map_word.ToForwardingAddress());
-          return true;
-        }
-        if (heap->InNewSpace(heap_obj)) {
-          // An object could die in scavenge even though an earlier full GC's
-          // concurrent marking has already marked it. In the case of an
-          // EphemeronHashTable it would have already been added to the
-          // worklist. If that happens the table needs to be removed again.
-          return false;
-        }
-        *slot_out = slot_in;
-        return true;
+        EphemeronHashTable* forwarded = ForwardingAddress(heap, slot_in);
+
+        if (forwarded) {
+          *slot_out = forwarded;
+          return true;
+        }
+
+        return false;
       });
+
+  auto ephemeron_updater = [heap](Ephemeron slot_in,
+                                  Ephemeron* slot_out) -> bool {
+    HeapObject* key = slot_in.key;
+    HeapObject* value = slot_in.value;
+    HeapObject* forwarded_key = ForwardingAddress(heap, key);
+    HeapObject* forwarded_value = ForwardingAddress(heap, value);
+
+    if (forwarded_key && forwarded_value) {
+      *slot_out = Ephemeron{forwarded_key, forwarded_value};
+      return true;
+    }
+
+    return false;
+  };
+
+  weak_objects_->current_ephemerons.Update(ephemeron_updater);
+  weak_objects_->next_ephemerons.Update(ephemeron_updater);
+  weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
 }
 
 void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
......
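
ForwardingAddress folds the three possible post-scavenge states of an object
into one helper, which is what lets each Update callback above shrink to a
single null check. A condensed usage sketch (heap and table are hypothetical
locals):

    EphemeronHashTable* forwarded = ForwardingAddress(heap, table);
    if (forwarded == nullptr) {
      // table lived in new space and died in the scavenge: drop the entry.
    } else {
      // Either the scavenger moved it (forwarded != table) or it was never
      // in new space (forwarded == table): keep the updated pointer.
    }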
@@ -106,10 +106,17 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
         VisitPointer(table, value_slot);
       } else {
-        Object* value = *value_slot;
-        if (value->IsHeapObject()) {
-          collector_->RecordSlot(table, value_slot, HeapObject::cast(value));
+        Object* value_obj = *value_slot;
+        if (value_obj->IsHeapObject()) {
+          HeapObject* value = HeapObject::cast(value_obj);
+          collector_->RecordSlot(table, value_slot, value);
+
+          // Revisit ephemerons with both key and value unreachable at end
+          // of concurrent marking cycle.
+          if (marking_state()->IsWhite(value)) {
+            collector_->AddEphemeron(key, value);
+          }
         }
       }
     }
......
This diff is collapsed.
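
The mark-compact.cc hunk is collapsed in this view. Based on the
declarations and worklist comments in the mark-compact.h hunk below, a
plausible reconstruction of the fixpoint driver is the following
(hypothetical sketch, not the collapsed diff itself):

    void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
      bool work_to_do = true;
      while (work_to_do) {
        // Ephemerons deferred by the previous iteration become the input of
        // this one. Swap requires empty local buffers on both worklists.
        weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
        heap()->concurrent_marking()->set_ephemeron_marked(false);
        bool ephemeron_marked = ProcessEphemerons();
        // Iterate again if the main thread or a concurrent task marked an
        // ephemeron value, since that can make further keys reachable.
        work_to_do = ephemeron_marked ||
                     heap()->concurrent_marking()->ephemeron_marked();
      }
    }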
@@ -411,6 +411,13 @@ class MajorNonAtomicMarkingState final
   }
 };
 
+struct Ephemeron {
+  HeapObject* key;
+  HeapObject* value;
+};
+
+typedef Worklist<Ephemeron, 64> EphemeronWorklist;
+
 // Weak objects encountered during marking.
 struct WeakObjects {
   Worklist<WeakCell*, 64> weak_cells;
@@ -420,6 +427,24 @@ struct WeakObjects {
   // them in the atomic pause.
   Worklist<EphemeronHashTable*, 64> ephemeron_hash_tables;
 
+  // Keep track of all ephemerons for concurrent marking tasks. Only store
+  // ephemerons in these worklists if both key and value are unreachable at
+  // the moment.
+  //
+  // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills
+  // these worklists.
+  //
+  // current_ephemerons is used as the draining worklist in the current
+  // fixpoint iteration.
+  EphemeronWorklist current_ephemerons;
+
+  // Stores ephemerons to visit in the next fixpoint iteration.
+  EphemeronWorklist next_ephemerons;
+
+  // When draining the marking worklist, newly discovered ephemerons are
+  // pushed into this worklist.
+  EphemeronWorklist discovered_ephemerons;
+
   // TODO(marja): For old space, we only need the slot, not the host
   // object. Optimize this by adding a different storage for old space.
   Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
@@ -628,6 +653,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
   }
 
+  void AddEphemeron(HeapObject* key, HeapObject* value) {
+    weak_objects_.discovered_ephemerons.Push(kMainThread,
+                                             Ephemeron{key, value});
+  }
+
   void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
     weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
   }
@@ -705,9 +735,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // if no concurrent threads are running.
   void ProcessMarkingWorklist() override;
 
-  // Drains the main thread marking work list. Will mark all pending objects
-  // if no concurrent threads are running.
-  void ProcessMarkingWorklistInParallel();
+  // Implements ephemeron semantics: Marks value if key is already reachable.
+  // Returns true if value was actually marked.
+  bool VisitEphemeron(HeapObject* key, HeapObject* value);
+
+  // Marks ephemerons and drains the marking worklist iteratively until a
+  // fixpoint is reached.
+  void ProcessEphemeronsUntilFixpoint();
+
+  // Drains the ephemeron and marking worklists. A single iteration of the
+  // fixpoint.
+  bool ProcessEphemerons();
 
   // Callback function for telling whether the object *p is an unmarked
   // heap object.
......
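
Continuing the reconstruction above, a single fixpoint iteration matching
the ProcessEphemerons declaration could plausibly look like this (hedged
sketch; the real body is in the collapsed mark-compact.cc diff; kMainThread
is the main-thread task id from mark-compact.h):

    bool MarkCompactCollector::ProcessEphemerons() {
      Ephemeron ephemeron;
      bool ephemeron_marked = false;

      // Drain ephemerons deferred from the previous iteration. Marking a
      // value pushes it onto the marking worklist.
      while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
        if (VisitEphemeron(ephemeron.key, ephemeron.value))
          ephemeron_marked = true;
      }

      // Draining the marking worklist can discover new ephemerons with
      // white keys; the visitors push those to discovered_ephemerons.
      ProcessMarkingWorklist();
      while (
          weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
        if (VisitEphemeron(ephemeron.key, ephemeron.value))
          ephemeron_marked = true;
      }

      return ephemeron_marked;
    }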
@@ -78,6 +78,15 @@ class Worklist {
     }
   }
 
+  // Swaps content with the given worklist. Local buffers need to be empty;
+  // not thread safe.
+  void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
+    CHECK(AreLocalsEmpty());
+    CHECK(other.AreLocalsEmpty());
+
+    global_pool_.Swap(other.global_pool_);
+  }
+
   bool Push(int task_id, EntryType entry) {
     DCHECK_LT(task_id, num_tasks_);
     DCHECK_NOT_NULL(private_push_segment(task_id));
@@ -120,10 +129,15 @@ class Worklist {
   bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
 
   bool IsGlobalEmpty() {
+    if (!AreLocalsEmpty()) return false;
+    return global_pool_.IsEmpty();
+  }
+
+  bool AreLocalsEmpty() {
     for (int i = 0; i < num_tasks_; i++) {
       if (!IsLocalEmpty(i)) return false;
     }
-    return global_pool_.IsEmpty();
+    return true;
   }
 
   size_t LocalSize(int task_id) {
@@ -260,6 +274,13 @@ class Worklist {
    public:
     GlobalPool() : top_(nullptr) {}
 
+    // Swaps contents; not thread safe.
+    void Swap(GlobalPool& other) {
+      Segment* temp = top_;
+      set_top(other.top_);
+      other.set_top(temp);
+    }
+
     V8_INLINE void Push(Segment* segment) {
       base::LockGuard<base::Mutex> guard(&lock_);
       segment->set_next(top_);
......
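
Worklist::Swap is what lets the fixpoint promote next_ephemerons to
current_ephemerons between iterations in O(1): only the global segment lists
are exchanged, no entries are copied. Minimal usage sketch (key and value
are hypothetical HeapObject pointers):

    Worklist<Ephemeron, 64> current, next;
    next.Push(kMainThread, Ephemeron{key, value});  // deferred last round
    next.FlushToGlobal(kMainThread);  // Swap CHECKs that locals are empty
    current.Swap(next);               // exchanges the global pools only
    Ephemeron e;
    while (current.Pop(kMainThread, &e)) {
      // revisit e.key / e.value in the new iteration
    }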