Commit 4b429b10 authored by Jakob Kummerow, committed by Commit Bot

Revert "[heap] Visit individual ephemerons instead of collections"

This reverts commit 9aba0159.

Reason for revert: causes widespread breakage on Chromium-integrated builds, failing here:

  CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());

in MarkCompactCollector::ProcessEphemeronMarking(). See e.g. https://logs.chromium.org/v/?s=chromium%2Fbb%2Fclient.v8.fyi%2FV8-Blink_Linux_64__dbg_%2F12321%2F%2B%2Frecipes%2Fsteps%2Fwebkit_unit_tests%2F0%2Fstdout and more on https://ci.chromium.org/p/v8/g/fyi/console

Original change's description:
> [heap] Visit individual ephemerons instead of collections
> 
> When marking ephemerons visit individual ephemerons with key and value
> unreachable instead of simply iterating all ephemerons in all weak
> collections. Also visit ephemerons at end of concurrent marking to do
> work we would otherwise need to do in the atomic pause.
> 
> Bug: chromium:844008
> Change-Id: I3400ad1f81c0cdc0fe6506a1f1146a6743a7fcd7
> Reviewed-on: https://chromium-review.googlesource.com/1113934
> Commit-Queue: Dominik Inführ <dinfuehr@google.com>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#54039}

TBR=ulan@chromium.org,dinfuehr@google.com

Change-Id: Ib44bfe8c49e8fc30c3d0f2beba03a2895530dfd6
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:844008
Reviewed-on: https://chromium-review.googlesource.com/1116118
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54045}
parent 36de7f8e
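The description above assumes familiarity with ephemeron semantics: an ephemeron's value must be kept alive only if its key is reachable, and ephemerons whose keys are not yet marked have to be retried until marking reaches a fixpoint. A rough, self-contained sketch of that rule (simplified standard-library types as stand-ins, not V8's classes; V8 uses tri-color marking bits and segmented per-task worklists instead):

  #include <deque>
  #include <unordered_set>

  // Illustration only; names and containers are stand-ins.
  struct Obj {};
  struct Ephemeron { Obj* key; Obj* value; };

  struct EphemeronMarker {
    std::unordered_set<Obj*> marked;   // objects already reached by marking
    std::deque<Ephemeron> unresolved;  // ephemerons whose key is still unmarked

    // Core ephemeron rule: the value is kept alive only through its key.
    // Returns true if the value was newly marked.
    bool VisitEphemeron(const Ephemeron& e) {
      if (marked.count(e.key)) return marked.insert(e.value).second;
      unresolved.push_back(e);  // retry later; the key may still get marked
      return false;
    }

    // Retry unresolved ephemerons until no visit marks a new value.
    void ProcessEphemeronsUntilFixpoint() {
      bool progress = true;
      while (progress) {
        progress = false;
        std::deque<Ephemeron> batch;
        batch.swap(unresolved);
        for (const Ephemeron& e : batch) {
          if (VisitEphemeron(e)) progress = true;
        }
      }
    }
  };

In the reverted change, this work is split between the concurrent marking tasks and the atomic pause via the current_ephemerons, next_ephemerons, and discovered_ephemerons worklists that the diff below removes.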
@@ -335,6 +335,7 @@
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERON) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERON_VISITING) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING) \
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
......
@@ -375,18 +375,11 @@ class ConcurrentMarkingVisitor final
VisitPointer(table, value_slot);
} else {
Object* value_obj = table->ValueAt(i);
if (value_obj->IsHeapObject()) {
HeapObject* value = HeapObject::cast(value_obj);
MarkCompactCollector::RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
if (marking_state_.IsWhite(value)) {
weak_objects_->discovered_ephemerons.Push(task_id_,
Ephemeron{key, value});
}
Object* value = table->ValueAt(i);
if (value->IsHeapObject()) {
MarkCompactCollector::RecordSlot(table, value_slot,
HeapObject::cast(value));
}
}
}
@@ -395,22 +388,6 @@ class ConcurrentMarkingVisitor final
return table->SizeFromMap(map);
}
// Implements ephemeron semantics: Marks value if key is already reachable.
// Returns true if value was actually marked.
bool VisitEphemeron(HeapObject* key, HeapObject* value) {
if (marking_state_.IsBlackOrGrey(key)) {
if (marking_state_.WhiteToGrey(value)) {
shared_.Push(value);
return true;
}
} else if (marking_state_.IsWhite(value)) {
weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
}
return false;
}
void MarkObject(HeapObject* object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
@@ -589,21 +566,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
heap_->isolate()->PrintWithTimestamp(
"Starting concurrent marking task %d\n", task_id);
}
bool ephemeron_marked = false;
{
TimedScope scope(&time_ms);
{
Ephemeron ephemeron;
while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
}
}
}
bool done = false;
while (!done) {
size_t current_marked_bytes = 0;
@@ -635,17 +600,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
break;
}
}
if (done) {
Ephemeron ephemeron;
while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
}
}
}
shared_->FlushToGlobal(task_id);
bailout_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
@@ -653,17 +607,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
weak_objects_->current_ephemerons.FlushToGlobal(task_id);
weak_objects_->next_ephemerons.FlushToGlobal(task_id);
weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
weak_objects_->weak_references.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
if (ephemeron_marked) {
set_ephemeron_marked(true);
}
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
is_pending_[task_id] = false;
@@ -723,9 +669,7 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
base::LockGuard<base::Mutex> guard(&pending_lock_);
if (pending_task_count_ > 0) return;
}
if (!shared_->IsGlobalPoolEmpty() ||
!weak_objects_->current_ephemerons.IsGlobalEmpty() ||
!weak_objects_->discovered_ephemerons.IsGlobalEmpty()) {
if (!shared_->IsGlobalPoolEmpty()) {
ScheduleTasks();
}
}
......
@@ -86,11 +86,6 @@ class ConcurrentMarking {
size_t TotalMarkedBytes();
void set_ephemeron_marked(bool ephemeron_marked) {
ephemeron_marked_.store(ephemeron_marked);
}
bool ephemeron_marked() { return ephemeron_marked_.load(); }
private:
struct TaskState {
// The main thread sets this flag to true when it wants the concurrent
@@ -110,7 +105,6 @@ class ConcurrentMarking {
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
std::atomic<bool> ephemeron_marked_{false};
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
int pending_task_count_ = 0;
......
@@ -698,6 +698,7 @@ void GCTracer::PrintNVP() const {
"mark.main=%.1f "
"mark.weak_closure=%.1f "
"mark.weak_closure.ephemeron=%.1f "
"mark.weak_closure.ephemeron.visiting=%.1f "
"mark.weak_closure.ephemeron.marking=%.1f "
"mark.weak_closure.weak_handles=%.1f "
"mark.weak_closure.weak_roots=%.1f "
@@ -793,6 +794,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_MARK_MAIN],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_VISITING],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
......
@@ -653,87 +653,70 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
UpdateWeakReferencesAfterScavenge();
}
namespace {
template <typename T>
T* ForwardingAddress(Heap* heap, T* heap_obj) {
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
return T::cast(map_word.ToForwardingAddress());
} else if (heap->InNewSpace(heap_obj)) {
return nullptr;
} else {
return heap_obj;
}
}
} // namespace
void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
Heap* heap = heap_;
weak_objects_->weak_references.Update(
[heap](std::pair<HeapObject*, HeapObjectReference**> slot_in,
std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
HeapObject* heap_obj = slot_in.first;
HeapObject* forwarded = ForwardingAddress(heap, heap_obj);
if (forwarded) {
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
ptrdiff_t distance_to_slot =
reinterpret_cast<Address>(slot_in.second) -
reinterpret_cast<Address>(slot_in.first);
Address new_slot =
reinterpret_cast<Address>(forwarded) + distance_to_slot;
slot_out->first = forwarded;
reinterpret_cast<Address>(map_word.ToForwardingAddress()) +
distance_to_slot;
slot_out->first = map_word.ToForwardingAddress();
slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
return true;
}
return false;
if (heap->InNewSpace(heap_obj)) {
// The new space object containing the weak reference died.
return false;
}
*slot_out = slot_in;
return true;
});
weak_objects_->weak_objects_in_code.Update(
[heap](std::pair<HeapObject*, Code*> slot_in,
std::pair<HeapObject*, Code*>* slot_out) -> bool {
HeapObject* heap_obj = slot_in.first;
HeapObject* forwarded = ForwardingAddress(heap, heap_obj);
if (forwarded) {
slot_out->first = forwarded;
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
slot_out->first = map_word.ToForwardingAddress();
slot_out->second = slot_in.second;
return true;
}
return false;
if (heap->InNewSpace(heap_obj)) {
// The new space object which is referred weakly is dead (i.e., didn't
// get scavenged). Drop references to it.
return false;
}
*slot_out = slot_in;
return true;
});
weak_objects_->ephemeron_hash_tables.Update(
[heap](EphemeronHashTable* slot_in,
EphemeronHashTable** slot_out) -> bool {
EphemeronHashTable* forwarded = ForwardingAddress(heap, slot_in);
if (forwarded) {
*slot_out = forwarded;
HeapObject* heap_obj = slot_in;
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
*slot_out = EphemeronHashTable::cast(map_word.ToForwardingAddress());
return true;
}
return false;
});
auto ephemeron_updater = [heap](Ephemeron slot_in,
Ephemeron* slot_out) -> bool {
HeapObject* key = slot_in.key;
HeapObject* value = slot_in.value;
HeapObject* forwarded_key = ForwardingAddress(heap, key);
HeapObject* forwarded_value = ForwardingAddress(heap, value);
if (forwarded_key && forwarded_value) {
*slot_out = Ephemeron{forwarded_key, forwarded_value};
return true;
}
return false;
};
if (heap->InNewSpace(heap_obj)) {
// An object could die in scavenge even though an earlier full GC's
// concurrent marking has already marked it. In the case of an
// EphemeronHashTable it would have already been added to the
// worklist. If that happens the table needs to be removed again.
return false;
}
weak_objects_->current_ephemerons.Update(ephemeron_updater);
weak_objects_->next_ephemerons.Update(ephemeron_updater);
weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
*slot_out = slot_in;
return true;
});
}
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
......
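Both the removed ForwardingAddress helper and the restored inline MapWord checks in the lambdas above make the same three-way decision after a scavenge: an object has either been moved (follow its forwarding address), died (it was in new space and was not forwarded), or stayed put (it lives outside new space). Each Update callback then returns false to drop a dead entry and true, with a possibly rewritten slot, to keep it. A minimal sketch of that decision, with illustrative types (V8 really stores the forwarding pointer in the object's map word, not in a dedicated field):

  struct FakeHeapObject {
    FakeHeapObject* forwarded_to = nullptr;  // set by the scavenger when the object moves
    bool in_new_space = false;
  };

  // Post-scavenge location of an object, or nullptr if it did not survive.
  FakeHeapObject* ForwardingAddressSketch(FakeHeapObject* obj) {
    if (obj->forwarded_to != nullptr) return obj->forwarded_to;  // moved: follow the pointer
    if (obj->in_new_space) return nullptr;  // in new space but not forwarded: dead
    return obj;                             // outside new space: unchanged
  }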
@@ -106,17 +106,10 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
VisitPointer(table, value_slot);
} else {
Object* value_obj = *value_slot;
Object* value = *value_slot;
if (value_obj->IsHeapObject()) {
HeapObject* value = HeapObject::cast(value_obj);
collector_->RecordSlot(table, value_slot, value);
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
if (marking_state()->IsWhite(value)) {
collector_->AddEphemeron(key, value);
}
if (value->IsHeapObject()) {
collector_->RecordSlot(table, value_slot, HeapObject::cast(value));
}
}
}
......
This diff is collapsed.
@@ -411,13 +411,6 @@ class MajorNonAtomicMarkingState final
}
};
struct Ephemeron {
HeapObject* key;
HeapObject* value;
};
typedef Worklist<Ephemeron, 64> EphemeronWorklist;
// Weak objects encountered during marking.
struct WeakObjects {
Worklist<WeakCell*, 64> weak_cells;
@@ -427,24 +420,6 @@ struct WeakObjects {
// them in the atomic pause.
Worklist<EphemeronHashTable*, 64> ephemeron_hash_tables;
// Keep track of all ephemerons for concurrent marking tasks. Only store
// ephemerons in these Worklists if both key and value are unreachable at the
// moment.
//
// MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
// worklists.
//
// current_ephemerons is used as draining worklist in the current fixpoint
// iteration.
EphemeronWorklist current_ephemerons;
// Stores ephemerons to visit in the next fixpoint iteration.
EphemeronWorklist next_ephemerons;
// When draining the marking worklist new discovered ephemerons are pushed
// into this worklist.
EphemeronWorklist discovered_ephemerons;
// TODO(marja): For old space, we only need the slot, not the host
// object. Optimize this by adding a different storage for old space.
Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
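The removed comment above names three ephemeron worklists; the code that drains them (MarkCompactCollector::ProcessEphemeronsUntilFixpoint) lives in mark-compact.cc, whose diff is collapsed above. A rough, single-threaded sketch of one fixpoint iteration, assuming (from the comments and from the Worklist::Swap removed further below) that next_ephemerons is swapped into current_ephemerons at the start of each iteration:

  #include <utility>
  #include <vector>

  // Single-threaded stand-in for the segmented, per-task Worklist; illustration only.
  struct HeapObject;
  struct Ephemeron { HeapObject* key; HeapObject* value; };

  struct SimpleWorklist {
    std::vector<Ephemeron> items;
    void Push(Ephemeron e) { items.push_back(e); }
    bool Pop(Ephemeron* out) {
      if (items.empty()) return false;
      *out = items.back();
      items.pop_back();
      return true;
    }
    void Swap(SimpleWorklist& other) { items.swap(other.items); }
  };

  struct EphemeronLists {
    SimpleWorklist current_ephemerons;     // drained during this iteration
    SimpleWorklist next_ephemerons;        // deferred to the next iteration
    SimpleWorklist discovered_ephemerons;  // found while draining the marking worklist
  };

  // One fixpoint iteration: take over the deferred ephemerons, visit them,
  // then visit whatever was discovered while marking. `visit` returns true
  // if it marked a value; the caller repeats while that keeps happening.
  template <typename VisitFn>
  bool ProcessEphemeronsOnce(EphemeronLists* lists, VisitFn visit) {
    bool marked_value = false;
    lists->current_ephemerons.Swap(lists->next_ephemerons);
    Ephemeron e;
    while (lists->current_ephemerons.Pop(&e)) marked_value |= visit(e);
    while (lists->discovered_ephemerons.Pop(&e)) marked_value |= visit(e);
    return marked_value;
  }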
@@ -653,11 +628,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
}
void AddEphemeron(HeapObject* key, HeapObject* value) {
weak_objects_.discovered_ephemerons.Push(kMainThread,
Ephemeron{key, value});
}
void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
}
@@ -735,17 +705,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// if no concurrent threads are running.
void ProcessMarkingWorklist() override;
// Implements ephemeron semantics: Marks value if key is already reachable.
// Returns true if value was actually marked.
bool VisitEphemeron(HeapObject* key, HeapObject* value);
// Marks ephemerons and drains marking worklist iteratively
// until a fixpoint is reached.
void ProcessEphemeronsUntilFixpoint();
// Drains ephemeron and marking worklists. Single iteration of the
// fixpoint iteration.
bool ProcessEphemerons();
// Drains the main thread marking work list. Will mark all pending objects
// if no concurrent threads are running.
void ProcessMarkingWorklistInParallel();
// Callback function for telling whether the object *p is an unmarked
// heap object.
......
@@ -78,15 +78,6 @@ class Worklist {
}
}
// Swaps content with the given worklist. Local buffers need to
// be empty, not thread safe.
void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
CHECK(AreLocalsEmpty());
CHECK(other.AreLocalsEmpty());
global_pool_.Swap(other.global_pool_);
}
bool Push(int task_id, EntryType entry) {
DCHECK_LT(task_id, num_tasks_);
DCHECK_NOT_NULL(private_push_segment(task_id));
@@ -129,15 +120,10 @@ class Worklist {
bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
bool IsGlobalEmpty() {
if (!AreLocalsEmpty()) return false;
return global_pool_.IsEmpty();
}
bool AreLocalsEmpty() {
for (int i = 0; i < num_tasks_; i++) {
if (!IsLocalEmpty(i)) return false;
}
return true;
return global_pool_.IsEmpty();
}
size_t LocalSize(int task_id) {
@@ -274,13 +260,6 @@ class Worklist {
public:
GlobalPool() : top_(nullptr) {}
// Swaps contents, not thread safe.
void Swap(GlobalPool& other) {
Segment* temp = top_;
set_top(other.top_);
other.set_top(temp);
}
V8_INLINE void Push(Segment* segment) {
base::LockGuard<base::Mutex> guard(&lock_);
segment->set_next(top_);
......
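The removed GlobalPool::Swap above works because the global pool is a singly linked list of fixed-size segments, so exchanging the two top pointers hands over all queued work in O(1); the removed comment notes that Swap is not thread safe, while Push (shown above with its LockGuard) is. A minimal, single-threaded model of that structure (illustration only, not the real v8::internal::Worklist):

  #include <array>
  #include <cstddef>
  #include <utility>

  template <typename EntryType, size_t kSegmentSize>
  class TinyGlobalPool {
   public:
    struct Segment {
      std::array<EntryType, kSegmentSize> entries;
      size_t size = 0;
      Segment* next = nullptr;
    };

    // Pushes a filled segment onto the head of the list.
    void Push(Segment* segment) {
      segment->next = top_;
      top_ = segment;
    }

    // Pops the most recently pushed segment, or returns nullptr when empty.
    Segment* Pop() {
      Segment* result = top_;
      if (result != nullptr) top_ = result->next;
      return result;
    }

    bool IsEmpty() const { return top_ == nullptr; }

    // O(1) exchange of all queued segments, mirroring the removed Swap.
    void Swap(TinyGlobalPool& other) { std::swap(top_, other.top_); }

   private:
    Segment* top_ = nullptr;
  };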