Commit 265d5c24 authored by Dominik Inführ, committed by Commit Bot

Revert "Reland "[heap] Visit individual ephemerons instead of collections""

This reverts commit 91f113e2.

Reason for revert: still causes breakage on Chromium-integrated builds, failing here:

  CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());

Original change's description:
> Reland "[heap] Visit individual ephemerons instead of collections"
>
> This is a reland of 9aba0159
>
> Original change's description:
> > [heap] Visit individual ephemerons instead of collections
> >
> > When marking ephemerons visit individual ephemerons with key and value
> > unreachable instead of simply iterating all ephemerons in all weak
> > collections. Also visit ephemerons at end of concurrent marking to do
> > work we would otherwise need to do in the atomic pause.
> >
> > Bug: chromium:844008
> > Change-Id: I3400ad1f81c0cdc0fe6506a1f1146a6743a7fcd7
> > Reviewed-on: https://chromium-review.googlesource.com/1113934
> > Commit-Queue: Dominik Inführ <dinfuehr@google.com>
> > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#54039}
>
> Bug: chromium:844008
> Change-Id: I4c44e74c7cf5fe380ffa4ce9f106bebb57bc023d
> Reviewed-on: https://chromium-review.googlesource.com/1116438
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Dominik Inführ <dinfuehr@google.com>
> Cr-Commit-Position: refs/heads/master@{#54046}

TBR=ulan@chromium.org,dinfuehr@google.com

Change-Id: I4a059e86ba06e0b2562afc311b12d397cd78e857
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:844008
Reviewed-on: https://chromium-review.googlesource.com/1116718
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54047}
parent 91f113e2
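
For orientation before the diff: the reverted approach tracks individual (key, value) ephemeron pairs in worklists and re-marks them in a fixpoint loop. Below is a minimal, self-contained sketch of that fixpoint idea, for illustration only; Color, Obj, Ephemeron and the function names are invented for this example and are not the V8 types touched by this commit.

#include <deque>
#include <iostream>
#include <utility>
#include <vector>

// Tri-color marking state (white = unvisited, grey = on the worklist,
// black = fully scanned).
enum class Color { kWhite, kGrey, kBlack };

struct Obj {
  Color color = Color::kWhite;
  std::vector<Obj*> strong;  // ordinary (strong) references
};

// An ephemeron keeps `value` alive only while `key` is alive.
struct Ephemeron {
  Obj* key;
  Obj* value;
};

namespace {

// Blackens grey objects and greys the white objects they reference.
void DrainWorklist(std::deque<Obj*>* worklist) {
  while (!worklist->empty()) {
    Obj* obj = worklist->front();
    worklist->pop_front();
    obj->color = Color::kBlack;
    for (Obj* target : obj->strong) {
      if (target->color == Color::kWhite) {
        target->color = Color::kGrey;
        worklist->push_back(target);
      }
    }
  }
}

// Alternates regular marking with a pass over the pending ephemerons until
// no ephemeron marks a new value, i.e. until a fixpoint is reached -- the
// same shape as the ProcessEphemeronsUntilFixpoint loop removed below.
void MarkWithEphemerons(std::deque<Obj*> worklist,
                        std::vector<Ephemeron> ephemerons) {
  bool work_to_do = true;
  while (work_to_do) {
    DrainWorklist(&worklist);
    work_to_do = false;
    std::vector<Ephemeron> next;
    for (const Ephemeron& e : ephemerons) {
      if (e.key->color != Color::kWhite && e.value->color == Color::kWhite) {
        // Key is reachable: the value survives and must be traced.
        e.value->color = Color::kGrey;
        worklist.push_back(e.value);
        work_to_do = true;
      } else if (e.key->color == Color::kWhite) {
        // Both key and value unreachable so far: retry next iteration.
        next.push_back(e);
      }
    }
    ephemerons = std::move(next);
  }
}

}  // namespace

int main() {
  // root -> a; ephemerons {a -> b} and {b -> c}: a chain that needs two
  // iterations of the ephemeron pass before c is discovered to be live.
  Obj root, a, b, c, dead;
  root.strong.push_back(&a);
  root.color = Color::kGrey;

  MarkWithEphemerons({&root}, {{&b, &c}, {&a, &b}, {&dead, &root}});

  std::cout << "b live: " << (b.color != Color::kWhite) << "\n"        // 1
            << "c live: " << (c.color != Color::kWhite) << "\n"        // 1
            << "dead live: " << (dead.color != Color::kWhite) << "\n"; // 0
}

The diff restores the older scheme, in which whole EphemeronHashTables are re-scanned in parallel chunks (ProcessWeakCollections) instead of tracking individual pairs.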
@@ -335,6 +335,7 @@
   F(MC_MARK_ROOTS)                            \
   F(MC_MARK_WEAK_CLOSURE)                     \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON)           \
+  F(MC_MARK_WEAK_CLOSURE_EPHEMERON_VISITING)  \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING)   \
   F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES)        \
   F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS)          \
@@ -375,18 +375,11 @@ class ConcurrentMarkingVisitor final
         VisitPointer(table, value_slot);
       } else {
-        Object* value_obj = table->ValueAt(i);
-        if (value_obj->IsHeapObject()) {
-          HeapObject* value = HeapObject::cast(value_obj);
-          MarkCompactCollector::RecordSlot(table, value_slot, value);
-          // Revisit ephemerons with both key and value unreachable at end
-          // of concurrent marking cycle.
-          if (marking_state_.IsWhite(value)) {
-            weak_objects_->discovered_ephemerons.Push(task_id_,
-                                                      Ephemeron{key, value});
-          }
+        Object* value = table->ValueAt(i);
+        if (value->IsHeapObject()) {
+          MarkCompactCollector::RecordSlot(table, value_slot,
+                                           HeapObject::cast(value));
         }
       }
     }
@@ -395,22 +388,6 @@ class ConcurrentMarkingVisitor final
     return table->SizeFromMap(map);
   }
 
-  // Implements ephemeron semantics: Marks value if key is already reachable.
-  // Returns true if value was actually marked.
-  bool VisitEphemeron(HeapObject* key, HeapObject* value) {
-    if (marking_state_.IsBlackOrGrey(key)) {
-      if (marking_state_.WhiteToGrey(value)) {
-        shared_.Push(value);
-        return true;
-      }
-    } else if (marking_state_.IsWhite(value)) {
-      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
-    }
-    return false;
-  }
-
   void MarkObject(HeapObject* object) {
 #ifdef THREAD_SANITIZER
     // Perform a dummy acquire load to tell TSAN that there is no data race
@@ -589,21 +566,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     heap_->isolate()->PrintWithTimestamp(
         "Starting concurrent marking task %d\n", task_id);
   }
-  bool ephemeron_marked = false;
-
   {
     TimedScope scope(&time_ms);
-
-    {
-      Ephemeron ephemeron;
-
-      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
-        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
-          ephemeron_marked = true;
-        }
-      }
-    }
-
     bool done = false;
     while (!done) {
       size_t current_marked_bytes = 0;
@@ -635,17 +600,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
         break;
       }
     }
-
-    if (done) {
-      Ephemeron ephemeron;
-
-      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
-        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
-          ephemeron_marked = true;
-        }
-      }
-    }
-
     shared_->FlushToGlobal(task_id);
     bailout_->FlushToGlobal(task_id);
     on_hold_->FlushToGlobal(task_id);
@@ -653,17 +607,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
     weak_objects_->weak_cells.FlushToGlobal(task_id);
     weak_objects_->transition_arrays.FlushToGlobal(task_id);
     weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
-    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
-    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
-    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
     weak_objects_->weak_references.FlushToGlobal(task_id);
     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     total_marked_bytes_ += marked_bytes;
-
-    if (ephemeron_marked) {
-      set_ephemeron_marked(true);
-    }
-
     {
       base::LockGuard<base::Mutex> guard(&pending_lock_);
       is_pending_[task_id] = false;
@@ -723,9 +669,7 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
     base::LockGuard<base::Mutex> guard(&pending_lock_);
     if (pending_task_count_ > 0) return;
   }
-  if (!shared_->IsGlobalPoolEmpty() ||
-      !weak_objects_->current_ephemerons.IsGlobalEmpty() ||
-      !weak_objects_->discovered_ephemerons.IsGlobalEmpty()) {
+  if (!shared_->IsGlobalPoolEmpty()) {
     ScheduleTasks();
   }
 }
@@ -86,11 +86,6 @@ class ConcurrentMarking {
   size_t TotalMarkedBytes();
 
-  void set_ephemeron_marked(bool ephemeron_marked) {
-    ephemeron_marked_.store(ephemeron_marked);
-  }
-  bool ephemeron_marked() { return ephemeron_marked_.load(); }
-
  private:
   struct TaskState {
     // The main thread sets this flag to true when it wants the concurrent
@@ -110,7 +105,6 @@ class ConcurrentMarking {
   WeakObjects* const weak_objects_;
   TaskState task_state_[kMaxTasks + 1];
   std::atomic<size_t> total_marked_bytes_{0};
-  std::atomic<bool> ephemeron_marked_{false};
   base::Mutex pending_lock_;
   base::ConditionVariable pending_condition_;
   int pending_task_count_ = 0;
@@ -698,6 +698,7 @@ void GCTracer::PrintNVP() const {
           "mark.main=%.1f "
           "mark.weak_closure=%.1f "
           "mark.weak_closure.ephemeron=%.1f "
+          "mark.weak_closure.ephemeron.visiting=%.1f "
           "mark.weak_closure.ephemeron.marking=%.1f "
           "mark.weak_closure.weak_handles=%.1f "
           "mark.weak_closure.weak_roots=%.1f "
@@ -793,6 +794,7 @@ void GCTracer::PrintNVP() const {
           current_.scopes[Scope::MC_MARK_MAIN],
           current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
           current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON],
+          current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_VISITING],
           current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING],
           current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
           current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
@@ -653,87 +653,70 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
   UpdateWeakReferencesAfterScavenge();
 }
 
-namespace {
-template <typename T>
-T* ForwardingAddress(Heap* heap, T* heap_obj) {
-  MapWord map_word = heap_obj->map_word();
-
-  if (map_word.IsForwardingAddress()) {
-    return T::cast(map_word.ToForwardingAddress());
-  } else if (heap->InNewSpace(heap_obj)) {
-    return nullptr;
-  } else {
-    return heap_obj;
-  }
-}
-}  // namespace
-
 void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
   Heap* heap = heap_;
   weak_objects_->weak_references.Update(
       [heap](std::pair<HeapObject*, HeapObjectReference**> slot_in,
              std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
         HeapObject* heap_obj = slot_in.first;
-        HeapObject* forwarded = ForwardingAddress(heap, heap_obj);
-
-        if (forwarded) {
+        MapWord map_word = heap_obj->map_word();
+        if (map_word.IsForwardingAddress()) {
           ptrdiff_t distance_to_slot =
               reinterpret_cast<Address>(slot_in.second) -
               reinterpret_cast<Address>(slot_in.first);
           Address new_slot =
-              reinterpret_cast<Address>(forwarded) + distance_to_slot;
-          slot_out->first = forwarded;
+              reinterpret_cast<Address>(map_word.ToForwardingAddress()) +
+              distance_to_slot;
+          slot_out->first = map_word.ToForwardingAddress();
           slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
           return true;
         }
-
-        return false;
+        if (heap->InNewSpace(heap_obj)) {
+          // The new space object containing the weak reference died.
+          return false;
+        }
+        *slot_out = slot_in;
+        return true;
       });
   weak_objects_->weak_objects_in_code.Update(
       [heap](std::pair<HeapObject*, Code*> slot_in,
              std::pair<HeapObject*, Code*>* slot_out) -> bool {
         HeapObject* heap_obj = slot_in.first;
-        HeapObject* forwarded = ForwardingAddress(heap, heap_obj);
-
-        if (forwarded) {
-          slot_out->first = forwarded;
+        MapWord map_word = heap_obj->map_word();
+        if (map_word.IsForwardingAddress()) {
+          slot_out->first = map_word.ToForwardingAddress();
           slot_out->second = slot_in.second;
           return true;
         }
-
-        return false;
+        if (heap->InNewSpace(heap_obj)) {
+          // The new space object which is referred weakly is dead (i.e., didn't
+          // get scavenged). Drop references to it.
+          return false;
+        }
+        *slot_out = slot_in;
+        return true;
       });
   weak_objects_->ephemeron_hash_tables.Update(
       [heap](EphemeronHashTable* slot_in,
              EphemeronHashTable** slot_out) -> bool {
-        EphemeronHashTable* forwarded = ForwardingAddress(heap, slot_in);
-
-        if (forwarded) {
-          *slot_out = forwarded;
+        HeapObject* heap_obj = slot_in;
+        MapWord map_word = heap_obj->map_word();
+        if (map_word.IsForwardingAddress()) {
+          *slot_out = EphemeronHashTable::cast(map_word.ToForwardingAddress());
           return true;
         }
-
-        return false;
-      });
-
-  auto ephemeron_updater = [heap](Ephemeron slot_in,
-                                  Ephemeron* slot_out) -> bool {
-    HeapObject* key = slot_in.key;
-    HeapObject* value = slot_in.value;
-    HeapObject* forwarded_key = ForwardingAddress(heap, key);
-    HeapObject* forwarded_value = ForwardingAddress(heap, value);
-
-    if (forwarded_key && forwarded_value) {
-      *slot_out = Ephemeron{forwarded_key, forwarded_value};
-      return true;
-    }
-
-    return false;
-  };
-
-  weak_objects_->current_ephemerons.Update(ephemeron_updater);
-  weak_objects_->next_ephemerons.Update(ephemeron_updater);
-  weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
+        if (heap->InNewSpace(heap_obj)) {
+          // An object could die in scavenge even though an earlier full GC's
+          // concurrent marking has already marked it. In the case of an
+          // EphemeronHashTable it would have already been added to the
+          // worklist. If that happens the table needs to be removed again.
+          return false;
+        }
+        *slot_out = slot_in;
+        return true;
+      });
 }
 
 void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
@@ -106,17 +106,10 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
       VisitPointer(table, value_slot);
     } else {
-      Object* value_obj = *value_slot;
-      if (value_obj->IsHeapObject()) {
-        HeapObject* value = HeapObject::cast(value_obj);
-        collector_->RecordSlot(table, value_slot, value);
-        // Revisit ephemerons with both key and value unreachable at end
-        // of concurrent marking cycle.
-        if (marking_state()->IsWhite(value)) {
-          collector_->AddEphemeron(key, value);
-        }
+      Object* value = *value_slot;
+      if (value->IsHeapObject()) {
+        collector_->RecordSlot(table, value_slot, HeapObject::cast(value));
       }
     }
   }
@@ -845,10 +845,6 @@ void MarkCompactCollector::Finish() {
   heap()->VerifyCountersBeforeConcurrentSweeping();
 #endif
 
-  CHECK(weak_objects_.current_ephemerons.IsGlobalEmpty());
-  CHECK(weak_objects_.discovered_ephemerons.IsGlobalEmpty());
-  weak_objects_.next_ephemerons.Clear();
-
   sweeper()->StartSweeperTasks();
   sweeper()->StartIterabilityTasks();
@@ -1468,78 +1464,16 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
   ProcessTopOptimizedFrame(custom_root_body_visitor);
 }
 
-void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
-  bool work_to_do = true;
-
-  while (work_to_do) {
-    if (heap_->local_embedder_heap_tracer()->InUse()) {
-      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
-      heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
-      heap_->local_embedder_heap_tracer()->Trace(
-          0, EmbedderHeapTracer::AdvanceTracingActions(
-                 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
-    }
-
-    // Move ephemerons from next_ephemerons into current_ephemerons to
-    // drain them in this iteration.
-    weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
-    heap()->concurrent_marking()->set_ephemeron_marked(false);
-
-    {
-      TRACE_GC(heap()->tracer(),
-               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
-
-      if (FLAG_parallel_marking) {
-        DCHECK(FLAG_concurrent_marking);
-        heap_->concurrent_marking()->RescheduleTasksIfNeeded();
-      }
-
-      work_to_do = ProcessEphemerons();
-      FinishConcurrentMarking(
-          ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
-    }
-
-    CHECK(weak_objects_.current_ephemerons.IsGlobalEmpty());
-    CHECK(weak_objects_.discovered_ephemerons.IsGlobalEmpty());
-
-    work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
-                 heap()->concurrent_marking()->ephemeron_marked();
-  }
-
-  CHECK(marking_worklist()->IsEmpty());
-  CHECK(weak_objects_.current_ephemerons.IsGlobalEmpty());
-  CHECK(weak_objects_.discovered_ephemerons.IsGlobalEmpty());
-}
-
-bool MarkCompactCollector::ProcessEphemerons() {
-  Ephemeron ephemeron;
-  bool ephemeron_marked = false;
-
-  // Drain current_ephemerons and push ephemerons where key and value are still
-  // unreachable into next_ephemerons.
-  while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
-    if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
-      ephemeron_marked = true;
-    }
-  }
-
-  // Drain marking worklist and push discovered ephemerons into
-  // discovered_ephemerons.
-  ProcessMarkingWorklist();
-
-  // Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
-  // before) and push ephemerons where key and value are still unreachable into
-  // next_ephemerons.
-  while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
-    if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
-      ephemeron_marked = true;
-    }
-  }
-
-  // Flush local ephemerons for main task to global pool.
-  weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
-
-  return ephemeron_marked;
+void MarkCompactCollector::ProcessMarkingWorklistInParallel() {
+  if (FLAG_parallel_marking) {
+    DCHECK(FLAG_concurrent_marking);
+    heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+  }
+  FinishConcurrentMarking(
+      ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+  ProcessMarkingWorklist();
 }
 
 void MarkCompactCollector::ProcessMarkingWorklist() {
@@ -1558,29 +1492,30 @@ void MarkCompactCollector::ProcessMarkingWorklist() {
   DCHECK(marking_worklist()->IsBailoutEmpty());
 }
 
-bool MarkCompactCollector::VisitEphemeron(HeapObject* key, HeapObject* value) {
-  if (marking_state()->IsBlackOrGrey(key)) {
-    if (marking_state()->WhiteToGrey(value)) {
-      marking_worklist()->Push(value);
-      return true;
-    }
-  } else if (marking_state()->IsWhite(value)) {
-    weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value});
-  }
-
-  return false;
-}
-
 void MarkCompactCollector::ProcessEphemeronMarking() {
   DCHECK(marking_worklist()->IsEmpty());
 
-  // Incremental marking might leave ephemerons in main task's local
-  // buffer, flush it into global pool.
-  weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
-
-  ProcessEphemeronsUntilFixpoint();
+  bool work_to_do = true;
+  while (work_to_do) {
+    if (heap_->local_embedder_heap_tracer()->InUse()) {
+      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+      heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+      heap_->local_embedder_heap_tracer()->Trace(
+          0, EmbedderHeapTracer::AdvanceTracingActions(
+                 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+    }
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_VISITING);
+      ProcessWeakCollections();
+    }
+    work_to_do = !marking_worklist()->IsEmpty();
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
+      ProcessMarkingWorklistInParallel();
+    }
+  }
 
   CHECK(marking_worklist()->IsEmpty());
   CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
 }
@@ -1946,6 +1881,130 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
   heap_->RightTrimFixedArray(indices, to_trim);
 }
 
+class EphemeronHashTableMarkingItem : public ItemParallelJob::Item {
+ public:
+  explicit EphemeronHashTableMarkingItem(EphemeronHashTable* table, int offset)
+      : table_(table), offset_(offset) {}
+  virtual ~EphemeronHashTableMarkingItem() {}
+
+  EphemeronHashTable* table() const { return table_; }
+  int offset() const { return offset_; }
+
+ private:
+  EphemeronHashTable* table_;
+  int offset_;
+};
+
+class EphemeronHashTableMarkingTask : public ItemParallelJob::Task {
+ public:
+  EphemeronHashTableMarkingTask(
+      Isolate* isolate, MarkCompactCollector* collector,
+      MarkCompactCollector::MarkingWorklist::ConcurrentMarkingWorklist*
+          worklist,
+      int task_id)
+      : ItemParallelJob::Task(isolate),
+        collector_(collector),
+        worklist_(worklist),
+        task_id_(task_id) {}
+
+  void RunInParallel() override {
+    EphemeronHashTableMarkingItem* item = nullptr;
+    while ((item = GetItem<EphemeronHashTableMarkingItem>()) != nullptr) {
+      EphemeronHashTable* table = item->table();
+      int start = item->offset();
+      int limit = Min(start + MarkCompactCollector::kEphemeronChunkSize,
+                      table->Capacity());
+
+      for (int i = start; i < limit; i++) {
+        HeapObject* key = HeapObject::cast(table->KeyAt(i));
+        if (collector_->marking_state()->IsBlackOrGrey(key)) {
+          Object* value_obj = table->ValueAt(i);
+          if (value_obj->IsHeapObject()) {
+            HeapObject* value = HeapObject::cast(value_obj);
+            if (collector_->marking_state()->WhiteToGrey(value)) {
+              worklist_->Push(task_id_, value);
+
+              if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+                collector_->heap()->AddEphemeronRetainer(key, value);
+                collector_->heap()->AddRetainer(table, value);
+              }
+            }
+          }
+        }
+
+        // Record slots if that wasn't done already in concurrent or
+        // incremental marking
+        if (V8_UNLIKELY(!FLAG_optimize_ephemerons)) {
+          Object** key_slot =
+              table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
+          HeapObject* key = HeapObject::cast(table->KeyAt(i));
+          if (collector_->marking_state()->IsBlackOrGrey(key)) {
+            collector_->RecordSlot(table, key_slot, key);
+
+            Object* value = table->ValueAt(i);
+            if (value->IsHeapObject()) {
+              Object** value_slot = table->RawFieldOfElementAt(
+                  EphemeronHashTable::EntryToValueIndex(i));
+              collector_->RecordSlot(table, value_slot,
+                                     HeapObject::cast(value));
+            }
+          }
+        }
+      }
+
+      item->MarkFinished();
+    }
+
+    worklist_->FlushToGlobal(task_id_);
+  }
+
+ private:
+  MarkCompactCollector* collector_;
+  MarkCompactCollector::MarkingWorklist::ConcurrentMarkingWorklist* worklist_;
+  int task_id_;
+};
+
+void MarkCompactCollector::ProcessWeakCollections() {
+  CHECK(heap()->concurrent_marking()->IsStopped());
+
+  ItemParallelJob marking_job(isolate()->cancelable_task_manager(),
+                              &page_parallel_job_semaphore_);
+
+  size_t elements = 0;
+
+  // Split EphemeronHashTables into chunks such that we can divide work more
+  // equally between tasks
+  weak_objects_.ephemeron_hash_tables.Iterate([&](EphemeronHashTable* table) {
+    int capacity = table->Capacity();
+    int chunks = (capacity + kEphemeronChunkSize - 1) / kEphemeronChunkSize;
+    elements += static_cast<size_t>(capacity);
+
+    for (int i = 0; i < chunks; i++) {
+      marking_job.AddItem(
+          new EphemeronHashTableMarkingItem(table, i * kEphemeronChunkSize));
+    }
+  });
+
+  int num_tasks = NumberOfParallelEphemeronVisitingTasks(elements);
+  for (int i = 0; i < num_tasks; i++) {
+    marking_job.AddTask(new EphemeronHashTableMarkingTask(
+        isolate(), this, marking_worklist_.shared(), i));
+  }
+
+  marking_job.Run(isolate()->async_counters());
+}
+
+int MarkCompactCollector::NumberOfParallelEphemeronVisitingTasks(
+    size_t elements) {
+  DCHECK_GE(elements, 0);
+  if (!FLAG_parallel_ephemeron_visiting || elements == 0) return 1;
+
+  size_t chunks = (elements + kEphemeronChunkSize - 1) / kEphemeronChunkSize;
+  const size_t kMaxNumTasks =
+      MarkingWorklist::ConcurrentMarkingWorklist::kMaxNumTasks;
+  return Min(NumberOfAvailableCores(),
+             static_cast<int>(Min(chunks, kMaxNumTasks)));
+}
+
 void MarkCompactCollector::ClearWeakCollections() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
   EphemeronHashTable* table;
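
(A worked example of the chunking restored above, assuming a hypothetical kEphemeronChunkSize of 1024 -- the constant's value is not visible in this diff: a table with Capacity() == 3000 is split into (3000 + 1023) / 1024 = 3 items at offsets 0, 1024 and 2048, and NumberOfParallelEphemeronVisitingTasks(3000) returns Min(NumberOfAvailableCores(), Min(3, kMaxNumTasks)), so at most three tasks scan that table concurrently.)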
@@ -2026,9 +2085,6 @@ void MarkCompactCollector::AbortWeakObjects() {
   weak_objects_.weak_cells.Clear();
   weak_objects_.transition_arrays.Clear();
   weak_objects_.ephemeron_hash_tables.Clear();
-  weak_objects_.current_ephemerons.Clear();
-  weak_objects_.next_ephemerons.Clear();
-  weak_objects_.discovered_ephemerons.Clear();
   weak_objects_.weak_references.Clear();
   weak_objects_.weak_objects_in_code.Clear();
 }
@@ -411,13 +411,6 @@ class MajorNonAtomicMarkingState final
   }
 };
 
-struct Ephemeron {
-  HeapObject* key;
-  HeapObject* value;
-};
-
-typedef Worklist<Ephemeron, 64> EphemeronWorklist;
-
 // Weak objects encountered during marking.
 struct WeakObjects {
   Worklist<WeakCell*, 64> weak_cells;
@@ -427,24 +420,6 @@ struct WeakObjects {
   // them in the atomic pause.
   Worklist<EphemeronHashTable*, 64> ephemeron_hash_tables;
 
-  // Keep track of all ephemerons for concurrent marking tasks. Only store
-  // ephemerons in these Worklists if both key and value are unreachable at the
-  // moment.
-  //
-  // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
-  // worklists.
-  //
-  // current_ephemerons is used as draining worklist in the current fixpoint
-  // iteration.
-  EphemeronWorklist current_ephemerons;
-
-  // Stores ephemerons to visit in the next fixpoint iteration.
-  EphemeronWorklist next_ephemerons;
-
-  // When draining the marking worklist new discovered ephemerons are pushed
-  // into this worklist.
-  EphemeronWorklist discovered_ephemerons;
-
   // TODO(marja): For old space, we only need the slot, not the host
   // object. Optimize this by adding a different storage for old space.
   Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
@@ -653,11 +628,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
     weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
   }
 
-  void AddEphemeron(HeapObject* key, HeapObject* value) {
-    weak_objects_.discovered_ephemerons.Push(kMainThread,
-                                             Ephemeron{key, value});
-  }
-
   void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
     weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
   }
@@ -735,17 +705,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // if no concurrent threads are running.
   void ProcessMarkingWorklist() override;
 
-  // Implements ephemeron semantics: Marks value if key is already reachable.
-  // Returns true if value was actually marked.
-  bool VisitEphemeron(HeapObject* key, HeapObject* value);
-
-  // Marks ephemerons and drains marking worklist iteratively
-  // until a fixpoint is reached.
-  void ProcessEphemeronsUntilFixpoint();
-
-  // Drains ephemeron and marking worklists. Single iteration of the
-  // fixpoint iteration.
-  bool ProcessEphemerons();
+  // Drains the main thread marking work list. Will mark all pending objects
+  // if no concurrent threads are running.
+  void ProcessMarkingWorklistInParallel();
 
   // Callback function for telling whether the object *p is an unmarked
   // heap object.
@@ -78,15 +78,6 @@ class Worklist {
     }
   }
 
-  // Swaps content with the given worklist. Local buffers need to
-  // be empty, not thread safe.
-  void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
-    CHECK(AreLocalsEmpty());
-    CHECK(other.AreLocalsEmpty());
-    global_pool_.Swap(other.global_pool_);
-  }
-
   bool Push(int task_id, EntryType entry) {
     DCHECK_LT(task_id, num_tasks_);
     DCHECK_NOT_NULL(private_push_segment(task_id));
@@ -129,15 +120,10 @@ class Worklist {
   bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
 
   bool IsGlobalEmpty() {
-    if (!AreLocalsEmpty()) return false;
-    return global_pool_.IsEmpty();
-  }
-
-  bool AreLocalsEmpty() {
     for (int i = 0; i < num_tasks_; i++) {
       if (!IsLocalEmpty(i)) return false;
     }
-    return true;
+    return global_pool_.IsEmpty();
   }
 
   size_t LocalSize(int task_id) {
@@ -274,13 +260,6 @@ class Worklist {
  public:
   GlobalPool() : top_(nullptr) {}
 
-  // Swaps contents, not thread safe.
-  void Swap(GlobalPool& other) {
-    Segment* temp = top_;
-    set_top(other.top_);
-    other.set_top(temp);
-  }
-
   V8_INLINE void Push(Segment* segment) {
     base::LockGuard<base::Mutex> guard(&lock_);
     segment->set_next(top_);