Commit 19b9b6eb authored by Michael Lippautz, committed by V8 LUCI CQ

heap: Scavenger convert uses of Worklist to heap::base::Worklist

This CL converts uses of v8::internal::Worklist to heap::base::Worklist,
which does not require knowing the number of tasks working with the
worklist upfront. heap::base::Worklist is the common implementation
for V8's heap and cppgc and should be used and optimized going forward.

Bug: v8:12426
Change-Id: I35713938ff80f43a763470f8bdf7e242439080f5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3297903
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78066}
parent aebd506e
......@@ -52,9 +52,9 @@ class Worklist {
bool Pop(Segment** segment);
// Returns true if the list of segments is empty.
bool IsEmpty();
bool IsEmpty() const;
// Returns the number of segments in the list.
size_t Size();
size_t Size() const;
// Moves the segments of the given marking worklist into this
// marking worklist.
......@@ -100,13 +100,13 @@ bool Worklist<EntryType, SegmentSize>::Pop(Segment** segment) {
}
template <typename EntryType, uint16_t SegmentSize>
bool Worklist<EntryType, SegmentSize>::IsEmpty() {
bool Worklist<EntryType, SegmentSize>::IsEmpty() const {
return v8::base::AsAtomicPtr(&top_)->load(std::memory_order_relaxed) ==
nullptr;
}
template <typename EntryType, uint16_t SegmentSize>
size_t Worklist<EntryType, SegmentSize>::Size() {
size_t Worklist<EntryType, SegmentSize>::Size() const {
// It is safe to read |size_| without a lock since this variable is
// atomic, keeping in mind that threads may not immediately see the new
// value when it is updated.
......
......@@ -11,12 +11,12 @@
#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
#include "src/heap/base/worklist.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
......@@ -180,7 +180,7 @@ class RememberedSet : public AllStatic {
template <typename Callback>
static int IterateAndTrackEmptyBuckets(
MemoryChunk* chunk, Callback callback,
Worklist<MemoryChunk*, 64>::View empty_chunks) {
::heap::base::Worklist<MemoryChunk*, 64>::Local* empty_chunks) {
SlotSet* slot_set = chunk->slot_set<type>();
int slots = 0;
if (slot_set != nullptr) {
......@@ -189,7 +189,7 @@ class RememberedSet : public AllStatic {
slots += slot_set->IterateAndTrackEmptyBuckets(chunk->address(), 0,
chunk->buckets(), callback,
possibly_empty_buckets);
if (!possibly_empty_buckets->IsEmpty()) empty_chunks.Push(chunk);
if (!possibly_empty_buckets->IsEmpty()) empty_chunks->Push(chunk);
}
return slots;
}
......
......@@ -16,93 +16,59 @@
namespace v8 {
namespace internal {
void Scavenger::PromotionList::View::PushRegularObject(HeapObject object,
int size) {
promotion_list_->PushRegularObject(task_id_, object, size);
void Scavenger::PromotionList::Local::PushRegularObject(HeapObject object,
int size) {
regular_object_promotion_list_local_.Push({object, size});
}
void Scavenger::PromotionList::View::PushLargeObject(HeapObject object, Map map,
int size) {
promotion_list_->PushLargeObject(task_id_, object, map, size);
void Scavenger::PromotionList::Local::PushLargeObject(HeapObject object,
Map map, int size) {
large_object_promotion_list_local_.Push({object, map, size});
}
bool Scavenger::PromotionList::View::IsEmpty() {
return promotion_list_->IsEmpty();
size_t Scavenger::PromotionList::Local::LocalPushSegmentSize() const {
return regular_object_promotion_list_local_.PushSegmentSize() +
large_object_promotion_list_local_.PushSegmentSize();
}
size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
return promotion_list_->LocalPushSegmentSize(task_id_);
}
bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
return promotion_list_->Pop(task_id_, entry);
}
void Scavenger::PromotionList::View::FlushToGlobal() {
promotion_list_->FlushToGlobal(task_id_);
}
bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
return promotion_list_->IsGlobalPoolEmpty();
}
bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
}
void Scavenger::PromotionList::PushRegularObject(int task_id, HeapObject object,
int size) {
regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
}
void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject object,
Map map, int size) {
large_object_promotion_list_.Push(task_id, {object, map, size});
}
bool Scavenger::PromotionList::IsEmpty() {
return regular_object_promotion_list_.IsEmpty() &&
large_object_promotion_list_.IsEmpty();
}
size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
large_object_promotion_list_.LocalPushSegmentSize(task_id);
}
bool Scavenger::PromotionList::Pop(int task_id,
struct PromotionListEntry* entry) {
bool Scavenger::PromotionList::Local::Pop(struct PromotionListEntry* entry) {
ObjectAndSize regular_object;
if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
if (regular_object_promotion_list_local_.Pop(&regular_object)) {
entry->heap_object = regular_object.first;
entry->size = regular_object.second;
entry->map = entry->heap_object.map();
return true;
}
return large_object_promotion_list_.Pop(task_id, entry);
}
void Scavenger::PromotionList::FlushToGlobal(int task_id) {
regular_object_promotion_list_.FlushToGlobal(task_id);
large_object_promotion_list_.FlushToGlobal(task_id);
return large_object_promotion_list_local_.Pop(entry);
}
size_t Scavenger::PromotionList::GlobalPoolSize() const {
return regular_object_promotion_list_.GlobalPoolSize() +
large_object_promotion_list_.GlobalPoolSize();
void Scavenger::PromotionList::Local::Publish() {
regular_object_promotion_list_local_.Publish();
large_object_promotion_list_local_.Publish();
}
bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
large_object_promotion_list_.IsGlobalPoolEmpty();
bool Scavenger::PromotionList::Local::IsGlobalPoolEmpty() const {
return regular_object_promotion_list_local_.IsGlobalEmpty() &&
large_object_promotion_list_local_.IsGlobalEmpty();
}
bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
bool Scavenger::PromotionList::Local::ShouldEagerlyProcessPromotionList()
const {
// Threshold when to prioritize processing of the promotion list. Right
// now we only look into the regular object list.
const int kProcessPromotionListThreshold =
kRegularObjectPromotionListSegmentSize / 2;
return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
return LocalPushSegmentSize() < kProcessPromotionListThreshold;
}
bool Scavenger::PromotionList::IsEmpty() const {
return regular_object_promotion_list_.IsEmpty() &&
large_object_promotion_list_.IsEmpty();
}
size_t Scavenger::PromotionList::Size() const {
return regular_object_promotion_list_.Size() +
large_object_promotion_list_.Size();
}
void Scavenger::PageMemoryFence(MaybeObject object) {
......@@ -169,7 +135,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
}
HeapObjectReference::Update(slot, target);
if (object_fields == ObjectFields::kMaybePointers) {
copied_list_.Push(ObjectAndSize(target, object_size));
copied_list_local_.Push(ObjectAndSize(target, object_size));
}
copied_size_ += object_size;
return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
......@@ -217,7 +183,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
}
HeapObjectReference::Update(slot, target);
if (object_fields == ObjectFields::kMaybePointers) {
promotion_list_.PushRegularObject(target, object_size);
promotion_list_local_.PushRegularObject(target, object_size);
}
promoted_size_ += object_size;
return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
......@@ -246,7 +212,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
promotion_list_.PushLargeObject(object, map, object_size);
promotion_list_local_.PushLargeObject(object, map, object_size);
}
}
return true;
......
......@@ -212,9 +212,9 @@ size_t ScavengerCollector::JobTask::GetMaxConcurrency(
// GlobalPoolSize() of copied_list_ and promotion_list_.
return std::min<size_t>(
scavengers_->size(),
std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
worker_count + copied_list_->GlobalPoolSize() +
promotion_list_->GlobalPoolSize()));
std::max<size_t>(
remaining_memory_chunks_.load(std::memory_order_relaxed),
worker_count + copied_list_->Size() + promotion_list_->Size()));
}
void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
......@@ -272,11 +272,11 @@ void ScavengerCollector::CollectGarbage() {
DCHECK(surviving_new_large_objects_.empty());
std::vector<std::unique_ptr<Scavenger>> scavengers;
Worklist<MemoryChunk*, 64> empty_chunks;
Scavenger::EmptyChunksList empty_chunks;
const int num_scavenge_tasks = NumberOfScavengeTasks();
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
Scavenger::CopiedList copied_list;
Scavenger::PromotionList promotion_list;
EphemeronTableList ephemeron_table_list;
{
Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
......@@ -341,7 +341,7 @@ void ScavengerCollector::CollectGarbage() {
heap_->IterateRoots(&root_scavenge_visitor, options);
isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
&root_scavenge_visitor);
scavengers[kMainThreadId]->Flush();
scavengers[kMainThreadId]->Publish();
}
{
// Parallel phase scavenging all copied and promoted objects.
......@@ -428,9 +428,9 @@ void ScavengerCollector::CollectGarbage() {
{
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
Scavenger::EmptyChunksList::Local empty_chunks_local(&empty_chunks);
MemoryChunk* chunk;
while (empty_chunks.Pop(kMainThreadId, &chunk)) {
while (empty_chunks_local.Pop(&chunk)) {
// Since sweeping was already restarted only check chunks that already got
// swept.
if (chunk->SweepingDone()) {
......@@ -534,16 +534,22 @@ int ScavengerCollector::NumberOfScavengeTasks() {
return tasks;
}
Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
: regular_object_promotion_list_local_(
&promotion_list->regular_object_promotion_list_),
large_object_promotion_list_local_(
&promotion_list->large_object_promotion_list_) {}
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
Worklist<MemoryChunk*, 64>* empty_chunks,
CopiedList* copied_list, PromotionList* promotion_list,
EmptyChunksList* empty_chunks, CopiedList* copied_list,
PromotionList* promotion_list,
EphemeronTableList* ephemeron_table_list, int task_id)
: collector_(collector),
heap_(heap),
empty_chunks_(empty_chunks, task_id),
promotion_list_(promotion_list, task_id),
copied_list_(copied_list, task_id),
ephemeron_table_list_(ephemeron_table_list, task_id),
empty_chunks_local_(empty_chunks),
promotion_list_local_(promotion_list),
copied_list_local_(copied_list),
ephemeron_table_list_local_(ephemeron_table_list),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
copied_size_(0),
promoted_size_(0),
......@@ -602,7 +608,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndScavengeObject(heap_, slot);
},
empty_chunks_);
&empty_chunks_local_);
}
if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
......@@ -641,24 +647,24 @@ void Scavenger::Process(JobDelegate* delegate) {
do {
done = true;
ObjectAndSize object_and_size;
while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
copied_list_.Pop(&object_and_size)) {
while (promotion_list_local_.ShouldEagerlyProcessPromotionList() &&
copied_list_local_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
if (delegate && ((++objects % kInterruptThreshold) == 0)) {
if (!copied_list_.IsGlobalPoolEmpty()) {
if (!copied_list_local_.IsEmpty()) {
delegate->NotifyConcurrencyIncrease();
}
}
}
struct PromotionListEntry entry;
while (promotion_list_.Pop(&entry)) {
while (promotion_list_local_.Pop(&entry)) {
HeapObject target = entry.heap_object;
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
if (delegate && ((++objects % kInterruptThreshold) == 0)) {
if (!promotion_list_.IsGlobalPoolEmpty()) {
if (!promotion_list_local_.IsGlobalPoolEmpty()) {
delegate->NotifyConcurrencyIncrease();
}
}
......@@ -735,8 +741,8 @@ void Scavenger::Finalize() {
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
empty_chunks_.FlushToGlobal();
ephemeron_table_list_.FlushToGlobal();
empty_chunks_local_.Publish();
ephemeron_table_list_local_.Publish();
for (auto it = ephemeron_remembered_set_.begin();
it != ephemeron_remembered_set_.end(); ++it) {
auto insert_result = heap()->ephemeron_remembered_set_.insert(
......@@ -747,13 +753,13 @@ void Scavenger::Finalize() {
}
}
void Scavenger::Flush() {
copied_list_.FlushToGlobal();
promotion_list_.FlushToGlobal();
void Scavenger::Publish() {
copied_list_local_.Publish();
promotion_list_local_.Publish();
}
void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
ephemeron_table_list_.Push(table);
ephemeron_table_list_local_.Push(table);
}
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
......
......@@ -6,12 +6,12 @@
#define V8_HEAP_SCAVENGER_H_
#include "src/base/platform/condition-variable.h"
#include "src/heap/base/worklist.h"
#include "src/heap/index-generator.h"
#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/slot-set.h"
#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
......@@ -33,7 +33,7 @@ using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject, Map>;
constexpr int kEphemeronTableListSegmentSize = 128;
using EphemeronTableList =
Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
::heap::base::Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
class ScavengerCollector;
......@@ -47,58 +47,49 @@ class Scavenger {
class PromotionList {
public:
class View {
static constexpr size_t kRegularObjectPromotionListSegmentSize = 256;
static constexpr size_t kLargeObjectPromotionListSegmentSize = 4;
using RegularObjectPromotionList =
::heap::base::Worklist<ObjectAndSize,
kRegularObjectPromotionListSegmentSize>;
using LargeObjectPromotionList =
::heap::base::Worklist<PromotionListEntry,
kLargeObjectPromotionListSegmentSize>;
class Local {
public:
View(PromotionList* promotion_list, int task_id)
: promotion_list_(promotion_list), task_id_(task_id) {}
explicit Local(PromotionList* promotion_list);
inline void PushRegularObject(HeapObject object, int size);
inline void PushLargeObject(HeapObject object, Map map, int size);
inline bool IsEmpty();
inline size_t LocalPushSegmentSize();
inline size_t LocalPushSegmentSize() const;
inline bool Pop(struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList();
inline void FlushToGlobal();
inline bool IsGlobalPoolEmpty() const;
inline bool ShouldEagerlyProcessPromotionList() const;
inline void Publish();
private:
PromotionList* promotion_list_;
int task_id_;
RegularObjectPromotionList::Local regular_object_promotion_list_local_;
LargeObjectPromotionList::Local large_object_promotion_list_local_;
};
explicit PromotionList(int num_tasks)
: regular_object_promotion_list_(num_tasks),
large_object_promotion_list_(num_tasks) {}
inline void PushRegularObject(int task_id, HeapObject object, int size);
inline void PushLargeObject(int task_id, HeapObject object, Map map,
int size);
inline bool IsEmpty();
inline size_t GlobalPoolSize() const;
inline size_t LocalPushSegmentSize(int task_id);
inline bool Pop(int task_id, struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList(int task_id);
inline void FlushToGlobal(int task_id);
inline bool IsEmpty() const;
inline size_t Size() const;
private:
static const int kRegularObjectPromotionListSegmentSize = 256;
static const int kLargeObjectPromotionListSegmentSize = 4;
using RegularObjectPromotionList =
Worklist<ObjectAndSize, kRegularObjectPromotionListSegmentSize>;
using LargeObjectPromotionList =
Worklist<PromotionListEntry, kLargeObjectPromotionListSegmentSize>;
RegularObjectPromotionList regular_object_promotion_list_;
LargeObjectPromotionList large_object_promotion_list_;
};
static const int kCopiedListSegmentSize = 256;
using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
using CopiedList =
::heap::base::Worklist<ObjectAndSize, kCopiedListSegmentSize>;
using EmptyChunksList = ::heap::base::Worklist<MemoryChunk*, 64>;
Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
Worklist<MemoryChunk*, 64>* empty_chunks, CopiedList* copied_list,
EmptyChunksList* empty_chunks, CopiedList* copied_list,
PromotionList* promotion_list,
EphemeronTableList* ephemeron_table_list, int task_id);
......@@ -112,7 +103,7 @@ class Scavenger {
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
void Flush();
void Publish();
void AddEphemeronHashTable(EphemeronHashTable table);
......@@ -198,10 +189,10 @@ class Scavenger {
ScavengerCollector* const collector_;
Heap* const heap_;
Worklist<MemoryChunk*, 64>::View empty_chunks_;
PromotionList::View promotion_list_;
CopiedList::View copied_list_;
EphemeronTableList::View ephemeron_table_list_;
EmptyChunksList::Local empty_chunks_local_;
PromotionList::Local promotion_list_local_;
CopiedList::Local copied_list_local_;
EphemeronTableList::Local ephemeron_table_list_local_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment