Commit c3e8b003 authored by Michael Lippautz, committed by Commit Bot

[heap] Move page scavenging logic into Scavenger

Mechanical move that simplifies a follow-up which removes the
recursive locking strategy by properly partitioning pages.

Bug: v8:6923
Change-Id: I688e61131731e2b9dc9c311b0b43f0902c149359
Reviewed-on: https://chromium-review.googlesource.com/723020
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48640}
parent 594803c9
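For readers skimming the diff below: after this change there is exactly one concrete item type, PageScavengingItem, which only remembers which MemoryChunk to process and forwards to the new Scavenger::ScavengePage entry point; the old ScavengingItem base class and the page-walking logic in heap.cc go away. The following is a minimal, self-contained sketch of that shape, not V8 code — the Scavenger, MemoryChunk, and "job" types are simplified stand-ins, and only the delegation pattern mirrors the CL.

```cpp
// Minimal sketch of the item/delegation shape this CL establishes.
// Scavenger, MemoryChunk, and the "job" below are simplified stand-ins
// for the V8 classes of similar names, not the real implementations.
#include <cstdio>
#include <memory>
#include <vector>

struct MemoryChunk {
  explicit MemoryChunk(int id) : id(id) {}
  int id;
};

class Scavenger {
 public:
  // Mirrors the new Scavenger::ScavengePage entry point: all per-page
  // logic (locking, remembered-set iteration) now lives behind this call.
  void ScavengePage(MemoryChunk* page) {
    std::printf("scavenging page %d\n", page->id);
  }
};

// The single concrete item type after this CL: it carries only the chunk
// and delegates the actual work to the scavenger.
class PageScavengingItem {
 public:
  explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
  void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }

 private:
  MemoryChunk* const chunk_;
};

int main() {
  // Stand-in for Heap::Scavenge(): one item per page with old-to-new slots.
  std::vector<MemoryChunk> pages;
  for (int i = 0; i < 3; i++) pages.emplace_back(i);

  std::vector<std::unique_ptr<PageScavengingItem>> job;
  for (MemoryChunk& page : pages) {
    job.push_back(std::make_unique<PageScavengingItem>(&page));
  }

  // Stand-in for a ScavengingTask draining the item queue.
  Scavenger scavenger;
  for (auto& item : job) item->Process(&scavenger);
  return 0;
}
```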
@@ -1805,10 +1805,15 @@ static bool IsLogging(Isolate* isolate) {
           isolate->heap_profiler()->is_tracking_object_moves());
 }
 
-class ScavengingItem : public ItemParallelJob::Item {
+class PageScavengingItem final : public ItemParallelJob::Item {
  public:
-  virtual ~ScavengingItem() {}
-  virtual void Process(Scavenger* scavenger) = 0;
+  explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+  virtual ~PageScavengingItem() {}
+
+  void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
+
+ private:
+  MemoryChunk* const chunk_;
 };
 
 class ScavengingTask final : public ItemParallelJob::Task {
@@ -1824,8 +1829,8 @@ class ScavengingTask final : public ItemParallelJob::Task {
     {
       barrier_->Start();
       TimedScope scope(&scavenging_time);
-      ScavengingItem* item = nullptr;
-      while ((item = GetItem<ScavengingItem>()) != nullptr) {
+      PageScavengingItem* item = nullptr;
+      while ((item = GetItem<PageScavengingItem>()) != nullptr) {
         item->Process(scavenger_);
         item->MarkFinished();
       }
@@ -1848,41 +1853,6 @@ class ScavengingTask final : public ItemParallelJob::Task {
   OneshotBarrier* const barrier_;
 };
 
-class PageScavengingItem final : public ScavengingItem {
- public:
-  explicit PageScavengingItem(Heap* heap, MemoryChunk* chunk)
-      : heap_(heap), chunk_(chunk) {}
-  virtual ~PageScavengingItem() {}
-
-  void Process(Scavenger* scavenger) final {
-    base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
-    scavenger->AnnounceLockedPage(chunk_);
-    RememberedSet<OLD_TO_NEW>::Iterate(
-        chunk_,
-        [this, scavenger](Address addr) {
-          return scavenger->CheckAndScavengeObject(heap_, addr);
-        },
-        SlotSet::KEEP_EMPTY_BUCKETS);
-    RememberedSet<OLD_TO_NEW>::IterateTyped(
-        chunk_,
-        [this, scavenger](SlotType type, Address host_addr, Address addr) {
-          return UpdateTypedSlotHelper::UpdateTypedSlot(
-              heap_->isolate(), type, addr, [this, scavenger](Object** addr) {
-                // We expect that objects referenced by code are long
-                // living. If we do not force promotion, then we need to
-                // clear old_to_new slots in dead code objects after
-                // mark-compact.
-                return scavenger->CheckAndScavengeObject(
-                    heap_, reinterpret_cast<Address>(addr));
-              });
-        });
-  }
-
- private:
-  Heap* const heap_;
-  MemoryChunk* const chunk_;
-};
-
 int Heap::NumberOfScavengeTasks() {
   if (!FLAG_parallel_scavenge) return 1;
   const int num_scavenge_tasks =
@@ -1945,8 +1915,8 @@ void Heap::Scavenge() {
   }
 
   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
-      this, [this, &job](MemoryChunk* chunk) {
-        job.AddItem(new PageScavengingItem(this, chunk));
+      this, [&job](MemoryChunk* chunk) {
+        job.AddItem(new PageScavengingItem(chunk));
       });
 
   {
......
@@ -86,6 +86,24 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
   target->IterateBody(target->map()->instance_type(), size, &visitor);
 }
 
+void Scavenger::ScavengePage(MemoryChunk* page) {
+  base::LockGuard<base::RecursiveMutex> guard(page->mutex());
+  AnnounceLockedPage(page);
+
+  RememberedSet<OLD_TO_NEW>::Iterate(
+      page,
+      [this](Address addr) { return CheckAndScavengeObject(heap_, addr); },
+      SlotSet::KEEP_EMPTY_BUCKETS);
+  RememberedSet<OLD_TO_NEW>::IterateTyped(
+      page, [this](SlotType type, Address host_addr, Address addr) {
+        return UpdateTypedSlotHelper::UpdateTypedSlot(
+            heap_->isolate(), type, addr, [this](Object** addr) {
+              return CheckAndScavengeObject(heap(),
+                                            reinterpret_cast<Address>(addr));
+            });
+      });
+}
+
 void Scavenger::Process(OneshotBarrier* barrier) {
   // Threshold when to switch processing the promotion list to avoid
   // allocating too much backing store in the worklist.
......
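Note that ScavengePage still takes a base::LockGuard on a base::RecursiveMutex — the "recursive locking strategy" the commit message says a follow-up will remove. A recursive mutex lets the same thread re-acquire a lock it already holds, which is what per-slot scavenging under a held page lock relies on here. Below is a minimal standard-C++ illustration of that re-entrancy property, using std::recursive_mutex as a stand-in for V8's base::RecursiveMutex; the function names are illustrative only.

```cpp
// Minimal illustration (standard C++, not V8 code) of why a recursive
// mutex is needed when the same thread may lock a page twice.
#include <cstdio>
#include <mutex>

std::recursive_mutex page_mutex;

void scavenge_slot() {
  // Re-entered from scavenge_page() on the same thread: with
  // std::recursive_mutex this succeeds; a plain std::mutex would deadlock.
  std::lock_guard<std::recursive_mutex> guard(page_mutex);
  std::printf("slot scavenged under nested lock\n");
}

void scavenge_page() {
  std::lock_guard<std::recursive_mutex> guard(page_mutex);
  scavenge_slot();  // Nested acquisition of the same mutex.
}

int main() { scavenge_page(); }
```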
@@ -28,14 +28,9 @@ class Scavenger {
   Scavenger(Heap* heap, bool is_logging, CopiedList* copied_list,
             PromotionList* promotion_list, int task_id);
 
-  // Scavenges an object |object| referenced from slot |p|. |object| is required
-  // to be in from space.
-  inline void ScavengeObject(HeapObject** p, HeapObject* object);
-
-  // Potentially scavenges an object referenced from |slot_address| if it is
-  // indeed a HeapObject and resides in from space.
-  inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
-                                                   Address slot_address);
+  // Entry point for scavenging an old generation page. For scavenging single
+  // objects see RootScavengingVisitor and ScavengeVisitor below.
+  void ScavengePage(MemoryChunk* page);
 
   // Processes remaining work (=objects) after single objects have been
   // manually scavenged using ScavengeObject or CheckAndScavengeObject.
@@ -61,6 +56,15 @@ class Scavenger {
 
   inline void PageMemoryFence(Object* object);
 
+  // Potentially scavenges an object referenced from |slot_address| if it is
+  // indeed a HeapObject and resides in from space.
+  inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
+                                                   Address slot_address);
+
+  // Scavenges an object |object| referenced from slot |p|. |object| is required
+  // to be in from space.
+  inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
   // Copies |source| to |target| and sets the forwarding pointer in |source|.
   V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
                                int size);
@@ -106,6 +110,8 @@ class Scavenger {
   const bool is_compacting_;
 
   friend class IterateAndScavengePromotedObjectsVisitor;
+  friend class RootScavengeVisitor;
+  friend class ScavengeVisitor;
 };
 
 // Helper class for turning the scavenger into an object visitor that is also
......
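The scavenger.h hunks above also narrow the class's public surface: CheckAndScavengeObject and ScavengeObject move from the public section into a private one, and the visitor classes that still call them are granted access via friend declarations. Here is a generic sketch of that pattern with simplified, illustrative signatures (not the real V8 ones):

```cpp
// Sketch of narrowing a public API behind friend declarations.
// Method bodies and signatures are illustrative stand-ins.
#include <cstdio>

class Scavenger {
 public:
  // ScavengePage stays public: the one entry point for whole pages.
  void ScavengePage() { std::printf("page\n"); }

 private:
  // Per-object helpers are now reachable only by befriended visitors.
  void ScavengeObject() { std::printf("object\n"); }

  friend class RootScavengeVisitor;
};

class RootScavengeVisitor {
 public:
  explicit RootScavengeVisitor(Scavenger* scavenger) : scavenger_(scavenger) {}
  void VisitRootPointer() { scavenger_->ScavengeObject(); }  // friend access

 private:
  Scavenger* const scavenger_;
};

int main() {
  Scavenger s;
  RootScavengeVisitor v(&s);
  v.VisitRootPointer();
  return 0;
}
```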