Commit 31251988 authored by Dominik Inführ's avatar Dominik Inführ Committed by V8 LUCI CQ

[heap] Heap::EnsureSweepingCompleted only sweeps one page

This CL optimizes Heap::EnsureSweepingCompleted() by only ensuring that
sweeping is finished for that object's page, instead of for the whole
heap.

For this purpose the page is removed from the sweeping_list_ and
processed on the main thread. In case the object is in new space,
this method will just use EnsureIterabilityCompleted() since
the iterability task doesn't currently have any kind of synchronization.
Also the new space will generally be much smaller.

Bug: v8:11837
Change-Id: I7e878b5c84c223009fac0d58798197be5cd524e1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2958488
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75148}
parent 1835607b
......@@ -35,6 +35,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/barrier.h"
#include "src/heap/base/stack.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/code-range.h"
#include "src/heap/code-stats.h"
......@@ -2268,9 +2269,19 @@ void Heap::CompleteSweepingYoung(GarbageCollector collector) {
}
// Ensures that sweeping has finished for the page containing |object| only,
// rather than blocking until the whole heap is swept (see v8:11837). The
// previous implementation unconditionally called
// mark_compact_collector()->EnsureSweepingCompleted(), which swept the entire
// heap and made the per-page fast path below dead code; that redundant
// whole-heap sweep is removed here.
void Heap::EnsureSweepingCompleted(Handle<HeapObject> object) {
  // Nothing to do when no sweeping cycle is active.
  if (!mark_compact_collector()->sweeping_in_progress()) return;

  BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(*object);
  // Read-only space is never swept.
  if (basic_chunk->InReadOnlySpace()) return;

  MemoryChunk* chunk = MemoryChunk::cast(basic_chunk);
  if (chunk->SweepingDone()) return;

  // SweepingDone() is always true for large pages, so at this point the chunk
  // must be a regular Page.
  DCHECK(!chunk->IsLargePage());

  Page* page = Page::cast(chunk);
  mark_compact_collector()->EnsurePageIsSwept(page);
}
void Heap::UpdateCurrentEpoch(GarbageCollector collector) {
......
......@@ -616,6 +616,10 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
#endif
}
// Blocks until |page| has been swept. Thin forwarder to the Sweeper; exists
// so that Heap only needs to talk to the collector, not the sweeper directly.
void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
sweeper()->EnsurePageIsSwept(page);
}
void MarkCompactCollector::DrainSweepingWorklists() {
if (!sweeper()->sweeping_in_progress()) return;
sweeper()->DrainSweepingWorklists();
......
......@@ -513,6 +513,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Note: Can only be called safely from main thread.
V8_EXPORT_PRIVATE void EnsureSweepingCompleted();
void EnsurePageIsSwept(Page* page);
void DrainSweepingWorklists();
void DrainSweepingWorklistForSpace(AllocationSpace space);
......
......@@ -229,6 +229,11 @@ class Page : public MemoryChunk {
return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
}
// Downcasts a MemoryChunk to a Page. Large pages are not regular Pages, so
// casting one here would be invalid; the DCHECK guards against that in debug
// builds.
static Page* cast(MemoryChunk* chunk) {
DCHECK(!chunk->IsLargePage());
return static_cast<Page*>(chunk);
}
// Returns the page containing the address provided. The address can
// potentially point right after the page. To also be safe for tagged values
// we subtract a hole word. The valid address ranges from
......
......@@ -506,10 +506,47 @@ int Sweeper::ParallelSweepPage(
{
base::MutexGuard guard(&mutex_);
swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
cv_page_swept_.NotifyAll();
}
return max_freed;
}
// Blocks the caller until |page| has been swept, without waiting for the
// rest of the heap. Either claims the page from the pending list and sweeps
// it on this thread, or waits for the sweeper task that already owns it.
// NOTE(review): intended to run on the main thread — confirm against callers.
void Sweeper::EnsurePageIsSwept(Page* page) {
// Fast path: no sweeping cycle active, or this page is already done.
if (!sweeping_in_progress() || page->SweepingDone()) return;
AllocationSpace space = page->owner_identity();
if (IsValidSweepingSpace(space)) {
if (TryRemoveSweepingPageSafe(space, page)) {
// Page was successfully removed and can now be swept.
ParallelSweepPage(page, space);
} else {
// Some sweeper task already took ownership of that page, wait until
// sweeping is finished.
base::MutexGuard guard(&mutex_);
while (!page->SweepingDone()) {
// cv_page_swept_ is notified (under mutex_) by ParallelSweepPage()
// once a page lands on the swept list; loop guards against spurious
// wakeups and wakeups for other pages.
cv_page_swept_.Wait(&mutex_);
}
}
} else {
// New-space pages are not handled by the sweeping lists; finishing the
// iterability work is sufficient there (new space is small and the
// iterability task has no page-level synchronization).
DCHECK(page->InNewSpace());
EnsureIterabilityCompleted();
}
// Postcondition: the page must be swept on every path above.
CHECK(page->SweepingDone());
}
// Atomically removes |page| from the pending sweeping list of |space|.
// Returns true when the caller has thereby taken ownership of the page and
// may sweep it itself; returns false when the page was no longer pending
// (typically because a sweeper task already picked it up).
bool Sweeper::TryRemoveSweepingPageSafe(AllocationSpace space, Page* page) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  SweepingList& pending = sweeping_list_[GetSweepSpaceIndex(space)];
  auto it = std::find(pending.begin(), pending.end(), page);
  const bool was_pending = it != pending.end();
  if (was_pending) pending.erase(it);
  return was_pending;
}
void Sweeper::ScheduleIncrementalSweepingTask() {
if (!incremental_sweeper_pending_) {
incremental_sweeper_pending_ = true;
......
......@@ -9,6 +9,7 @@
#include <map>
#include <vector>
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/tasks/cancelable-task.h"
......@@ -94,6 +95,8 @@ class Sweeper {
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
FreeSpaceMayContainInvalidatedSlots::kNo);
void EnsurePageIsSwept(Page* page);
void ScheduleIncrementalSweepingTask();
int RawSweep(
......@@ -185,6 +188,7 @@ class Sweeper {
bool IncrementalSweepSpace(AllocationSpace identity);
Page* GetSweepingPageSafe(AllocationSpace space);
bool TryRemoveSweepingPageSafe(AllocationSpace space, Page* page);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
......@@ -208,6 +212,7 @@ class Sweeper {
MajorNonAtomicMarkingState* marking_state_;
std::unique_ptr<JobHandle> job_handle_;
base::Mutex mutex_;
base::ConditionVariable cv_page_swept_;
SweptList swept_list_[kNumberOfSweepingSpaces];
SweepingList sweeping_list_[kNumberOfSweepingSpaces];
bool incremental_sweeper_pending_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment