Commit 0c0a4f0e authored by Michael Lippautz, committed by Commit Bot

[heap] Remove IsSweepingCompleted(AllocationSpace)

Adjusting the count requires us to call into Semaphore::WaitFor which
even on a z840 introduces a pause of at least 50us. We often call in
here from the unmapper that tries to add pages. E.g. for reducing the
new space size of 8M we call this for 16x2 pages, resulting in a pause
of 1.6ms for just checking the status of the sweeper tasks.

Avoiding the count adjustment (and with it the semaphore wait) reduces
the epilogue times. Example: Facebook infinite scroll:

Before:
  heap.epilogue
    len: 102
    min: 0.01
    max: 4.83
    avg: 0.140196078431
    [0,5[: 102
After:
  heap.epilogue
    len: 106
    min: 0.01
    max: 0.24
    avg: 0.0260377358491
    [0,5[: 106

BUG=

Change-Id: I296c20ae3ac4b65218e4e038a9dbce504160a764
Reviewed-on: https://chromium-review.googlesource.com/455839
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#43863}
parent c5a46b79
......@@ -399,9 +399,11 @@ void MarkCompactCollector::ClearMarkbits() {
class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
public:
SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
base::AtomicNumber<intptr_t>* num_sweeping_tasks,
AllocationSpace space_to_start)
: sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
space_to_start_(space_to_start) {}
virtual ~SweeperTask() {}
......@@ -419,11 +421,13 @@ class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
}
num_sweeping_tasks_->Decrement(1);
pending_sweeper_tasks_->Signal();
}
Sweeper* sweeper_;
base::Semaphore* pending_sweeper_tasks_;
base::AtomicNumber<intptr_t>* num_sweeping_tasks_;
AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
......@@ -442,8 +446,10 @@ void MarkCompactCollector::Sweeper::StartSweeperTasks() {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
num_sweeping_tasks_.Increment(1);
semaphore_counter_++;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space),
new SweeperTask(this, &pending_sweeper_tasks_semaphore_,
&num_sweeping_tasks_, space),
v8::Platform::kShortRunningTask);
});
}
......@@ -463,8 +469,7 @@ void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
}
void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
if (FLAG_concurrent_sweeping &&
!sweeper().IsSweepingCompleted(space->identity())) {
if (FLAG_concurrent_sweeping && sweeper().sweeping_in_progress()) {
sweeper().ParallelSweepSpace(space->identity(), 0);
space->RefillFreeList();
}
......@@ -484,16 +489,13 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces([this](AllocationSpace space) {
if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) {
ParallelSweepSpace(space, 0);
}
});
ForAllSweepingSpaces(
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
if (FLAG_concurrent_sweeping) {
while (num_sweeping_tasks_.Value() > 0) {
while (semaphore_counter_ > 0) {
pending_sweeper_tasks_semaphore_.Wait();
num_sweeping_tasks_.Increment(-1);
semaphore_counter_--;
}
}
......@@ -508,7 +510,7 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
if (!sweeping_in_progress_) return;
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) {
if (!FLAG_concurrent_sweeping || sweeping_in_progress()) {
for (Page* p : *heap_->new_space()) {
SweepOrWaitUntilSweepingCompleted(p);
}
......@@ -528,24 +530,15 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
VerifyEvacuation(heap_);
}
#endif
if (heap()->memory_allocator()->unmapper()->has_delayed_chunks())
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
// Returns true while at least one concurrent sweeper task has not yet
// finished.
// NOTE(review): this span comes from a diff view — the WaitFor() drain loop
// below appears to be the pre-commit code that this change removes, since
// each zero-timeout Semaphore::WaitFor() call costs ~50us per the commit
// message; confirm against the applied version before relying on it.
bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
// Only meaningful to query when concurrent sweeping is enabled.
DCHECK(FLAG_concurrent_sweeping);
// Consume one semaphore signal per completed task without blocking
// (zero-second timeout), folding finished tasks into the counter.
while (pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
num_sweeping_tasks_.Increment(-1);
}
// Non-zero means some task has not yet signalled completion.
return num_sweeping_tasks_.Value() != 0;
}
// Returns true when concurrent sweeping of |space| has fully completed:
// no sweeper tasks are running and no pages for |space| remain queued.
// NOTE(review): this function is the one removed by this commit (see the
// commit title); callers are migrated to sweeping_in_progress() instead.
bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) {
// Only meaningful when concurrent sweeping is enabled.
DCHECK(FLAG_concurrent_sweeping);
// If background tasks are still active, sweeping cannot be complete.
if (AreSweeperTasksRunning()) return false;
// sweeping_list_ is shared with the concurrent tasks; guard the read.
base::LockGuard<base::Mutex> guard(&mutex_);
// Complete for |space| once no pages are left on its sweeping list.
return sweeping_list_[space].empty();
}
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
......@@ -626,8 +619,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
pages.reserve(number_of_pages);
DCHECK(!sweeping_in_progress());
DCHECK(!FLAG_concurrent_sweeping ||
sweeper().IsSweepingCompleted(space->identity()));
Page* owner_of_linear_allocation_area =
space->top() == space->limit()
? nullptr
......
......@@ -424,6 +424,7 @@ class MarkCompactCollector {
explicit Sweeper(Heap* heap)
: heap_(heap),
pending_sweeper_tasks_semaphore_(0),
semaphore_counter_(0),
sweeping_in_progress_(false),
num_sweeping_tasks_(0) {}
......@@ -443,7 +444,6 @@ class MarkCompactCollector {
void EnsureCompleted();
void EnsureNewSpaceCompleted();
bool AreSweeperTasksRunning();
bool IsSweepingCompleted(AllocationSpace space);
void SweepOrWaitUntilSweepingCompleted(Page* page);
void AddSweptPageSafe(PagedSpace* space, Page* page);
......@@ -468,10 +468,14 @@ class MarkCompactCollector {
Heap* heap_;
base::Semaphore pending_sweeper_tasks_semaphore_;
// Counter is only used for waiting on the semaphore.
intptr_t semaphore_counter_;
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
bool sweeping_in_progress_;
// Counter is actively maintained by the concurrent tasks to avoid querying
// the semaphore for maintaining a task counter on the main thread.
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
};
......
......@@ -417,7 +417,7 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
// Chunks in old generation are unmapped if they are empty.
DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
mc->sweeper().IsSweepingCompleted(NEW_SPACE);
!mc->sweeper().sweeping_in_progress();
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
......@@ -2866,6 +2866,11 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
if (FLAG_concurrent_sweeping && !is_local() &&
!collector->sweeper().AreSweeperTasksRunning()) {
collector->EnsureSweepingCompleted();
}
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
......
......@@ -1176,6 +1176,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
bool WaitUntilCompleted();
void TearDown();
bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
private:
static const int kReservedQueueingSlots = 64;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment