Commit d6235fc4 authored by mlippautz, committed by Commit bot

[heap] Add concurrency-safe refilling to compaction spaces

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1409363003

Cr-Commit-Position: refs/heads/master@{#31508}
parent 8e89e820
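
In a nutshell, the patch makes the slow-path retry helper virtual on PagedSpace and overrides it on CompactionSpace: instead of blocking in EnsureSweepingCompleted(), which is only safe on the main thread, a compaction space asks the collector to help sweep its corresponding space and refill its free list. The sketch below is illustrative only; apart from the method names taken from the diff, the types, members, and main() driver are simplified assumptions, not V8's real code.

// Illustrative sketch only: simplified stand-ins for the V8 types touched by
// this patch, showing the virtual dispatch it introduces. Everything apart
// from the method names taken from the diff is a placeholder.
#include <cstdio>

struct HeapObject {};
struct CompactionSpace;

struct FreeList {
  int available = 0;
  HeapObject* Allocate(int size_in_bytes) {
    if (available < size_in_bytes) return nullptr;
    available -= size_in_bytes;
    static HeapObject dummy;
    return &dummy;  // placeholder for a real heap object
  }
};

struct MarkCompactCollector {
  bool sweeping_in_progress() const { return true; }
  // Main-thread only: joins the sweepers and finalizes sweeping.
  void EnsureSweepingCompleted() {}
  // Thread-safe: helps sweep the corresponding space and refills its
  // free list with the regained memory.
  void SweepAndRefill(CompactionSpace* space);
};

struct PagedSpace {
  FreeList free_list_;
  MarkCompactCollector* collector_ = nullptr;
  virtual ~PagedSpace() = default;

  // Old behavior, now the main-thread-only base implementation: wait for
  // all sweeping to finish, then retry the free-list allocation.
  virtual HeapObject* SweepAndRetryAllocation(int size_in_bytes) {
    if (collector_->sweeping_in_progress()) {
      collector_->EnsureSweepingCompleted();
      return free_list_.Allocate(size_in_bytes);
    }
    return nullptr;
  }
};

struct CompactionSpace : PagedSpace {
  // New behavior for compaction spaces: help out instead of waiting, so the
  // call is safe from a compactor thread.
  HeapObject* SweepAndRetryAllocation(int size_in_bytes) override {
    if (collector_->sweeping_in_progress()) {
      collector_->SweepAndRefill(this);
      return free_list_.Allocate(size_in_bytes);
    }
    return nullptr;
  }
};

void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
  // Placeholder for SweepInParallel(...) + RefillFreeList(): pretend one
  // page was swept and its free memory handed to this space.
  space->free_list_.available += 4096;
}

int main() {
  MarkCompactCollector collector;
  CompactionSpace space;
  space.collector_ = &collector;
  HeapObject* object = space.SweepAndRetryAllocation(64);
  std::printf("compaction-space retry %s\n", object ? "succeeded" : "failed");
  return 0;
}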
@@ -547,6 +547,14 @@ void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
 }
 
 
+void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
+  if (heap()->concurrent_sweeping_enabled() && !IsSweepingCompleted()) {
+    SweepInParallel(heap()->paged_space(space->identity()), 0);
+    space->RefillFreeList();
+  }
+}
+
+
 void MarkCompactCollector::EnsureSweepingCompleted() {
   DCHECK(sweeping_in_progress_ == true);
@@ -457,10 +457,19 @@ class MarkCompactCollector {
   // size of the maximum continuous freed memory chunk.
   int SweepInParallel(Page* page, PagedSpace* space);
 
   // Ensures that sweeping is finished.
+  //
+  // Note: Can only be called safely from main thread.
   void EnsureSweepingCompleted();
 
   void SweepOrWaitUntilSweepingCompleted(Page* page);
 
+  // Help out in sweeping the corresponding space and refill memory that has
+  // been regained.
+  //
+  // Note: Thread-safe.
+  void SweepAndRefill(CompactionSpace* space);
+
   // If sweeper threads are not active this method will return true. If
   // this is a latency issue we should be smarter here. Otherwise, it will
   // return true if the sweeper threads are done processing the pages.
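
To make "help out in sweeping the corresponding space and refill memory that has been regained" concrete, here is a hedged sketch: sweeping a pending page turns the memory that marking identified as dead into free-list entries for the owning space. Page, FreeList, and the convention that required_freed_bytes == 0 means "sweep everything pending" are assumptions of this sketch, not V8's actual implementation (which splits the work between SweepInParallel and RefillFreeList, as the first hunk shows).

// Hedged sketch of the "help out and refill" idea: sweep unswept pages of
// one space and hand the regained memory back to that space's free list.
// Page, FreeList and the zero-means-everything convention are simplified
// assumptions for illustration only.
#include <cstdio>
#include <vector>

struct Page {
  int dead_bytes;  // memory that marking found to be unreachable
  bool swept;
};

struct FreeList {
  int available = 0;
  void Add(int bytes) { available += bytes; }
};

// Sweeps pending pages until |required_freed_bytes| has been regained;
// 0 is treated here as "sweep everything that is still pending".
int SweepInParallel(std::vector<Page>& pages, FreeList* free_list,
                    int required_freed_bytes) {
  int freed = 0;
  for (Page& page : pages) {
    if (page.swept) continue;
    page.swept = true;
    free_list->Add(page.dead_bytes);  // refill the memory that was regained
    freed += page.dead_bytes;
    if (required_freed_bytes > 0 && freed >= required_freed_bytes) break;
  }
  return freed;
}

int main() {
  std::vector<Page> pages = {{4096, false}, {1024, false}, {8192, false}};
  FreeList free_list;
  // 0: take everything that is pending, mirroring the second argument of the
  // SweepInParallel call made by SweepAndRefill in the first hunk.
  int freed = SweepInParallel(pages, &free_list, 0);
  std::printf("freed %d bytes; free list now holds %d bytes\n", freed,
              free_list.available);
  return 0;
}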
@@ -2713,8 +2713,7 @@ void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
 }
 
 
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
-    int size_in_bytes) {
+HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
     // Wait for the sweeper threads here and complete the sweeping phase.
@@ -2724,7 +2723,17 @@ HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
     // entries.
     return free_list_.Allocate(size_in_bytes);
   }
-  return NULL;
+  return nullptr;
 }
+
+
+HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->sweeping_in_progress()) {
+    collector->SweepAndRefill(this);
+    return free_list_.Allocate(size_in_bytes);
+  }
+  return nullptr;
+}
@@ -2761,7 +2770,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements from their free-lists.
-    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
     return object;
   }
@@ -2775,7 +2784,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // If sweeper threads are active, wait for them at that point and steal
   // elements from their free-lists. Allocation may still fail there, which
   // would indicate that there is not enough memory for the given allocation.
-  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+  return SweepAndRetryAllocation(size_in_bytes);
 }
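
The two hunks above touch the tail of PagedSpace::SlowAllocateRaw. A hedged sketch of that fallback order follows; the helpers are simplified placeholders and the real slow path contains additional steps not shown in this diff.

// Hedged sketch of the slow allocation path whose tail the two hunks above
// modify. All helpers are simplified placeholders; the real SlowAllocateRaw
// performs additional work that is not visible in this diff.
#include <cstdio>

struct HeapObject {};

struct Space {
  int free_bytes = 0;
  bool old_generation_limit_reached = true;

  HeapObject* AllocateFromFreeList(int size_in_bytes) {
    if (free_bytes < size_in_bytes) return nullptr;
    free_bytes -= size_in_bytes;
    static HeapObject dummy;
    return &dummy;  // placeholder object
  }

  bool Expand() {
    free_bytes += 1 << 20;  // pretend a new page was committed for this space
    return true;
  }

  HeapObject* SweepAndRetryAllocation(int size_in_bytes) {
    free_bytes += 4096;  // pretend sweeping regained some memory
    return AllocateFromFreeList(size_in_bytes);
  }

  // Called after a free-list allocation has already failed.
  HeapObject* SlowAllocateRaw(int size_in_bytes) {
    // Near the old-generation limit, prefer regaining already-dead memory
    // over growing the heap (first hunk above).
    if (old_generation_limit_reached) {
      HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
      return object;
    }
    // Otherwise try to grow the space and allocate from the new memory.
    if (Expand()) {
      HeapObject* object = AllocateFromFreeList(size_in_bytes);
      if (object != nullptr) return object;
    }
    // Last resort (second hunk above); may still fail, which indicates there
    // is not enough memory for the given allocation.
    return SweepAndRetryAllocation(size_in_bytes);
  }
};

int main() {
  Space space;
  HeapObject* object = space.SlowAllocateRaw(64);
  std::printf("slow path %s\n", object != nullptr ? "allocated" : "failed");
  return 0;
}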
@@ -2121,7 +2121,7 @@ class PagedSpace : public Space {
   // If sweeping is still in progress try to sweep unswept pages. If that is
   // not successful, wait for the sweeper threads and re-try free-list
   // allocation.
-  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
+  MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
       int size_in_bytes);
 
   // Slow path of AllocateRaw. This function is space-dependent.
@@ -2838,6 +2838,9 @@ class CompactionSpace : public PagedSpace {
  protected:
   // The space is temporary and not included in any snapshots.
   virtual bool snapshotable() override { return false; }
+
+  MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
+      int size_in_bytes) override;
 };