Commit 982b3994 authored by mlippautz's avatar mlippautz Committed by Commit bot

[heap] Don't unmap new space pages while sweeping is active

- The barrier for scavenge only checked whether new space pages were swept.
  This is not enough, as a concurrent task could still be stuck right before
  trying to lock the page for sweeping. Remove the barrier completely.
- Avoid unmapping of new space pages while sweeping using a delayed list that
  gets emptied upon the next call to the unmapper.

BUG=chromium:628984
R=hpayer@chromium.org

Review-Url: https://codereview.chromium.org/2250423002
Cr-Commit-Position: refs/heads/master@{#38710}
parent 8ab555cc
......@@ -1595,8 +1595,6 @@ void Heap::Scavenge() {
// Pause the inline allocation steps.
PauseAllocationObserversScope pause_observers(this);
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
gc_state_ = SCAVENGE;
// Implements Cheney's copying algorithm
......
......@@ -542,15 +542,6 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
sweeping_in_progress_ = false;
}
// Makes sure all new-space pages are fully swept before the caller proceeds
// (used e.g. before a scavenge, which must not observe unswept pages).
void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
// Nothing to do when no sweeping cycle is active.
if (!sweeping_in_progress_) return;
// With concurrent sweeping disabled, or while the concurrent sweep has not
// yet finished, sweep (or wait for) each new-space page individually.
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
for (Page* p : *heap_->new_space()) {
// Either sweeps the page on this thread or blocks until a concurrent
// sweeper task has released it.
SweepOrWaitUntilSweepingCompleted(p);
}
}
}
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper().sweeping_in_progress()) return;
......
......@@ -315,7 +315,6 @@ class MarkCompactCollector {
void StartSweeping();
void StartSweepingHelper(AllocationSpace space_to_start);
void EnsureCompleted();
void EnsureNewSpaceCompleted();
bool IsSweepingCompleted();
void SweepOrWaitUntilSweepingCompleted(Page* page);
......
......@@ -4,6 +4,8 @@
#include "src/heap/spaces.h"
#include <utility>
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
......@@ -348,6 +350,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
ReconsiderDelayedChunks();
if (FLAG_concurrent_sweeping) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
......@@ -381,6 +384,24 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
}
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
// Move constructed, so the permanent list should be empty.
DCHECK(delayed_regular_chunks_.empty());
for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
AddMemoryChunkSafe<kRegular>(*it);
}
}
// Returns true when |chunk| may be unmapped right now. New-space chunks must
// not be freed while sweeping is in progress, since a sweeper task might be
// stuck right before trying to lock that very page.
bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
  MarkCompactCollector* collector = isolate_->heap()->mark_compact_collector();
  // Chunks outside new space are never touched by the new-space sweep.
  if (!chunk->InNewSpace()) return true;
  // No collector means no sweeper that could hold the page.
  if (collector == nullptr) return true;
  return collector->sweeper().IsSweepingCompleted();
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
Executability executable) {
if (!base::VirtualMemory::CommitRegion(base, size,
......
......@@ -1205,7 +1205,12 @@ class MemoryAllocator {
// Thread-safe enqueue of a chunk for unmapping. Regular chunks that cannot
// be freed yet (see MemoryAllocator::CanFreeMemoryChunk) are parked on the
// delayed list and reconsidered on the next unmapping cycle.
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
base::LockGuard<base::Mutex> guard(&mutex_);
// Only kRegular chunks can be delayed; non-regular chunks always go
// straight to their queue, so CanFreeMemoryChunk is not consulted for them.
if (type == kRegular && !allocator_->CanFreeMemoryChunk(chunk)) {
delayed_regular_chunks_.push_back(chunk);
} else {
chunks_[type].push_back(chunk);
}
}
template <ChunkQueueType type>
......@@ -1217,11 +1222,16 @@ class MemoryAllocator {
return chunk;
}
void ReconsiderDelayedChunks();
void PerformFreeMemoryOnQueuedChunks();
base::Mutex mutex_;
MemoryAllocator* allocator_;
std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
// Delayed chunks cannot be processed in the current unmapping cycle because
// of dependencies such as an active sweeper.
// See MemoryAllocator::CanFreeMemoryChunk.
std::list<MemoryChunk*> delayed_regular_chunks_;
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_;
......@@ -1260,6 +1270,8 @@ class MemoryAllocator {
template <MemoryAllocator::FreeMode mode = kFull>
void Free(MemoryChunk* chunk);
bool CanFreeMemoryChunk(MemoryChunk* chunk);
// Returns allocated spaces in bytes.
intptr_t Size() { return size_.Value(); }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment