Commit 1c8031b8 authored by Joakim Bengtsson, committed by Commit Bot

Make sure that Unmapper tasks can start to prevent memory bloat.

In some workloads the Unmapper could reach kMaxUnmapperTasks, at which
point it would not start any new tasks and would not free any more
memory until the next major GC. This could lead to a large buildup of
memory in the Unmapper.

Bug: v8:7440
Change-Id: I23fda67b2e27824c04ac886d7e111bb01188be74
Reviewed-on: https://chromium-review.googlesource.com/913490
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51308}
parent 8f234751
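
To make the failure mode and the fix concrete before the diff, here is a minimal, self-contained C++ sketch of the accounting pattern the patch introduces. This is not V8 code: TaskLimiter, TryStartTask, OnTaskDone, MakeRoom and kMaxTasks are hypothetical stand-ins for the Unmapper, FreeQueuedChunks, the task's completion path, MakeRoomForNewTasks and kMaxUnmapperTasks. The idea is the same: a plain counter tracks task slots handed out since the last finalization ("pending"), an atomic counter tracks tasks still running ("active"), and when nothing is running but slots are still occupied, the slots are recycled instead of refusing new work until the next major GC.

#include <atomic>
#include <cstdio>

// Hypothetical stand-in for the Unmapper's task accounting (not V8 code).
class TaskLimiter {
 public:
  static constexpr int kMaxTasks = 4;  // plays the role of kMaxUnmapperTasks

  // Main thread: try to claim a slot before spawning a background task.
  bool TryStartTask() {
    if (!MakeRoom()) return false;  // pre-patch: refused until next major GC
    active_.fetch_add(1, std::memory_order_relaxed);
    ++pending_;  // slot stays claimed until the tasks are finalized
    return true;
  }

  // Background task: signal that its unmapping work is done.
  void OnTaskDone() { active_.fetch_sub(1, std::memory_order_relaxed); }

 private:
  // Mirrors the shape of MakeRoomForNewTasks(): recycle slots whose
  // tasks have all finished, then report whether a slot is free.
  bool MakeRoom() {
    if (active_.load(std::memory_order_relaxed) == 0 && pending_ > 0) {
      // Everything previously started has run to completion; in V8 this
      // is where WaitUntilCompleted() joins the tasks and resets state.
      pending_ = 0;
    }
    return pending_ != kMaxTasks;
  }

  int pending_ = 0;             // slots claimed, main thread only
  std::atomic<int> active_{0};  // tasks still running
};

int main() {
  TaskLimiter limiter;
  for (int i = 0; i < 10; ++i) {
    if (limiter.TryStartTask()) {
      limiter.OnTaskDone();  // pretend the task ran and finished
    } else {
      // Without the recycling in MakeRoom() this branch would be taken
      // from i == 4 on: the memory-bloat scenario described above.
      std::printf("task %d refused\n", i);
    }
  }
  return 0;
}

The two-counter split mirrors the patch below: pending_unmapping_tasks_ stays a plain intptr_t because it is only touched on the main thread, while active_unmapping_tasks_ is a base::AtomicNumber because background tasks decrement it; the real MakeRoomForNewTasks() additionally goes through WaitUntilCompleted(), which aborts or joins finished tasks via the CancelableTaskManager and a semaphore.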
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -661,6 +661,7 @@ DEFINE_BOOL(trace_mutator_utilization, false,
 DEFINE_BOOL(incremental_marking, true, "use incremental marking")
 DEFINE_BOOL(incremental_marking_wrappers, true,
             "use incremental marking for marking wrappers")
+DEFINE_BOOL(trace_unmapper, false, "Trace the unmapping")
 DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
 DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
 DEFINE_BOOL(write_protect_code_memory, false, "write protect code memory")
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -324,7 +324,12 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
     TRACE_BACKGROUND_GC(tracer_,
                         GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
     unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+    unmapper_->active_unmapping_tasks_.Decrement(1);
     unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+    if (FLAG_trace_unmapper) {
+      PrintIsolate(unmapper_->heap_->isolate(),
+                   "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+    }
   }

   Unmapper* const unmapper_;
@@ -334,13 +339,26 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {

 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
   if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
-    if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
+    if (!MakeRoomForNewTasks()) {
       // kMaxUnmapperTasks are already running. Avoid creating any more.
+      if (FLAG_trace_unmapper) {
+        PrintIsolate(heap_->isolate(),
+                     "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
+                     kMaxUnmapperTasks);
+      }
       return;
     }
     UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
-    DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
-    task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
+    if (FLAG_trace_unmapper) {
+      PrintIsolate(heap_->isolate(),
+                   "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
+                   task->id());
+    }
+    DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
+    DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
+    DCHECK_GE(active_unmapping_tasks_.Value(), 0);
+    active_unmapping_tasks_.Increment(1);
+    task_ids_[pending_unmapping_tasks_++] = task->id();
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         task, v8::Platform::kShortRunningTask);
   } else {
@@ -349,18 +367,41 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
 }

 void MemoryAllocator::Unmapper::WaitUntilCompleted() {
-  for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
+  for (int i = 0; i < pending_unmapping_tasks_; i++) {
     if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
         CancelableTaskManager::kTaskAborted) {
       pending_unmapping_tasks_semaphore_.Wait();
     }
   }
-  concurrent_unmapping_tasks_active_ = 0;
+  pending_unmapping_tasks_ = 0;
+  active_unmapping_tasks_.SetValue(0);
+
+  if (FLAG_trace_unmapper) {
+    PrintIsolate(heap_->isolate(),
+                 "Unmapper::WaitUntilCompleted: no tasks remaining\n");
+  }
 }

+bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
+  DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
+
+  if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
+    // All previous unmapping tasks have been run to completion.
+    // Finalize those tasks to make room for new ones.
+    WaitUntilCompleted();
+  }
+  return pending_unmapping_tasks_ != kMaxUnmapperTasks;
+}
+
 template <MemoryAllocator::Unmapper::FreeMode mode>
 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
   MemoryChunk* chunk = nullptr;
+  if (FLAG_trace_unmapper) {
+    PrintIsolate(
+        heap_->isolate(),
+        "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
+        NumberOfChunks());
+  }
   // Regular chunks.
   while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
@@ -382,7 +423,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
 }

 void MemoryAllocator::Unmapper::TearDown() {
-  CHECK_EQ(0, concurrent_unmapping_tasks_active_);
+  CHECK_EQ(0, pending_unmapping_tasks_);
   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     DCHECK(chunks_[i].empty());
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -1172,7 +1172,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
         : heap_(heap),
           allocator_(allocator),
           pending_unmapping_tasks_semaphore_(0),
-          concurrent_unmapping_tasks_active_(0) {
+          pending_unmapping_tasks_(0),
+          active_unmapping_tasks_(0) {
       chunks_[kRegular].reserve(kReservedQueueingSlots);
       chunks_[kPooled].reserve(kReservedQueueingSlots);
     }
@@ -1240,6 +1241,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
       return chunk;
     }

+    bool MakeRoomForNewTasks();
+
     template <FreeMode mode>
     void PerformFreeMemoryOnQueuedChunks();
@@ -1249,7 +1252,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
     base::Semaphore pending_unmapping_tasks_semaphore_;
-    intptr_t concurrent_unmapping_tasks_active_;
+    intptr_t pending_unmapping_tasks_;
+    base::AtomicNumber<intptr_t> active_unmapping_tasks_;

     friend class MemoryAllocator;
   };