Commit ece2f108 authored by mlippautz, committed by Commit bot

[heap] Don't unmap new space pages while sweeping is active

- Avoid unmapping new space pages while sweeping is active by parking them on
  a delayed list that is emptied upon the next call to the unmapper.

BUG=chromium:628984
R=hpayer@chromium.org

This reverts commit 4a1f2807.

Review-Url: https://codereview.chromium.org/2255153002
Cr-Commit-Position: refs/heads/master@{#38715}
parent 8fd0ce04
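The pattern this change introduces is easy to demonstrate in isolation. Below is a minimal, self-contained C++ sketch of the same delayed-list idea: pages that cannot be unmapped yet, because a sweeper thread might still touch them, are parked on a secondary list and re-evaluated on the next unmapper invocation. All names here (ToyUnmapper, Chunk, sweeping_active) are illustrative stand-ins, not V8's actual types or API.

#include <list>
#include <mutex>

// Illustrative stand-in for a heap page; not V8's MemoryChunk.
struct Chunk {
  bool in_new_space;
};

// Toy model of the unmapper's delayed-list pattern: chunks that cannot be
// freed yet are parked and reconsidered on the next FreeQueuedChunks() call.
class ToyUnmapper {
 public:
  explicit ToyUnmapper(const bool* sweeping_active)
      : sweeping_active_(sweeping_active) {}

  void AddChunkSafe(Chunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (CanFree(chunk)) {
      chunks_.push_back(chunk);
    } else {
      delayed_chunks_.push_back(chunk);
    }
  }

  void FreeQueuedChunks() {
    ReconsiderDelayedChunks();
    std::list<Chunk*> to_free;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      to_free.swap(chunks_);
    }
    for (Chunk* chunk : to_free) delete chunk;  // stands in for unmapping
  }

 private:
  // Mirrors the idea of CanFreeMemoryChunk: new-space pages must not be
  // unmapped while a sweeper might still try to lock them.
  bool CanFree(const Chunk* chunk) const {
    return !chunk->in_new_space || !*sweeping_active_;
  }

  void ReconsiderDelayedChunks() {
    std::list<Chunk*> delayed;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      delayed.swap(delayed_chunks_);
    }
    // Re-adding re-evaluates CanFree(); chunks that still cannot be freed
    // simply land on the delayed list again.
    for (Chunk* chunk : delayed) AddChunkSafe(chunk);
  }

  std::mutex mutex_;
  const bool* sweeping_active_;
  std::list<Chunk*> chunks_;
  std::list<Chunk*> delayed_chunks_;
};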
src/heap/spaces.cc
@@ -4,6 +4,8 @@
 #include "src/heap/spaces.h"
 
+#include <utility>
+
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/semaphore.h"
@@ -348,6 +350,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
 };
 
 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  ReconsiderDelayedChunks();
   if (FLAG_concurrent_sweeping) {
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
@@ -381,6 +384,24 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
   }
 }
 
+void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
+  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
+  // Move constructed, so the permanent list should be empty.
+  DCHECK(delayed_regular_chunks_.empty());
+  for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
+    AddMemoryChunkSafe<kRegular>(*it);
+  }
+}
+
+bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
+  MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
+  // We cannot free memory chunks in new space while the sweeper is running
+  // since a sweeper thread might be stuck right before trying to lock the
+  // corresponding page.
+  return !chunk->InNewSpace() || (mc == nullptr) ||
+         mc->sweeper().IsSweepingCompleted();
+}
+
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
......
src/heap/spaces.h
@@ -1205,7 +1205,12 @@ class MemoryAllocator {
     template <ChunkQueueType type>
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       base::LockGuard<base::Mutex> guard(&mutex_);
-      chunks_[type].push_back(chunk);
+      if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
+        chunks_[type].push_back(chunk);
+      } else {
+        DCHECK_EQ(type, kRegular);
+        delayed_regular_chunks_.push_back(chunk);
+      }
     }
 
     template <ChunkQueueType type>
@@ -1217,11 +1222,16 @@ class MemoryAllocator {
       return chunk;
     }
 
+    void ReconsiderDelayedChunks();
     void PerformFreeMemoryOnQueuedChunks();
 
     base::Mutex mutex_;
     MemoryAllocator* allocator_;
     std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    // Delayed chunks cannot be processed in the current unmapping cycle because
+    // of dependencies such as an active sweeper.
+    // See MemoryAllocator::CanFreeMemoryChunk.
+    std::list<MemoryChunk*> delayed_regular_chunks_;
     base::Semaphore pending_unmapping_tasks_semaphore_;
     intptr_t concurrent_unmapping_tasks_active_;
@@ -1260,6 +1270,8 @@ class MemoryAllocator {
   template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);
 
+  bool CanFreeMemoryChunk(MemoryChunk* chunk);
+
   // Returns allocated spaces in bytes.
   intptr_t Size() { return size_.Value(); }
......
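Continuing the illustrative sketch above (hypothetical names, for demonstration only), a small driver shows the lifecycle of a delayed page: queued during sweeping, it survives the first unmapper call and is freed by the next one once sweeping has completed.

int main() {
  bool sweeping_active = true;
  ToyUnmapper unmapper(&sweeping_active);

  // A new-space page queued while the sweeper runs is parked, not freed.
  unmapper.AddChunkSafe(new Chunk{/*in_new_space=*/true});
  unmapper.FreeQueuedChunks();  // no-op for the parked page; sweeping is active

  // After sweeping completes, the next call re-queues the page and frees it.
  sweeping_active = false;
  unmapper.FreeQueuedChunks();
  return 0;
}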