Commit e5a1993b authored by Michael Lippautz, committed by Commit Bot

[heap] Remove delayed chunks in Unmapper

The dependency between restoring iterability and the Scavenger is now
explicit: the Scavenger ensures iterability is restored before it runs.
Delayed chunks are therefore no longer needed.
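For context, the bookkeeping being deleted worked roughly like the
standalone sketch below (hypothetical types, not V8's actual classes):
while the sweeper was active, regular chunks could not be freed safely,
so they were parked in a delayed list and re-queued on a later unmapping
cycle. With the Scavenger now calling EnsureIterabilityCompleted() itself,
every queued chunk can be freed immediately.

// Standalone sketch of the removed mechanism (hypothetical types, not V8's).
#include <iostream>
#include <list>
#include <vector>

struct Chunk {
  int id;
};

class Unmapper {
 public:
  explicit Unmapper(const bool* sweeping) : sweeping_(sweeping) {}

  // Old behavior: chunks that cannot be freed while the sweeper is active
  // are parked in a delayed list instead of the free queue.
  void AddChunkOld(Chunk* chunk) {
    if (*sweeping_) {
      delayed_.push_back(chunk);  // reconsidered on a later cycle
    } else {
      queue_.push_back(chunk);
    }
  }

  // New behavior: the caller has already made the heap iterable, so every
  // chunk goes straight into the free queue.
  void AddChunkNew(Chunk* chunk) { queue_.push_back(chunk); }

  void FreeQueuedChunks() {
    // The old cycle first re-queued the delayed chunks.
    for (Chunk* chunk : delayed_) queue_.push_back(chunk);
    delayed_.clear();
    for (Chunk* chunk : queue_) {
      std::cout << "freeing chunk " << chunk->id << "\n";
      delete chunk;
    }
    queue_.clear();
  }

 private:
  const bool* sweeping_;
  std::vector<Chunk*> queue_;
  std::list<Chunk*> delayed_;
};

int main() {
  bool sweeping = true;
  Unmapper unmapper(&sweeping);
  unmapper.AddChunkOld(new Chunk{1});  // parked: sweeper still running
  sweeping = false;                    // sweeping has finished
  unmapper.AddChunkNew(new Chunk{2});  // new world: queued unconditionally
  unmapper.FreeQueuedChunks();         // frees both chunks
  return 0;
}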

Bug: chromium:791043
Change-Id: I9f2c95c1856f53299af2737f922a3cb4cc578aa5
Reviewed-on: https://chromium-review.googlesource.com/805816
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49855}
parent faad1c19
@@ -526,7 +526,6 @@ void GCTracer::PrintNVP() const {
          "semi_space_copy_rate=%.1f%% "
          "new_space_allocation_throughput=%.1f "
          "unmapper_chunks=%d "
-         "unmapper_delayed_chunks=%d "
          "context_disposal_rate=%.1f\n",
          duration, spent_in_mutator, current_.TypeName(true),
          current_.reduce_memory, current_.scopes[Scope::HEAP_PROLOGUE],
@@ -560,7 +559,6 @@ void GCTracer::PrintNVP() const {
          heap_->semi_space_copied_rate_,
          NewSpaceAllocationThroughputInBytesPerMillisecond(),
          heap_->memory_allocator()->unmapper()->NumberOfChunks(),
-         heap_->memory_allocator()->unmapper()->NumberOfDelayedChunks(),
          ContextDisposalRateInMilliseconds());
      break;
    case Event::MINOR_MARK_COMPACTOR:
case Event::MINOR_MARK_COMPACTOR: case Event::MINOR_MARK_COMPACTOR:
@@ -707,7 +705,6 @@ void GCTracer::PrintNVP() const {
          "semi_space_copy_rate=%.1f%% "
          "new_space_allocation_throughput=%.1f "
          "unmapper_chunks=%d "
-         "unmapper_delayed_chunks=%d "
          "context_disposal_rate=%.1f "
          "compaction_speed=%.f\n",
          duration, spent_in_mutator, current_.TypeName(true),
@@ -791,7 +788,6 @@ void GCTracer::PrintNVP() const {
          heap_->semi_space_copied_rate_,
          NewSpaceAllocationThroughputInBytesPerMillisecond(),
          heap_->memory_allocator()->unmapper()->NumberOfChunks(),
-         heap_->memory_allocator()->unmapper()->NumberOfDelayedChunks(),
          ContextDisposalRateInMilliseconds(),
          CompactionSpeedInBytesPerMillisecond());
      break;
...
@@ -1964,11 +1964,6 @@ void Heap::Scavenge() {
  IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
      incremental_marking());

-  if (mark_compact_collector()->sweeper()->sweeping_in_progress() &&
-      memory_allocator_->unmapper()->NumberOfDelayedChunks() >
-          static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
-    mark_compact_collector()->EnsureSweepingCompleted();
-  }
  mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
...
@@ -625,9 +625,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
    verifier.Run();
  }
#endif
-
-  if (heap()->memory_allocator()->unmapper()->has_delayed_chunks())
-    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}

void MarkCompactCollector::ComputeEvacuationHeuristics(
...
@@ -326,7 +326,6 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
};

void MemoryAllocator::Unmapper::FreeQueuedChunks() {
-  ReconsiderDelayedChunks();
  if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
    if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
      // kMaxUnmapperTasks are already running. Avoid creating any more.
@@ -377,23 +376,12 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {

void MemoryAllocator::Unmapper::TearDown() {
  CHECK_EQ(0, concurrent_unmapping_tasks_active_);
-  ReconsiderDelayedChunks();
-  CHECK(delayed_regular_chunks_.empty());
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    DCHECK(chunks_[i].empty());
  }
}

-void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
-  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
-  // Move constructed, so the permanent list should be empty.
-  DCHECK(delayed_regular_chunks_.empty());
-  for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
-    AddMemoryChunkSafe<kRegular>(*it);
-  }
-}
-
int MemoryAllocator::Unmapper::NumberOfChunks() {
  base::LockGuard<base::Mutex> guard(&mutex_);
  size_t result = 0;
@@ -403,16 +391,6 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
  return static_cast<int>(result);
}

-bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
-  MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
-  // We cannot free a memory chunk in new space while the sweeper is running
-  // because the memory chunk can be in the queue of a sweeper task.
-  // Chunks in old generation are unmapped if they are empty.
-  DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
-  return !chunk->InNewSpace() || mc == nullptr ||
-         !mc->sweeper()->sweeping_in_progress();
-}
-
bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::OS::SetPermissions(base, size,
...
@@ -1225,14 +1225,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
    void FreeQueuedChunks();
    void WaitUntilCompleted();
    void TearDown();
-
-    bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
-
-    int NumberOfDelayedChunks() {
-      base::LockGuard<base::Mutex> guard(&mutex_);
-      return static_cast<int>(delayed_regular_chunks_.size());
-    }
-
    int NumberOfChunks();

   private:
@@ -1255,12 +1247,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
    template <ChunkQueueType type>
    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      base::LockGuard<base::Mutex> guard(&mutex_);
-      if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
-        chunks_[type].push_back(chunk);
-      } else {
-        DCHECK_EQ(type, kRegular);
-        delayed_regular_chunks_.push_back(chunk);
-      }
+      chunks_[type].push_back(chunk);
    }

    template <ChunkQueueType type>
@@ -1272,7 +1259,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
      return chunk;
    }

-    void ReconsiderDelayedChunks();
    template <FreeMode mode>
    void PerformFreeMemoryOnQueuedChunks();
@@ -1280,10 +1266,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
    MemoryAllocator* const allocator_;
    base::Mutex mutex_;
    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
-    // Delayed chunks cannot be processed in the current unmapping cycle
-    // because of dependencies such as an active sweeper.
-    // See MemoryAllocator::CanFreeMemoryChunk.
-    std::list<MemoryChunk*> delayed_regular_chunks_;
    CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
    base::Semaphore pending_unmapping_tasks_semaphore_;
    intptr_t concurrent_unmapping_tasks_active_;
@@ -1344,8 +1326,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
  template <MemoryAllocator::FreeMode mode = kFull>
  void Free(MemoryChunk* chunk);

-  bool CanFreeMemoryChunk(MemoryChunk* chunk);
-
  // Returns allocated spaces in bytes.
  size_t Size() { return size_.Value(); }
...