Commit 4a1f2807 authored by mlippautz, committed by Commit bot

Revert of [heap] Don't unmap new space pages while sweeping is active (patchset #4 id:80001 of https://codereview.chromium.org/2250423002/ )

Reason for revert:
The barrier in new space is still needed.
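For context, the "barrier" here means that a scavenge must not start until every new-space page has either been swept or can no longer be touched by a concurrent sweeper task. A minimal, self-contained sketch of that idea follows; it uses simplified stand-in types, not V8's actual Page/Sweeper classes:

// Illustrative sketch only: simplified stand-ins, not V8's real types.
#include <mutex>
#include <vector>

struct Page {
  std::mutex sweep_lock;  // held by whichever thread is sweeping the page
  bool swept = false;
};

// Called at the start of a scavenge: sweep each unswept new-space page on
// this thread, or block until the concurrent sweeper has released it. After
// this returns, no sweeper task can still be holding (or about to take) a
// new-space page lock.
void EnsureNewSpaceSwept(std::vector<Page*>& new_space_pages) {
  for (Page* p : new_space_pages) {
    std::lock_guard<std::mutex> guard(p->sweep_lock);
    if (!p->swept) p->swept = true;  // stands in for actually sweeping
  }
}

The revert below restores exactly this kind of wait (EnsureNewSpaceCompleted) at the start of Heap::Scavenge.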

Original issue's description:
> [heap] Don't unmap new space pages while sweeping is active
>
> - The barrier for scavenge only checked whether new space pages were swept.
>   This is not enough as a concurrent task could still hang right before trying to
>   lock the page for sweeping. Remove the barrier completely.
> - Avoid unmapping of new space pages while sweeping using a delayed list that
>   gets emptied upon the next call to the unmapper.
>
> BUG=chromium:628984
> R=hpayer@chromium.org
>
> Committed: https://crrev.com/982b399423e6bd941cabb2b825031cd8d5eb4980
> Cr-Commit-Position: refs/heads/master@{#38710}
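For illustration, the delayed-list approach described above (and removed again by this revert) can be sketched as follows, using simplified stand-in types rather than the real MemoryAllocator::Unmapper API: chunks that cannot be freed safely yet are parked on a delayed list and reconsidered on the next unmapper run.

// Illustrative sketch only: simplified stand-ins, not the real Unmapper.
#include <list>
#include <utility>

struct MemoryChunk {
  bool in_new_space = false;
};

class DelayedUnmapper {
 public:
  void AddChunkSafe(MemoryChunk* chunk, bool sweeping_active) {
    // New-space chunks must not be freed while a sweeper task might still be
    // about to lock them, so park them instead of queueing them for unmapping.
    if (chunk->in_new_space && sweeping_active) {
      delayed_chunks_.push_back(chunk);
    } else {
      chunks_.push_back(chunk);
    }
  }

  void FreeQueuedChunks(bool sweeping_active) {
    // Reconsider previously delayed chunks first; if sweeping has finished
    // they move onto the regular queue and get unmapped below.
    std::list<MemoryChunk*> delayed(std::move(delayed_chunks_));
    for (MemoryChunk* c : delayed) AddChunkSafe(c, sweeping_active);
    chunks_.clear();  // stands in for actually unmapping the queued chunks
  }

 private:
  std::list<MemoryChunk*> chunks_;
  std::list<MemoryChunk*> delayed_chunks_;
};

The actual code being reverted lives in MemoryAllocator::Unmapper::ReconsiderDelayedChunks() and MemoryAllocator::CanFreeMemoryChunk(), both visible in the diff below.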

TBR=hpayer@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:628984

Review-Url: https://codereview.chromium.org/2244233007
Cr-Commit-Position: refs/heads/master@{#38712}
parent 06cde13e
@@ -1595,6 +1595,8 @@ void Heap::Scavenge() {
   // Pause the inline allocation steps.
   PauseAllocationObserversScope pause_observers(this);
 
+  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+
   gc_state_ = SCAVENGE;
 
   // Implements Cheney's copying algorithm
...
@@ -542,6 +542,15 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
   sweeping_in_progress_ = false;
 }
 
+void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
+  if (!sweeping_in_progress_) return;
+  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+    for (Page* p : *heap_->new_space()) {
+      SweepOrWaitUntilSweepingCompleted(p);
+    }
+  }
+}
+
 void MarkCompactCollector::EnsureSweepingCompleted() {
   if (!sweeper().sweeping_in_progress()) return;
...
@@ -315,6 +315,7 @@ class MarkCompactCollector {
     void StartSweeping();
     void StartSweepingHelper(AllocationSpace space_to_start);
     void EnsureCompleted();
+    void EnsureNewSpaceCompleted();
    bool IsSweepingCompleted();
    void SweepOrWaitUntilSweepingCompleted(Page* page);
...
@@ -4,8 +4,6 @@
 #include "src/heap/spaces.h"
 
-#include <utility>
-
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/semaphore.h"
@@ -350,7 +348,6 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
 };
 
 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
-  ReconsiderDelayedChunks();
   if (FLAG_concurrent_sweeping) {
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
@@ -384,24 +381,6 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
   }
 }
 
-void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
-  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
-  // Move constructed, so the permanent list should be empty.
-  DCHECK(delayed_regular_chunks_.empty());
-  for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
-    AddMemoryChunkSafe<kRegular>(*it);
-  }
-}
-
-bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
-  MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
-  // We cannot free memory chunks in new space while the sweeper is running
-  // since a sweeper thread might be stuck right before trying to lock the
-  // corresponding page.
-  return !chunk->InNewSpace() || (mc == nullptr) ||
-         mc->sweeper().IsSweepingCompleted();
-}
-
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
...
@@ -1205,12 +1205,7 @@ class MemoryAllocator {
     template <ChunkQueueType type>
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       base::LockGuard<base::Mutex> guard(&mutex_);
-      if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
-        chunks_[type].push_back(chunk);
-      } else {
-        DCHECK_EQ(type, kRegular);
-        delayed_regular_chunks_.push_back(chunk);
-      }
+      chunks_[type].push_back(chunk);
     }
 
     template <ChunkQueueType type>
@@ -1222,16 +1217,11 @@ class MemoryAllocator {
       return chunk;
     }
 
-    void ReconsiderDelayedChunks();
     void PerformFreeMemoryOnQueuedChunks();
 
     base::Mutex mutex_;
     MemoryAllocator* allocator_;
     std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
-    // Delayed chunks cannot be processed in the current unmapping cycle because
-    // of dependencies such as an active sweeper.
-    // See MemoryAllocator::CanFreeMemoryChunk.
-    std::list<MemoryChunk*> delayed_regular_chunks_;
     base::Semaphore pending_unmapping_tasks_semaphore_;
     intptr_t concurrent_unmapping_tasks_active_;
@@ -1270,8 +1260,6 @@ class MemoryAllocator {
   template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);
 
-  bool CanFreeMemoryChunk(MemoryChunk* chunk);
-
   // Returns allocated spaces in bytes.
   intptr_t Size() { return size_.Value(); }
...