Commit d1aeb45d authored by hpayer's avatar hpayer Committed by Commit bot

Concurrently unmap free pages.

BUG=

Review URL: https://codereview.chromium.org/1303263002

Cr-Commit-Position: refs/heads/master@{#30306}
parent 1cdcae94
...@@ -135,6 +135,7 @@ Heap::Heap() ...@@ -135,6 +135,7 @@ Heap::Heap()
current_gc_flags_(Heap::kNoGCFlags), current_gc_flags_(Heap::kNoGCFlags),
external_string_table_(this), external_string_table_(this),
chunks_queued_for_free_(NULL), chunks_queued_for_free_(NULL),
pending_unmap_job_semaphore_(0),
gc_callbacks_depth_(0), gc_callbacks_depth_(0),
deserialization_complete_(false), deserialization_complete_(false),
concurrent_sweeping_enabled_(false), concurrent_sweeping_enabled_(false),
...@@ -6514,6 +6515,33 @@ void ExternalStringTable::TearDown() { ...@@ -6514,6 +6515,33 @@ void ExternalStringTable::TearDown() {
} }
// Background task that frees a queued list of memory chunks off-thread.
// Ownership of the task passes to the platform; it signals the heap's
// pending-unmap semaphore when the list has been released.
class Heap::UnmapFreeMemoryTask : public v8::Task {
 public:
  UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
      : heap_(heap), list_head_(head) {}
  ~UnmapFreeMemoryTask() override {}

 private:
  // v8::Task overrides.
  void Run() override {
    // Free the whole chunk list, then wake anyone blocked in
    // WaitUntilUnmappingOfFreeChunksCompleted().
    heap_->FreeQueuedChunks(list_head_);
    heap_->pending_unmap_job_semaphore_.Signal();
  }

  Heap* heap_;              // Not owned.
  MemoryChunk* list_head_;  // Head of the chunk list to free.

  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
  // An unmap job is started after sweeping and again after compaction, so
  // two signals must be consumed before unmapping is known to be done.
  for (int pending_jobs = 2; pending_jobs > 0; pending_jobs--) {
    pending_unmap_job_semaphore_.Wait();
  }
}
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) { void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
chunk->set_next_chunk(chunks_queued_for_free_); chunk->set_next_chunk(chunks_queued_for_free_);
chunks_queued_for_free_ = chunk; chunks_queued_for_free_ = chunk;
...@@ -6528,19 +6556,32 @@ void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() { ...@@ -6528,19 +6556,32 @@ void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
next = chunk->next_chunk(); next = chunk->next_chunk();
chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
} }
isolate_->heap()->store_buffer()->Compact(); store_buffer()->Compact();
isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
} }
void Heap::FreeQueuedChunks() { void Heap::FreeQueuedChunks() {
if (chunks_queued_for_free_ != NULL) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
v8::Platform::kShortRunningTask);
chunks_queued_for_free_ = NULL;
} else {
// If we do not have anything to unmap, we just signal the semaphore
// that we are done.
pending_unmap_job_semaphore_.Signal();
}
}
void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
  // Free every chunk on the singly-linked list headed by |list_head|.
  MemoryChunk* current = list_head;
  while (current != NULL) {
    // Read the link before Free() destroys the chunk.
    MemoryChunk* next = current->next_chunk();
    isolate_->memory_allocator()->Free(current);
    current = next;
  }
}
......
...@@ -1423,7 +1423,9 @@ class Heap { ...@@ -1423,7 +1423,9 @@ class Heap {
void QueueMemoryChunkForFree(MemoryChunk* chunk); void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FilterStoreBufferEntriesOnAboutToBeFreedPages(); void FilterStoreBufferEntriesOnAboutToBeFreedPages();
void FreeQueuedChunks(MemoryChunk* list_head);
void FreeQueuedChunks(); void FreeQueuedChunks();
void WaitUntilUnmappingOfFreeChunksCompleted();
int gc_count() const { return gc_count_; } int gc_count() const { return gc_count_; }
...@@ -1600,6 +1602,8 @@ class Heap { ...@@ -1600,6 +1602,8 @@ class Heap {
bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; } bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
private: private:
class UnmapFreeMemoryTask;
static const int kInitialStringTableSize = 2048; static const int kInitialStringTableSize = 2048;
static const int kInitialEvalCacheSize = 64; static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256; static const int kInitialNumberStringCacheSize = 256;
...@@ -2277,6 +2281,8 @@ class Heap { ...@@ -2277,6 +2281,8 @@ class Heap {
MemoryChunk* chunks_queued_for_free_; MemoryChunk* chunks_queued_for_free_;
base::Semaphore pending_unmap_job_semaphore_;
base::Mutex relocation_mutex_; base::Mutex relocation_mutex_;
int gc_callbacks_depth_; int gc_callbacks_depth_;
......
...@@ -519,12 +519,15 @@ void MarkCompactCollector::EnsureSweepingCompleted() { ...@@ -519,12 +519,15 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
SweepInParallel(heap()->paged_space(CODE_SPACE), 0); SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
SweepInParallel(heap()->paged_space(MAP_SPACE), 0); SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
} }
// Wait twice for both jobs.
if (heap()->concurrent_sweeping_enabled()) { if (heap()->concurrent_sweeping_enabled()) {
pending_sweeper_jobs_semaphore_.Wait(); pending_sweeper_jobs_semaphore_.Wait();
pending_sweeper_jobs_semaphore_.Wait(); pending_sweeper_jobs_semaphore_.Wait();
pending_sweeper_jobs_semaphore_.Wait(); pending_sweeper_jobs_semaphore_.Wait();
} }
heap()->WaitUntilUnmappingOfFreeChunksCompleted();
ParallelSweepSpacesComplete(); ParallelSweepSpacesComplete();
sweeping_in_progress_ = false; sweeping_in_progress_ = false;
RefillFreeList(heap()->paged_space(OLD_SPACE)); RefillFreeList(heap()->paged_space(OLD_SPACE));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment