Commit f0e3d518 authored by hpayer, committed by Commit bot

Re-land "Concurrently unmap free pages."

BUG=chromium:507211
LOG=n

Review URL: https://codereview.chromium.org/1306183003

Cr-Commit-Position: refs/heads/master@{#30335}
parent 7fb31bdb
@@ -135,6 +135,7 @@ Heap::Heap()
       current_gc_flags_(Heap::kNoGCFlags),
       external_string_table_(this),
       chunks_queued_for_free_(NULL),
+      pending_unmap_job_semaphore_(0),
       gc_callbacks_depth_(0),
       deserialization_complete_(false),
       concurrent_sweeping_enabled_(false),
@@ -6501,7 +6502,39 @@ void ExternalStringTable::TearDown() {
 }
 
 
+class Heap::UnmapFreeMemoryTask : public v8::Task {
+ public:
+  UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
+      : heap_(heap), head_(head) {}
+  virtual ~UnmapFreeMemoryTask() {}
+
+ private:
+  // v8::Task overrides.
+  void Run() override {
+    heap_->FreeQueuedChunks(head_);
+    heap_->pending_unmap_job_semaphore_.Signal();
+  }
+
+  Heap* heap_;
+  MemoryChunk* head_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+
+void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
+  // We start an unmap job after sweeping and after compaction.
+  pending_unmap_job_semaphore_.Wait();
+  pending_unmap_job_semaphore_.Wait();
+}
+
+
 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  // PreFree logically frees the memory chunk. However, the actual freeing
+  // will happen on a separate thread sometime later.
+  isolate_->memory_allocator()->PreFreeMemory(chunk);
+
+  // The chunks added to this queue will be freed by a concurrent thread.
   chunk->set_next_chunk(chunks_queued_for_free_);
   chunks_queued_for_free_ = chunk;
 }
@@ -6515,19 +6548,32 @@ void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
     next = chunk->next_chunk();
     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
   }
-  isolate_->heap()->store_buffer()->Compact();
-  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+  store_buffer()->Compact();
+  store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
 }
 
 
 void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ != NULL) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
+        v8::Platform::kShortRunningTask);
+    chunks_queued_for_free_ = NULL;
+  } else {
+    // If we do not have anything to unmap, we just signal the semaphore
+    // that we are done.
+    pending_unmap_job_semaphore_.Signal();
+  }
+}
+
+
+void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
   MemoryChunk* next;
   MemoryChunk* chunk;
-  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+  for (chunk = list_head; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
-    isolate_->memory_allocator()->Free(chunk);
+    isolate_->memory_allocator()->PerformFreeMemory(chunk);
   }
-  chunks_queued_for_free_ = NULL;
 }
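A note on the handshake above: FreeQueuedChunks() accounts for exactly one Signal() per call, either from the background task when it finishes or inline when the queue is empty, and it runs once after sweeping and once after compaction. That is why WaitUntilUnmappingOfFreeChunksCompleted() can unconditionally Wait() twice. A minimal standalone sketch of the protocol (not V8 code; std::counting_semaphore and std::thread stand in for base::Semaphore and the platform's background tasks, and the per-chunk work is elided):

#include <semaphore>
#include <thread>
#include <vector>

// Stand-ins for V8's base::Semaphore and background-task plumbing (C++20).
std::counting_semaphore<2> pending_unmap_done(0);
std::vector<std::thread> workers;

// Mirrors Heap::FreeQueuedChunks(): every call produces exactly one signal,
// either from the finished background task or inline when the queue is empty.
void FreeQueuedChunksSketch(bool queue_nonempty) {
  if (queue_nonempty) {
    workers.emplace_back([] {
      // ... PerformFreeMemory() on each queued chunk ...
      pending_unmap_done.release();  // as in UnmapFreeMemoryTask::Run()
    });
  } else {
    pending_unmap_done.release();  // nothing to unmap, signal immediately
  }
}

int main() {
  FreeQueuedChunksSketch(true);   // job started after sweeping
  FreeQueuedChunksSketch(false);  // job started after compaction
  pending_unmap_done.acquire();   // WaitUntilUnmappingOfFreeChunksCompleted()
  pending_unmap_done.acquire();   // waits once per job
  for (auto& t : workers) t.join();
}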
...
@@ -1125,7 +1125,9 @@ class Heap {
   void QueueMemoryChunkForFree(MemoryChunk* chunk);
   void FilterStoreBufferEntriesOnAboutToBeFreedPages();
+  void FreeQueuedChunks(MemoryChunk* list_head);
   void FreeQueuedChunks();
+  void WaitUntilUnmappingOfFreeChunksCompleted();
 
   bool RecentIdleNotificationHappened();
 
@@ -1656,6 +1658,8 @@ class Heap {
 #endif
 
  private:
+  class UnmapFreeMemoryTask;
+
   struct StrongRootsList;
 
   struct StringTypeTable {
@@ -2337,6 +2341,8 @@ class Heap {
   MemoryChunk* chunks_queued_for_free_;
 
+  base::Semaphore pending_unmap_job_semaphore_;
+
   base::Mutex relocation_mutex_;
 
   int gc_callbacks_depth_;
...
@@ -519,12 +519,15 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
     SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
     SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
   }
+  // Wait twice for both jobs.
   if (heap()->concurrent_sweeping_enabled()) {
     pending_sweeper_jobs_semaphore_.Wait();
     pending_sweeper_jobs_semaphore_.Wait();
     pending_sweeper_jobs_semaphore_.Wait();
   }
+  heap()->WaitUntilUnmappingOfFreeChunksCompleted();
+
   ParallelSweepSpacesComplete();
   sweeping_in_progress_ = false;
   RefillFreeList(heap()->paged_space(OLD_SPACE));
...
@@ -163,24 +163,28 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
     }
   }
 
-  // Sort and merge the free blocks on the free list and the allocation list.
-  free_list_.AddAll(allocation_list_);
-  allocation_list_.Clear();
-  free_list_.Sort(&CompareFreeBlockAddress);
-  for (int i = 0; i < free_list_.length();) {
-    FreeBlock merged = free_list_[i];
-    i++;
-    // Add adjacent free blocks to the current merged block.
-    while (i < free_list_.length() &&
-           free_list_[i].start == merged.start + merged.size) {
-      merged.size += free_list_[i].size;
+  {
+    base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
+
+    // Sort and merge the free blocks on the free list and the allocation list.
+    free_list_.AddAll(allocation_list_);
+    allocation_list_.Clear();
+    free_list_.Sort(&CompareFreeBlockAddress);
+    for (int i = 0; i < free_list_.length();) {
+      FreeBlock merged = free_list_[i];
       i++;
+      // Add adjacent free blocks to the current merged block.
+      while (i < free_list_.length() &&
+             free_list_[i].start == merged.start + merged.size) {
+        merged.size += free_list_[i].size;
+        i++;
+      }
+      if (merged.size > 0) {
+        allocation_list_.Add(merged);
+      }
     }
-    if (merged.size > 0) {
-      allocation_list_.Add(merged);
-    }
+    free_list_.Clear();
   }
-  free_list_.Clear();
 
   for (current_allocation_block_index_ = 0;
        current_allocation_block_index_ < allocation_list_.length();
@@ -229,6 +233,7 @@ bool CodeRange::UncommitRawMemory(Address start, size_t length) {
 
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
+  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }
@@ -237,6 +242,7 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
 void CodeRange::TearDown() {
   delete code_range_;  // Frees all memory in the virtual memory range.
   code_range_ = NULL;
+  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
   free_list_.Free();
   allocation_list_.Free();
 }
@@ -264,7 +270,10 @@ bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
 }
 
 
-void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
+void CodeRange::ReleaseBlock(const FreeBlock* block) {
+  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
+  free_list_.Add(*block);
+}
 
 
 void CodeRange::ReserveEmergencyBlock() {
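The CodeRange hunks above all apply one rule: every touch of free_list_ now happens under free_list_mutex_, because PerformFreeMemory() can return code-range pages from a GC thread while the main thread is allocating. A generic sketch of that guard, assuming std::mutex and std::lock_guard in place of base::Mutex and base::LockGuard, with FreeBlock and the list operations simplified:

#include <cstddef>
#include <mutex>
#include <vector>

struct FreeBlock { char* start; std::size_t size; };

class CodeRangeSketch {
 public:
  // Called from GC threads, so the list must be guarded.
  void FreeRawMemory(char* address, std::size_t length) {
    std::lock_guard<std::mutex> guard(free_list_mutex_);
    free_list_.push_back(FreeBlock{address, length});
  }

  // Main-thread allocation path takes the same lock before merging blocks.
  void MergeFreeBlocks(std::vector<FreeBlock>* allocation_list) {
    std::lock_guard<std::mutex> guard(free_list_mutex_);
    allocation_list->insert(allocation_list->end(), free_list_.begin(),
                            free_list_.end());
    free_list_.clear();
  }

 private:
  std::mutex free_list_mutex_;  // guards free_list_, as in the hunks above
  std::vector<FreeBlock> free_list_;
};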
@@ -332,26 +341,30 @@ bool MemoryAllocator::CommitMemory(Address base, size_t size,
 }
 
 
-void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
-                                 Executability executable) {
-  // TODO(gc) make code_range part of memory allocator?
+void MemoryAllocator::FreeNewSpaceMemory(Address addr,
+                                         base::VirtualMemory* reservation,
+                                         Executability executable) {
+  LOG(isolate_, DeleteEvent("NewSpace", addr));
+
   DCHECK(reservation->IsReserved());
-  size_t size = reservation->size();
+  const size_t size = reservation->size();
   DCHECK(size_ >= size);
   size_ -= size;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  FreeMemory(reservation, NOT_EXECUTABLE);
+}
 
-  if (executable == EXECUTABLE) {
-    DCHECK(size_executable_ >= size);
-    size_executable_ -= size;
-  }
+
+void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
   // Code which is part of the code-range does not have its own VirtualMemory.
   DCHECK(isolate_->code_range() == NULL ||
          !isolate_->code_range()->contains(
              static_cast<Address>(reservation->address())));
   DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() || size <= Page::kPageSize);
+         !isolate_->code_range()->valid() ||
+         reservation->size() <= Page::kPageSize);
 
   reservation->Release();
 }
@@ -360,15 +373,6 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
 
 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  DCHECK(size_ >= size);
-  size_ -= size;
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
-  if (executable == EXECUTABLE) {
-    DCHECK(size_executable_ >= size);
-    size_executable_ -= size;
-  }
   if (isolate_->code_range() != NULL &&
       isolate_->code_range()->contains(static_cast<Address>(base))) {
     DCHECK(executable == EXECUTABLE);
@@ -742,7 +746,8 @@ LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
 }
 
 
-void MemoryAllocator::Free(MemoryChunk* chunk) {
+void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
+  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
   if (chunk->owner() != NULL) {
     ObjectSpace space =
@@ -753,9 +758,29 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
 
-  delete chunk->slots_buffer();
-  delete chunk->skip_list();
-  delete chunk->mutex();
+  size_t size;
+  base::VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    size = reservation->size();
+  } else {
+    size = chunk->size();
+  }
+  DCHECK(size_ >= size);
+  size_ -= size;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  if (chunk->executable() == EXECUTABLE) {
+    DCHECK(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+
+  chunk->SetFlag(MemoryChunk::PRE_FREED);
+}
+
+
+void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+  chunk->ReleaseAllocatedMemory();
 
   base::VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
@@ -766,6 +791,12 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
 }
 
 
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+  PreFreeMemory(chunk);
+  PerformFreeMemory(chunk);
+}
+
+
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;
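The split of Free() into PreFreeMemory() and PerformFreeMemory() is what makes the background task safe: all isolate-global bookkeeping (size_, size_executable_, counters, logging) stays on the main thread, while the deferred half only touches the chunk itself. A reduced sketch of the two-phase protocol, with asserts standing in for the PRE_FREED DCHECKs (names and bookkeeping simplified; not V8 code):

#include <cassert>
#include <cstddef>
#include <cstdlib>

struct Chunk {
  void* payload = nullptr;
  bool pre_freed = false;  // stands in for the PRE_FREED chunk flag
};

// Main thread only: logical free. Updates allocator bookkeeping while no
// other thread can observe the chunk, then marks it PRE_FREED.
void PreFreeMemory(Chunk* chunk, std::size_t* allocated_bytes,
                   std::size_t size) {
  assert(!chunk->pre_freed);
  assert(*allocated_bytes >= size);
  *allocated_bytes -= size;  // size_ -= size; counters; logging...
  chunk->pre_freed = true;   // chunk->SetFlag(MemoryChunk::PRE_FREED);
}

// Safe on a background thread once PreFreeMemory has run: only touches the
// chunk itself, never isolate-global state.
void PerformFreeMemory(Chunk* chunk) {
  assert(chunk->pre_freed);
  std::free(chunk->payload);  // chunk->ReleaseAllocatedMemory(); unmap pages
  chunk->payload = nullptr;
}

// Synchronous wrapper, as in MemoryAllocator::Free().
void FreeChunk(Chunk* chunk, std::size_t* allocated_bytes, std::size_t size) {
  PreFreeMemory(chunk, allocated_bytes, size);
  PerformFreeMemory(chunk);
}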
@@ -918,6 +949,13 @@ void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
 }
 
 
+void MemoryChunk::ReleaseAllocatedMemory() {
+  delete slots_buffer_;
+  delete skip_list_;
+  delete mutex_;
+}
+
+
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
@@ -1284,11 +1322,9 @@ void NewSpace::TearDown() {
   to_space_.TearDown();
   from_space_.TearDown();
 
-  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
-
-  DCHECK(reservation_.IsReserved());
-  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
-                                                    NOT_EXECUTABLE);
+  heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
+      chunk_base_, &reservation_, NOT_EXECUTABLE);
+
   chunk_base_ = NULL;
   chunk_size_ = 0;
 }
...
@@ -386,6 +386,10 @@ class MemoryChunk {
     //   candidates selection cycle.
     FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
 
+    // The memory chunk is already logically freed, however the actual freeing
+    // still has to be performed.
+    PRE_FREED,
+
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
@@ -657,6 +661,9 @@ class MemoryChunk {
   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory() { return high_water_mark_; }
 
+  // Should be called when memory chunk is about to be freed.
+  void ReleaseAllocatedMemory();
+
   static inline void UpdateHighWaterMark(Address mark) {
     if (mark == NULL) return;
     // Need to subtract one from the mark because when a chunk is full the
@@ -980,10 +987,15 @@ class CodeRange {
     size_t size;
   };
 
+  // All access to free_list_ requires taking free_list_mutex_, since GC
+  // threads may access free_list_ concurrently with the main thread.
+  base::Mutex free_list_mutex_;
+
   // Freed blocks of memory are added to the free list. When the allocation
   // list is exhausted, the free list is sorted and merged to make the new
   // allocation list.
   List<FreeBlock> free_list_;
+
   // Memory is allocated from the free blocks on the allocation list.
   // The block at current_allocation_block_index_ is the current block.
   List<FreeBlock> allocation_list_;
@@ -1079,6 +1091,15 @@ class MemoryAllocator {
   LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                                Executability executable);
 
+  // PreFreeMemory logically frees the chunk, i.e., it takes care of the size
+  // bookkeeping and calls the allocation callback.
+  void PreFreeMemory(MemoryChunk* chunk);
+
+  // PerformFreeMemory actually frees the chunk and may run concurrently,
+  // provided PreFreeMemory was executed before.
+  void PerformFreeMemory(MemoryChunk* chunk);
+
+  // Free is a wrapper method that calls PreFreeMemory and PerformFreeMemory
+  // together.
   void Free(MemoryChunk* chunk);
 
   // Returns the maximum available bytes of heaps.
@@ -1128,6 +1149,8 @@ class MemoryAllocator {
 
   bool CommitMemory(Address addr, size_t size, Executability executable);
 
+  void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
+                          Executability executable);
+
   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
...