Commit f0e3d518 authored by hpayer, committed by Commit bot

Re-land "Concurrently unmap free pages."

BUG=chromium:507211
LOG=n

Review URL: https://codereview.chromium.org/1306183003

Cr-Commit-Position: refs/heads/master@{#30335}
parent 7fb31bdb
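Before the diff, a self-contained sketch of the pattern this CL re-lands: the main thread logically frees chunks, hands the queued list to a background task that does the actual unmapping, and later blocks on a semaphore until that task signals completion. Everything below (Semaphore, Chunk, the unmapper thread) is an illustrative stand-in, not V8 code.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

// Minimal counting semaphore, standing in for v8::base::Semaphore.
class Semaphore {
 public:
  explicit Semaphore(int count) : count_(count) {}
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_;
};

// Stand-in for MemoryChunk: an intrusively linked list of queued pages.
struct Chunk {
  int id;
  Chunk* next;
};

int main() {
  Semaphore unmap_done(0);
  Chunk c2{2, nullptr};
  Chunk c1{1, &c2};
  Chunk* queued = &c1;  // list built during GC, like chunks_queued_for_free_

  // Hand the whole list to a background "unmap" job, like UnmapFreeMemoryTask.
  std::thread unmapper([&] {
    for (Chunk* c = queued; c != nullptr; c = c->next) {
      std::printf("unmapping chunk %d\n", c->id);
    }
    unmap_done.Signal();  // tell the main thread the job has finished
  });

  unmap_done.Wait();  // like WaitUntilUnmappingOfFreeChunksCompleted()
  unmapper.join();
  return 0;
}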
@@ -135,6 +135,7 @@ Heap::Heap()
current_gc_flags_(Heap::kNoGCFlags),
external_string_table_(this),
chunks_queued_for_free_(NULL),
pending_unmap_job_semaphore_(0),
gc_callbacks_depth_(0),
deserialization_complete_(false),
concurrent_sweeping_enabled_(false),
@@ -6501,7 +6502,39 @@ void ExternalStringTable::TearDown() {
}
class Heap::UnmapFreeMemoryTask : public v8::Task {
public:
UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
: heap_(heap), head_(head) {}
virtual ~UnmapFreeMemoryTask() {}
private:
// v8::Task overrides.
void Run() override {
heap_->FreeQueuedChunks(head_);
heap_->pending_unmap_job_semaphore_.Signal();
}
Heap* heap_;
MemoryChunk* head_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
// We start an unmap job after sweeping and after compaction.
pending_unmap_job_semaphore_.Wait();
pending_unmap_job_semaphore_.Wait();
}
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
// PreFree logically frees the memory chunk. However, the actual freeing
// will happen on a separate thread sometime later.
isolate_->memory_allocator()->PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
chunk->set_next_chunk(chunks_queued_for_free_);
chunks_queued_for_free_ = chunk;
}
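WaitUntilUnmappingOfFreeChunksCompleted() waits twice because, per the comment above, one unmap job is started after sweeping and one after compaction, so two Signal() calls arrive per GC cycle. The queueing step itself is just a pre-free plus an intrusive list push; a small stand-alone sketch of that shape follows (Chunk, ChunkQueue and pre_freed are illustrative names, not V8's):

#include <cassert>

struct Chunk {
  Chunk* next = nullptr;
  bool pre_freed = false;  // stands in for the PRE_FREED page flag
};

struct ChunkQueue {
  Chunk* head = nullptr;  // stands in for chunks_queued_for_free_

  // Mirrors QueueMemoryChunkForFree(): logically free first, then push.
  void Queue(Chunk* chunk) {
    assert(!chunk->pre_freed);
    chunk->pre_freed = true;  // stands in for PreFreeMemory(): bookkeeping only
    chunk->next = head;       // chunk->set_next_chunk(chunks_queued_for_free_)
    head = chunk;             // chunks_queued_for_free_ = chunk
  }

  // The background job later receives the whole list at once.
  Chunk* TakeAll() {
    Chunk* list = head;
    head = nullptr;
    return list;
  }
};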
@@ -6515,19 +6548,32 @@ void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
next = chunk->next_chunk();
chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
}
isolate_->heap()->store_buffer()->Compact();
isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
store_buffer()->Compact();
store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
}
void Heap::FreeQueuedChunks() {
if (chunks_queued_for_free_ != NULL) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
v8::Platform::kShortRunningTask);
chunks_queued_for_free_ = NULL;
} else {
// If we do not have anything to unmap, we just signal the semaphore
// that we are done.
pending_unmap_job_semaphore_.Signal();
}
}
void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
MemoryChunk* next;
MemoryChunk* chunk;
for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
for (chunk = list_head; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
isolate_->memory_allocator()->Free(chunk);
isolate_->memory_allocator()->PerformFreeMemory(chunk);
}
chunks_queued_for_free_ = NULL;
}
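The branch above keeps the semaphore arithmetic honest: every call to the no-argument FreeQueuedChunks() produces exactly one Signal(), either from the background task when it finishes or immediately when there is nothing to unmap, so the two Wait() calls in WaitUntilUnmappingOfFreeChunksCompleted() stay matched as long as FreeQueuedChunks() runs twice per cycle, as the earlier comment describes. A compact stand-alone sketch of that rule, assuming C++20 for std::counting_semaphore; all names are illustrative, not V8's:

#include <semaphore>
#include <thread>

struct Chunk { Chunk* next = nullptr; };

struct UnmapQueue {
  Chunk* head = nullptr;
  std::counting_semaphore<> done{0};  // stands in for pending_unmap_job_semaphore_

  // Called after sweeping and after compaction; always worth one release().
  void FreeQueued() {
    if (head != nullptr) {
      Chunk* list = head;
      head = nullptr;
      std::thread([this, list] {
        for (Chunk* c = list; c != nullptr; c = c->next) {
          // ... actually unmap the chunk here ...
        }
        done.release();  // job finished
      }).detach();
    } else {
      done.release();  // nothing to unmap: signal right away
    }
  }

  void WaitUntilUnmappingCompleted() {
    done.acquire();  // one per FreeQueued() call made this cycle
    done.acquire();
  }
};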
@@ -1125,7 +1125,9 @@ class Heap {
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FilterStoreBufferEntriesOnAboutToBeFreedPages();
void FreeQueuedChunks(MemoryChunk* list_head);
void FreeQueuedChunks();
void WaitUntilUnmappingOfFreeChunksCompleted();
bool RecentIdleNotificationHappened();
@@ -1656,6 +1658,8 @@ class Heap {
#endif
private:
class UnmapFreeMemoryTask;
struct StrongRootsList;
struct StringTypeTable {
@@ -2337,6 +2341,8 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
base::Semaphore pending_unmap_job_semaphore_;
base::Mutex relocation_mutex_;
int gc_callbacks_depth_;
@@ -519,12 +519,15 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
}
// Wait twice for both jobs.
if (heap()->concurrent_sweeping_enabled()) {
pending_sweeper_jobs_semaphore_.Wait();
pending_sweeper_jobs_semaphore_.Wait();
pending_sweeper_jobs_semaphore_.Wait();
}
heap()->WaitUntilUnmappingOfFreeChunksCompleted();
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
RefillFreeList(heap()->paged_space(OLD_SPACE));
@@ -163,24 +163,28 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
}
}
// Sort and merge the free blocks on the free list and the allocation list.
free_list_.AddAll(allocation_list_);
allocation_list_.Clear();
free_list_.Sort(&CompareFreeBlockAddress);
for (int i = 0; i < free_list_.length();) {
FreeBlock merged = free_list_[i];
i++;
// Add adjacent free blocks to the current merged block.
while (i < free_list_.length() &&
free_list_[i].start == merged.start + merged.size) {
merged.size += free_list_[i].size;
{
base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
// Sort and merge the free blocks on the free list and the allocation list.
free_list_.AddAll(allocation_list_);
allocation_list_.Clear();
free_list_.Sort(&CompareFreeBlockAddress);
for (int i = 0; i < free_list_.length();) {
FreeBlock merged = free_list_[i];
i++;
// Add adjacent free blocks to the current merged block.
while (i < free_list_.length() &&
free_list_[i].start == merged.start + merged.size) {
merged.size += free_list_[i].size;
i++;
}
if (merged.size > 0) {
allocation_list_.Add(merged);
}
}
if (merged.size > 0) {
allocation_list_.Add(merged);
}
free_list_.Clear();
}
free_list_.Clear();
for (current_allocation_block_index_ = 0;
current_allocation_block_index_ < allocation_list_.length();
@@ -229,6 +233,7 @@ bool CodeRange::UncommitRawMemory(Address start, size_t length) {
void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
free_list_.Add(FreeBlock(address, length));
code_range_->Uncommit(address, length);
}
@@ -237,6 +242,7 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
void CodeRange::TearDown() {
delete code_range_; // Frees all memory in the virtual memory range.
code_range_ = NULL;
base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
free_list_.Free();
allocation_list_.Free();
}
@@ -264,7 +270,10 @@ bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
}
void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
void CodeRange::ReleaseBlock(const FreeBlock* block) {
base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
free_list_.Add(*block);
}
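These CodeRange changes all follow one rule: since a background unmapping thread can now return blocks through FreeRawMemory() and ReleaseBlock() while the main thread refills the allocation list, every touch of free_list_ happens under free_list_mutex_. A stand-alone sketch of that discipline with illustrative names (FreeBlock, FreeList), not V8's types:

#include <cstddef>
#include <mutex>
#include <vector>

struct FreeBlock {
  void* start;
  std::size_t size;
};

class FreeList {
 public:
  // Callable from any thread, e.g. a background unmapper returning a block.
  void Add(const FreeBlock& block) {
    std::lock_guard<std::mutex> guard(mutex_);  // mirrors base::LockGuard
    blocks_.push_back(block);
  }

  // Main thread: drain the free list to rebuild the allocation list,
  // analogous to the locked section in GetNextAllocationBlock().
  std::vector<FreeBlock> TakeAll() {
    std::lock_guard<std::mutex> guard(mutex_);
    std::vector<FreeBlock> out;
    out.swap(blocks_);
    return out;
  }

 private:
  std::mutex mutex_;  // mirrors free_list_mutex_
  std::vector<FreeBlock> blocks_;
};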
void CodeRange::ReserveEmergencyBlock() {
@@ -332,26 +341,30 @@ bool MemoryAllocator::CommitMemory(Address base, size_t size,
}
void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
void MemoryAllocator::FreeNewSpaceMemory(Address addr,
base::VirtualMemory* reservation,
Executability executable) {
LOG(isolate_, DeleteEvent("NewSpace", addr));
DCHECK(reservation->IsReserved());
size_t size = reservation->size();
const size_t size = reservation->size();
DCHECK(size_ >= size);
size_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
FreeMemory(reservation, NOT_EXECUTABLE);
}
if (executable == EXECUTABLE) {
DCHECK(size_executable_ >= size);
size_executable_ -= size;
}
void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
// Code which is part of the code-range does not have its own VirtualMemory.
DCHECK(isolate_->code_range() == NULL ||
!isolate_->code_range()->contains(
static_cast<Address>(reservation->address())));
DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
!isolate_->code_range()->valid() || size <= Page::kPageSize);
!isolate_->code_range()->valid() ||
reservation->size() <= Page::kPageSize);
reservation->Release();
}
@@ -360,15 +373,6 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
void MemoryAllocator::FreeMemory(Address base, size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
DCHECK(size_ >= size);
size_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
if (executable == EXECUTABLE) {
DCHECK(size_executable_ >= size);
size_executable_ -= size;
}
if (isolate_->code_range() != NULL &&
isolate_->code_range()->contains(static_cast<Address>(base))) {
DCHECK(executable == EXECUTABLE);
@@ -742,7 +746,8 @@ LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
}
void MemoryAllocator::Free(MemoryChunk* chunk) {
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
if (chunk->owner() != NULL) {
ObjectSpace space =
@@ -753,9 +758,29 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
delete chunk->slots_buffer();
delete chunk->skip_list();
delete chunk->mutex();
size_t size;
base::VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
size = reservation->size();
} else {
size = chunk->size();
}
DCHECK(size_ >= size);
size_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
if (chunk->executable() == EXECUTABLE) {
DCHECK(size_executable_ >= size);
size_executable_ -= size;
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
}
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllocatedMemory();
base::VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
@@ -766,6 +791,12 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
}
void MemoryAllocator::Free(MemoryChunk* chunk) {
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
}
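The three functions above define the split this CL relies on: PreFreeMemory() does the accounting and flags the chunk while still on the main thread, PerformFreeMemory() releases the memory and is safe to run later on another thread, and Free() simply chains the two for the synchronous case. A self-contained sketch of the same shape with toy types (Chunk and Allocator are illustrative, not V8's):

#include <cassert>
#include <cstddef>
#include <cstdlib>

struct Chunk {
  void* memory = nullptr;
  std::size_t size = 0;
  bool pre_freed = false;  // stands in for the PRE_FREED flag
};

struct Allocator {
  std::size_t allocated_bytes = 0;

  void PreFree(Chunk* chunk) {  // main thread: bookkeeping only
    assert(!chunk->pre_freed);
    allocated_bytes -= chunk->size;
    chunk->pre_freed = true;
  }

  void PerformFree(Chunk* chunk) {  // may run on a background thread
    assert(chunk->pre_freed);
    std::free(chunk->memory);
    chunk->memory = nullptr;
  }

  void Free(Chunk* chunk) {  // synchronous wrapper, like MemoryAllocator::Free()
    PreFree(chunk);
    PerformFree(chunk);
  }
};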
bool MemoryAllocator::CommitBlock(Address start, size_t size,
Executability executable) {
if (!CommitMemory(start, size, executable)) return false;
@@ -918,6 +949,13 @@ void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
}
void MemoryChunk::ReleaseAllocatedMemory() {
delete slots_buffer_;
delete skip_list_;
delete mutex_;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -1284,11 +1322,9 @@ void NewSpace::TearDown() {
to_space_.TearDown();
from_space_.TearDown();
LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
chunk_base_, &reservation_, NOT_EXECUTABLE);
DCHECK(reservation_.IsReserved());
heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
NOT_EXECUTABLE);
chunk_base_ = NULL;
chunk_size_ = 0;
}
@@ -386,6 +386,10 @@ class MemoryChunk {
// candidates selection cycle.
FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
// The memory chunk is already logically freed, however the actual freeing
// still has to be performed.
PRE_FREED,
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
@@ -657,6 +661,9 @@ class MemoryChunk {
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory() { return high_water_mark_; }
// Should be called when memory chunk is about to be freed.
void ReleaseAllocatedMemory();
static inline void UpdateHighWaterMark(Address mark) {
if (mark == NULL) return;
// Need to subtract one from the mark because when a chunk is full the
@@ -980,10 +987,15 @@ class CodeRange {
size_t size;
};
// All access to free_list_ requires taking the free_list_mutex_. GC threads
// may access the free_list_ concurrently with the main thread.
base::Mutex free_list_mutex_;
// Freed blocks of memory are added to the free list. When the allocation
// list is exhausted, the free list is sorted and merged to make the new
// allocation list.
List<FreeBlock> free_list_;
// Memory is allocated from the free blocks on the allocation list.
// The block at current_allocation_block_index_ is the current block.
List<FreeBlock> allocation_list_;
@@ -1079,6 +1091,15 @@ class MemoryAllocator {
LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
Executability executable);
// PreFree logically frees the object, i.e., it takes care of the size
// bookkeeping and calls the allocation callback.
void PreFreeMemory(MemoryChunk* chunk);
// PerformFreeMemory can be called concurrently once PreFreeMemory has been
// executed for the chunk.
void PerformFreeMemory(MemoryChunk* chunk);
// Free is a wrapper method, which calls PreFree and PerformFreeMemory
// together.
void Free(MemoryChunk* chunk);
// Returns the maximum available bytes of heaps.
@@ -1128,6 +1149,8 @@ class MemoryAllocator {
bool CommitMemory(Address addr, size_t size, Executability executable);
void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
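Finally, a hypothetical caller, only to illustrate the PreFreeMemory() / PerformFreeMemory() / Free() contract declared a few lines above (it assumes V8's spaces.h and is not part of this CL): the synchronous path uses Free(), while the deferred path calls PreFreeMemory() up front and leaves PerformFreeMemory() to a background task, exactly the split QueueMemoryChunkForFree() and UnmapFreeMemoryTask::Run() use in heap.cc.

// Hypothetical helpers, not part of this CL.
void FreeNow(MemoryAllocator* allocator, MemoryChunk* chunk) {
  allocator->Free(chunk);  // PreFreeMemory() + PerformFreeMemory() in one step
}

void FreeLater(MemoryAllocator* allocator, MemoryChunk* chunk) {
  allocator->PreFreeMemory(chunk);  // bookkeeping now, on the main thread
  // ... queue 'chunk'; a background task later calls:
  // allocator->PerformFreeMemory(chunk);
}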