Commit 7af79ae6 authored by mlippautz, committed by Commit bot

Reland "[heap] Introduce parallel compaction algorithm."

This reverts commit 7a0a0b8b.

- The number of parallel tasks is still 1, i.e., we only compact on the main
  thread.
- Remove emergency memory (PagedSpace and CodeRange).
- Introduce partial compaction of pages.
- Logic for multiple tasks is in place.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1356533002

Cr-Commit-Position: refs/heads/master@{#30796}
parent 5f44a910
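The commit message above describes the key idea: each evacuation candidate page carries an atomic compaction state, so any number of tasks can race to claim pages, and a page whose objects no longer fit is only partially compacted and finalized accordingly. The following is a minimal, stand-alone sketch of that claiming protocol, not V8 code; names such as `PageState`, `TryClaim`, and `EvacuateAllObjects` are illustrative stand-ins for the types changed in the diff below.

```cpp
#include <atomic>

// Simplified mirror of MemoryChunk::ParallelCompactingState.
enum class PageState { kDone, kInProgress, kFinalize, kAborted };

struct PageSketch {
  std::atomic<PageState> state{PageState::kDone};
  bool is_evacuation_candidate = true;
};

// Returns true if this task claimed the page; losers simply skip it.
bool TryClaim(PageSketch* p) {
  PageState expected = PageState::kDone;
  return p->state.compare_exchange_strong(expected, PageState::kInProgress);
}

// Each compaction task walks the shared candidate list and only works on
// pages it managed to claim. EvacuateAllObjects stands in for
// EvacuateLiveObjectsFromPage and reports whether every live object fit into
// the task-local compaction space.
void CompactTask(PageSketch* pages, int num_pages,
                 bool (*EvacuateAllObjects)(PageSketch*)) {
  for (int i = 0; i < num_pages; i++) {
    PageSketch* p = &pages[i];
    if (!TryClaim(p)) continue;          // some other task owns this page
    if (!p->is_evacuation_candidate) {   // e.g. popular pages: nothing to do
      p->state.store(PageState::kDone);
      continue;
    }
    p->state.store(EvacuateAllObjects(p) ? PageState::kFinalize
                                         : PageState::kAborted);
  }
}

// The main thread later finalizes sequentially: kFinalize pages are swept and
// unlinked, while kAborted pages keep their evacuation-candidate flag and are
// re-scanned, since only part of their objects may have moved.
```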
@@ -57,7 +57,8 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       sweeping_in_progress_(false),
       parallel_compaction_in_progress_(false),
-      pending_compaction_jobs_semaphore_(0),
+      pending_compaction_tasks_semaphore_(0),
+      concurrent_compaction_tasks_active_(0),
       evacuation_(false),
       slots_buffer_allocator_(nullptr),
       migration_slots_buffer_(nullptr),
@@ -474,21 +475,21 @@ void MarkCompactCollector::ClearMarkbits() {
 class MarkCompactCollector::CompactionTask : public v8::Task {
  public:
-  explicit CompactionTask(Heap* heap) : heap_(heap) {}
+  explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
+      : heap_(heap), spaces_(spaces) {}

   virtual ~CompactionTask() {}

  private:
   // v8::Task overrides.
   void Run() override {
-    // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and can just be
-    // called by one thread concurrently.
-    heap_->mark_compact_collector()->EvacuatePages();
+    heap_->mark_compact_collector()->EvacuatePages(spaces_);
     heap_->mark_compact_collector()
-        ->pending_compaction_jobs_semaphore_.Signal();
+        ->pending_compaction_tasks_semaphore_.Signal();
   }

   Heap* heap_;
+  CompactionSpaceCollection* spaces_;

   DISALLOW_COPY_AND_ASSIGN(CompactionTask);
 };
@@ -3325,11 +3326,10 @@ void MarkCompactCollector::EvacuateNewSpace() {
 }


-void MarkCompactCollector::EvacuateLiveObjectsFromPage(
+bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
     Page* p, PagedSpace* target_space) {
   AlwaysAllocateScope always_allocate(isolate());
   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-  p->SetWasSwept();

   int offsets[16];
@@ -3350,17 +3350,8 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(
       HeapObject* target_object = nullptr;
       AllocationResult allocation = target_space->AllocateRaw(size, alignment);
       if (!allocation.To(&target_object)) {
-        // If allocation failed, use emergency memory and re-try allocation.
-        CHECK(target_space->HasEmergencyMemory());
-        target_space->UseEmergencyMemory();
-        allocation = target_space->AllocateRaw(size, alignment);
-      }
-      if (!allocation.To(&target_object)) {
-        // OS refused to give us memory.
-        V8::FatalProcessOutOfMemory("Evacuation");
-        return;
+        return false;
       }
       MigrateObject(target_object, object, size, target_space->identity());
       DCHECK(object->map_word().IsForwardingAddress());
     }
@@ -3369,80 +3360,142 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(
     *cell = 0;
   }
   p->ResetLiveBytes();
+  return true;
 }


 void MarkCompactCollector::EvacuatePagesInParallel() {
+  if (evacuation_candidates_.length() == 0) return;
+
+  int num_tasks = 1;
+  if (FLAG_parallel_compaction) {
+    num_tasks = NumberOfParallelCompactionTasks();
+  }
+
+  // Set up compaction spaces.
+  CompactionSpaceCollection** compaction_spaces_for_tasks =
+      new CompactionSpaceCollection*[num_tasks];
+  for (int i = 0; i < num_tasks; i++) {
+    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+  }
+
+  compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
+      heap()->old_space());
+  compaction_spaces_for_tasks[0]
+      ->Get(CODE_SPACE)
+      ->MoveOverFreeMemory(heap()->code_space());
+
   parallel_compaction_in_progress_ = true;
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new CompactionTask(heap()), v8::Platform::kShortRunningTask);
+  // Kick off parallel tasks.
+  for (int i = 1; i < num_tasks; i++) {
+    concurrent_compaction_tasks_active_++;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+        v8::Platform::kShortRunningTask);
+  }
+
+  // Contribute in main thread. Counter and signal are in principal not needed.
+  concurrent_compaction_tasks_active_++;
+  EvacuatePages(compaction_spaces_for_tasks[0]);
+  pending_compaction_tasks_semaphore_.Signal();
+
+  WaitUntilCompactionCompleted();
+
+  // Merge back memory (compacted and unused) from compaction spaces.
+  for (int i = 0; i < num_tasks; i++) {
+    heap()->old_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+    heap()->code_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+    delete compaction_spaces_for_tasks[i];
+  }
+  delete[] compaction_spaces_for_tasks;
+
+  // Finalize sequentially.
+  const int num_pages = evacuation_candidates_.length();
+  int abandoned_pages = 0;
+  for (int i = 0; i < num_pages; i++) {
+    Page* p = evacuation_candidates_[i];
+    switch (p->parallel_compaction_state().Value()) {
+      case MemoryChunk::ParallelCompactingState::kCompactingAborted:
+        // We have partially compacted the page, i.e., some objects may have
+        // moved, others are still in place.
+        // We need to:
+        // - Leave the evacuation candidate flag for later processing of
+        //   slots buffer entries.
+        // - Leave the slots buffer there for processing of entries added by
+        //   the write barrier.
+        // - Rescan the page as slot recording in the migration buffer only
+        //   happens upon moving (which we potentially didn't do).
+        // - Leave the page in the list of pages of a space since we could not
+        //   fully evacuate it.
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetFlag(Page::RESCAN_ON_EVACUATION);
+        abandoned_pages++;
+        break;
+      case MemoryChunk::kCompactingFinalize:
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetWasSwept();
+        p->Unlink();
+        break;
+      case MemoryChunk::kCompactingDone:
+        DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
+        DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+        break;
+      default:
+        // We should not observe kCompactingInProgress, or kCompactingDone.
+        UNREACHABLE();
+    }
+    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+  }
+  if (num_pages > 0) {
+    if (FLAG_trace_fragmentation) {
+      if (abandoned_pages != 0) {
+        PrintF(
+            "  Abandoned (at least partially) %d out of %d page compactions due"
+            " to lack of memory\n",
+            abandoned_pages, num_pages);
+      } else {
+        PrintF("  Compacted %d pages\n", num_pages);
+      }
+    }
+  }
 }
 void MarkCompactCollector::WaitUntilCompactionCompleted() {
-  pending_compaction_jobs_semaphore_.Wait();
+  while (concurrent_compaction_tasks_active_-- > 0) {
+    pending_compaction_tasks_semaphore_.Wait();
+  }
   parallel_compaction_in_progress_ = false;
 }
-void MarkCompactCollector::EvacuatePages() {
-  int npages = evacuation_candidates_.length();
-  int abandoned_pages = 0;
-  for (int i = 0; i < npages; i++) {
+void MarkCompactCollector::EvacuatePages(
+    CompactionSpaceCollection* compaction_spaces) {
+  for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     DCHECK(static_cast<int>(p->parallel_sweeping()) ==
            MemoryChunk::SWEEPING_DONE);
-    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    // Allocate emergency memory for the case when compaction fails due to out
-    // of memory.
-    if (!space->HasEmergencyMemory()) {
-      space->CreateEmergencyMemory();  // If the OS lets us.
-    }
-    if (p->IsEvacuationCandidate()) {
-      // During compaction we might have to request a new page in order to free
-      // up a page. Check that we actually got an emergency page above so we
-      // can guarantee that this succeeds.
-      if (space->HasEmergencyMemory()) {
-        EvacuateLiveObjectsFromPage(p, static_cast<PagedSpace*>(p->owner()));
-        // Unlink the page from the list of pages here. We must not iterate
-        // over that page later (e.g. when scan on scavenge pages are
-        // processed). The page itself will be freed later and is still
-        // reachable from the evacuation candidates list.
-        p->Unlink();
-      } else {
-        // Without room for expansion evacuation is not guaranteed to succeed.
-        // Pessimistically abandon unevacuated pages.
-        for (int j = i; j < npages; j++) {
-          Page* page = evacuation_candidates_[j];
-          slots_buffer_allocator_->DeallocateChain(
-              page->slots_buffer_address());
-          page->ClearEvacuationCandidate();
-          page->SetFlag(Page::RESCAN_ON_EVACUATION);
-        }
-        abandoned_pages = npages - i;
-        break;
-      }
-    }
-  }
-  if (npages > 0) {
-    // Release emergency memory.
-    PagedSpaces spaces(heap());
-    for (PagedSpace* space = spaces.next(); space != NULL;
-         space = spaces.next()) {
-      if (space->HasEmergencyMemory()) {
-        space->FreeEmergencyMemory();
-      }
-    }
-    if (FLAG_trace_fragmentation) {
-      if (abandoned_pages != 0) {
-        PrintF(
-            "  Abandon %d out of %d page defragmentations due to lack of "
-            "memory\n",
-            abandoned_pages, npages);
-      } else {
-        PrintF("  Defragmented %d pages\n", npages);
+    if (p->parallel_compaction_state().TrySetValue(
+            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+      if (p->IsEvacuationCandidate()) {
+        DCHECK_EQ(p->parallel_compaction_state().Value(),
+                  MemoryChunk::kCompactingInProgress);
+        if (EvacuateLiveObjectsFromPage(
+                p, compaction_spaces->Get(p->owner()->identity()))) {
+          p->parallel_compaction_state().SetValue(
+              MemoryChunk::kCompactingFinalize);
+        } else {
+          p->parallel_compaction_state().SetValue(
+              MemoryChunk::kCompactingAborted);
+        }
+      } else {
+        // There could be popular pages in the list of evacuation candidates
+        // which we do compact.
+        p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
       }
     }
   }
 }
@@ -3631,12 +3684,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuationScope evacuation_scope(this);
-    if (FLAG_parallel_compaction) {
-      EvacuatePagesInParallel();
-      WaitUntilCompactionCompleted();
-    } else {
-      EvacuatePages();
-    }
+    EvacuatePagesInParallel();
   }

   // Second pass: find pointers to new space and update them.
@@ -3696,13 +3744,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
         PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
                SlotsBuffer::SizeOfChain(p->slots_buffer()));
       }
+      slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());

       // Important: skip list should be cleared only after roots were updated
       // because root iteration traverses the stack and might have to find
       // code objects from non-updated pc pointing into evacuation candidate.
       SkipList* list = p->skip_list();
       if (list != NULL) list->Clear();
-    } else {
+    }
+    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
       if (FLAG_gc_verbose) {
         PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                reinterpret_cast<intptr_t>(p));
@@ -3732,6 +3782,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
           break;
       }
     }
+    if (p->IsEvacuationCandidate() &&
+        p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+      // Case where we've aborted compacting a page. Clear the flag here to
+      // avoid release the page later on.
+      p->ClearEvacuationCandidate();
+    }
   }
 }
@@ -3778,7 +3834,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
-    slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
     p->ResetLiveBytes();
     space->ReleasePage(p);
   }
@@ -4394,10 +4449,6 @@ void MarkCompactCollector::SweepSpaces() {
   // Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();

-  CodeRange* code_range = heap()->isolate()->code_range();
-  if (code_range != NULL && code_range->valid()) {
-    code_range->ReserveEmergencyBlock();
-  }
-
   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
...
@@ -554,8 +554,11 @@ class MarkCompactCollector {
   // Synchronize sweeper threads.
   base::Semaphore pending_sweeper_jobs_semaphore_;

-  // Synchronize compaction threads.
-  base::Semaphore pending_compaction_jobs_semaphore_;
+  // Synchronize compaction tasks.
+  base::Semaphore pending_compaction_tasks_semaphore_;
+
+  // Number of active compaction tasks (including main thread).
+  intptr_t concurrent_compaction_tasks_active_;

   bool evacuation_;
@@ -713,12 +716,17 @@ class MarkCompactCollector {
   void EvacuateNewSpace();

-  void EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
+  bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);

-  void EvacuatePages();
+  void EvacuatePages(CompactionSpaceCollection* compaction_spaces);

   void EvacuatePagesInParallel();

+  int NumberOfParallelCompactionTasks() {
+    // TODO(hpayer, mlippautz): Figure out some logic to determine the number
+    // of compaction tasks.
+    return 1;
+  }
+
   void WaitUntilCompactionCompleted();

   void EvacuateNewSpaceAndCandidates();
...
@@ -80,8 +80,7 @@ CodeRange::CodeRange(Isolate* isolate)
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0),
-      emergency_block_() {}
+      current_allocation_block_index_(0) {}


 bool CodeRange::SetUp(size_t requested) {
@@ -140,7 +139,6 @@ bool CodeRange::SetUp(size_t requested) {
   current_allocation_block_index_ = 0;

   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
-  ReserveEmergencyBlock();
   return true;
 }
@@ -276,24 +274,6 @@ void CodeRange::ReleaseBlock(const FreeBlock* block) {
 }


-void CodeRange::ReserveEmergencyBlock() {
-  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
-  if (emergency_block_.size == 0) {
-    ReserveBlock(requested_size, &emergency_block_);
-  } else {
-    DCHECK(emergency_block_.size >= requested_size);
-  }
-}
-
-
-void CodeRange::ReleaseEmergencyBlock() {
-  if (emergency_block_.size != 0) {
-    ReleaseBlock(&emergency_block_);
-    emergency_block_.size = 0;
-  }
-}
-
-
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
@@ -492,6 +472,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->set_parallel_sweeping(SWEEPING_DONE);
+  chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
@@ -974,8 +955,7 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
     : Space(heap, space, executable),
       free_list_(this),
       unswept_free_bytes_(0),
-      end_of_unswept_pages_(NULL),
-      emergency_memory_(NULL) {
+      end_of_unswept_pages_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
@@ -1003,30 +983,38 @@ void PagedSpace::TearDown() {
 }


+void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
+  DCHECK(identity() == other->identity());
+  // Destroy the linear allocation space of {other}. This is needed to
+  //  (a) not waste the memory and
+  //  (b) keep the rest of the chunk in an iterable state (filler is needed).
+  other->EmptyAllocationInfo();
+
+  // Move over the free list. Concatenate makes sure that the source free list
+  // gets properly reset after moving over all nodes.
+  intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
+
+  // Moved memory is not recorded as allocated memory, but rather increases and
+  // decreases capacity of the corresponding spaces. Used size and waste size
+  // are maintained by the receiving space upon allocating and freeing blocks.
+  other->accounting_stats_.DecreaseCapacity(freed_bytes);
+  accounting_stats_.IncreaseCapacity(freed_bytes);
+}
+
+
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   //   area_size_
   //   allocation_info_
-  //   emergency_memory_
   //   end_of_unswept_pages_
   //   unswept_free_bytes_
   //   anchor_
-  // It only makes sense to merge compatible spaces.
-  DCHECK(identity() == other->identity());
-
-  // Destroy the linear allocation space of {other}. This is needed to (a) not
-  // waste the memory and (b) keep the rest of the chunk in an iterable state
-  // (filler is needed).
-  int linear_size = static_cast<int>(other->limit() - other->top());
-  other->Free(other->top(), linear_size);
-
-  // Move over the free list.
-  free_list_.Concatenate(other->free_list());
+  MoveOverFreeMemory(other);

   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Clear();
+  other->accounting_stats_.Reset();

   // Move over pages.
   PageIterator it(other);
@@ -1110,9 +1098,6 @@ bool PagedSpace::Expand() {
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
-  DCHECK(heap()->CommittedOldGenerationMemory() <=
-         heap()->MaxOldGenerationSize() +
-             PagedSpace::MaxEmergencyMemoryAllocated());

   p->InsertAfter(anchor_.prev_page());
@@ -1182,51 +1167,6 @@ void PagedSpace::ReleasePage(Page* page) {
 }


-intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
-  // New space and large object space.
-  static const int spaces_without_emergency_memory = 2;
-  static const int spaces_with_emergency_memory =
-      LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
-  return Page::kPageSize * spaces_with_emergency_memory;
-}
-
-
-void PagedSpace::CreateEmergencyMemory() {
-  if (identity() == CODE_SPACE) {
-    // Make the emergency block available to the allocator.
-    CodeRange* code_range = heap()->isolate()->code_range();
-    if (code_range != NULL && code_range->valid()) {
-      code_range->ReleaseEmergencyBlock();
-    }
-    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
-  }
-  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
-      AreaSize(), AreaSize(), executable(), this);
-}
-
-
-void PagedSpace::FreeEmergencyMemory() {
-  Page* page = static_cast<Page*>(emergency_memory_);
-  DCHECK(page->LiveBytes() == 0);
-  DCHECK(AreaSize() == page->area_size());
-  DCHECK(!free_list_.ContainsPageFreeListItems(page));
-  heap()->isolate()->memory_allocator()->Free(page);
-  emergency_memory_ = NULL;
-}
-
-
-void PagedSpace::UseEmergencyMemory() {
-  // Page::Initialize makes the chunk into a real page and adds it to the
-  // accounting for this space. Unlike PagedSpace::Expand, we don't check
-  // CanExpand first, so we can go over the limits a little here. That's OK,
-  // because we are in the process of compacting which will free up at least as
-  // much memory as it allocates.
-  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
-  page->InsertAfter(anchor_.prev_page());
-  emergency_memory_ = NULL;
-}
-
-
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif
@@ -2133,9 +2073,10 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   if (category->top() != NULL) {
     // This is safe (not going to deadlock) since Concatenate operations
     // are never performed on the same free lists at the same time in
-    // reverse order.
-    base::LockGuard<base::Mutex> target_lock_guard(mutex());
-    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
+    // reverse order. Furthermore, we only lock if the PagedSpace containing
+    // the free list is know to be globally available, i.e., not local.
+    if (!this->owner()->owner()->is_local()) mutex()->Lock();
+    if (!category->owner()->owner()->is_local()) category->mutex()->Lock();
     DCHECK(category->end_ != NULL);
     free_bytes = category->available();
     if (end_ == NULL) {
@@ -2147,6 +2088,8 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
     base::NoBarrier_Store(&top_, category->top_);
     available_ += category->available();
     category->Reset();
+    if (!category->owner()->owner()->is_local()) category->mutex()->Unlock();
+    if (!this->owner()->owner()->is_local()) mutex()->Unlock();
   }
   return free_bytes;
 }
@@ -2254,7 +2197,13 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
 }


-FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
+FreeList::FreeList(PagedSpace* owner)
+    : owner_(owner),
+      heap_(owner->heap()),
+      small_list_(this),
+      medium_list_(this),
+      large_list_(this),
+      huge_list_(this) {
   Reset();
 }
...
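The `FreeListCategory::Concatenate` change above replaces the two unconditional lock guards with conditional locking: a free list that belongs to a task-local compaction space can never be reached by another thread, so only the globally visible side needs its mutex. Below is a hedged sketch of that idea with simplified stand-in types (`CategorySketch`, `owner_is_local`), not the V8 API.

```cpp
#include <mutex>

// Sketch only: a category records whether its owning space is task-local.
struct CategorySketch {
  std::mutex mutex;
  long available = 0;
  bool owner_is_local = false;  // true for per-task compaction spaces

  // Moves all free bytes from |source| into this category, locking only the
  // sides that other threads can touch concurrently.
  long Concatenate(CategorySketch* source) {
    if (!owner_is_local) mutex.lock();
    if (!source->owner_is_local) source->mutex.lock();
    long moved = source->available;
    available += moved;
    source->available = 0;
    if (!source->owner_is_local) source->mutex.unlock();
    if (!owner_is_local) mutex.unlock();
    return moved;
  }
};
```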
@@ -268,6 +268,19 @@ class SlotsBuffer;
 // any heap object.
 class MemoryChunk {
  public:
+  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
+  // |kCompactingInProgress|: Parallel compaction is currently in progress.
+  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
+  //   be finalized.
+  // |kCompactingAborted|: Parallel compaction has been aborted, which should
+  //   for now only happen in OOM scenarios.
+  enum ParallelCompactingState {
+    kCompactingDone,
+    kCompactingInProgress,
+    kCompactingFinalize,
+    kCompactingAborted,
+  };
+
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
@@ -458,6 +471,10 @@ class MemoryChunk {
     base::Release_Store(&parallel_sweeping_, state);
   }

+  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
+    return parallel_compaction_;
+  }
+
   bool TryLock() { return mutex_->TryLock(); }

   base::Mutex* mutex() { return mutex_; }
@@ -566,6 +583,7 @@ class MemoryChunk {
       + kPointerSize          // AtomicValue high_water_mark_
       + kPointerSize          // base::Mutex* mutex_
       + kPointerSize          // base::AtomicWord parallel_sweeping_
+      + kPointerSize          // AtomicValue parallel_compaction_
       + 5 * kPointerSize      // AtomicNumber free-list statistics
       + kPointerSize          // base::AtomicWord next_chunk_
       + kPointerSize;         // base::AtomicWord prev_chunk_
@@ -726,6 +744,7 @@ class MemoryChunk {
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
+  AtomicValue<ParallelCompactingState> parallel_compaction_;

   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_small_free_list_;
@@ -986,9 +1005,6 @@ class CodeRange {
   bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);

-  void ReserveEmergencyBlock();
-  void ReleaseEmergencyBlock();
-
  private:
   // Frees the range of virtual memory, and frees the data structures used to
   // manage it.
@@ -1031,12 +1047,6 @@ class CodeRange {
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;

-  // Emergency block guarantees that we can always allocate a page for
-  // evacuation candidates when code space is compacted. Emergency block is
-  // reserved immediately after GC and is released immedietely before
-  // allocating a page for evacuation.
-  FreeBlock emergency_block_;
-
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory. If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
@@ -1518,6 +1528,13 @@ class AllocationStats BASE_EMBEDDED {
     }
   }

+  void DecreaseCapacity(intptr_t size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    DCHECK_GE(capacity_, 0);
+  }
+
+  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+
  private:
   intptr_t capacity_;
   intptr_t max_capacity_;
@@ -1533,7 +1550,8 @@
 // the end element of the linked list of free memory blocks.
 class FreeListCategory {
  public:
-  FreeListCategory() : top_(0), end_(NULL), available_(0) {}
+  explicit FreeListCategory(FreeList* owner)
+      : top_(0), end_(NULL), available_(0), owner_(owner) {}

   intptr_t Concatenate(FreeListCategory* category);
@@ -1573,6 +1591,8 @@ class FreeListCategory {
   int FreeListLength();
 #endif

+  FreeList* owner() { return owner_; }
+
  private:
   // top_ points to the top FreeSpace* in the free list category.
   base::AtomicWord top_;
@@ -1581,6 +1601,8 @@ class FreeListCategory {
   // Total available bytes in all blocks of this free list category.
   int available_;

+  FreeList* owner_;
 };
@@ -1673,6 +1695,8 @@ class FreeList {
   FreeListCategory* large_list() { return &large_list_; }
   FreeListCategory* huge_list() { return &huge_list_; }

+  PagedSpace* owner() { return owner_; }
+
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
@@ -1969,17 +1993,14 @@ class PagedSpace : public Space {
   // Return size of allocatable area on a page in this space.
   inline int AreaSize() { return area_size_; }

-  void CreateEmergencyMemory();
-  void FreeEmergencyMemory();
-  void UseEmergencyMemory();
-  intptr_t MaxEmergencyMemoryAllocated();
-
-  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
-
   // Merges {other} into the current space. Note that this modifies {other},
   // e.g., removes its bump pointer area and resets statistics.
   void MergeCompactionSpace(CompactionSpace* other);

+  void MoveOverFreeMemory(PagedSpace* other);
+
+  virtual bool is_local() { return false; }
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2040,12 +2061,6 @@ class PagedSpace : public Space {
   // end_of_unswept_pages_ page.
   Page* end_of_unswept_pages_;

-  // Emergency memory is the memory of a full page for a given space, allocated
-  // conservatively before evacuating a page. If compaction fails due to out
-  // of memory error the emergency memory can be used to complete compaction.
-  // If not used, the emergency memory is released after compaction.
-  MemoryChunk* emergency_memory_;
-
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
@@ -2739,12 +2754,40 @@ class CompactionSpace : public PagedSpace {
     Free(start, size_in_bytes);
   }

+  virtual bool is_local() { return true; }
+
  protected:
   // The space is temporary and not included in any snapshots.
   virtual bool snapshotable() { return false; }
 };


+// A collection of |CompactionSpace|s used by a single compaction task.
+class CompactionSpaceCollection : public Malloced {
+ public:
+  explicit CompactionSpaceCollection(Heap* heap)
+      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
+        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
+
+  CompactionSpace* Get(AllocationSpace space) {
+    switch (space) {
+      case OLD_SPACE:
+        return &old_space_;
+      case CODE_SPACE:
+        return &code_space_;
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+ private:
+  CompactionSpace old_space_;
+  CompactionSpace code_space_;
+};
+
+
 // -----------------------------------------------------------------------------
 // Old object space (includes the old space of objects and code space)
...
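Putting the pieces together: each task allocates into its own `CompactionSpaceCollection`, so the allocation fast path needs no synchronization, and the main thread merges the resulting memory back into the global spaces once every task has finished. The sketch below is a rough model of that orchestration only; `SpaceSketch`, `TaskSpaces`, and `EvacuateWith` are illustrative names, and threads stand in for the platform's background tasks and the semaphore-based wait.

```cpp
#include <thread>
#include <vector>

// Stand-ins for the heap spaces; Merge() models MergeCompactionSpace().
struct SpaceSketch {
  long bytes = 0;
  void Merge(SpaceSketch* other) { bytes += other->bytes; other->bytes = 0; }
};

struct TaskSpaces {  // models CompactionSpaceCollection
  SpaceSketch old_space;
  SpaceSketch code_space;
};

// Stand-in for EvacuatePages(): compact claimed pages into |spaces|.
void EvacuateWith(TaskSpaces* spaces) { (void)spaces; }

void EvacuateInParallel(SpaceSketch* old_space, SpaceSketch* code_space,
                        int num_tasks) {
  std::vector<TaskSpaces> per_task(num_tasks);
  std::vector<std::thread> workers;
  for (int i = 1; i < num_tasks; i++)   // tasks 1..n-1 run in the background
    workers.emplace_back(EvacuateWith, &per_task[i]);
  EvacuateWith(&per_task[0]);           // the main thread contributes as well
  for (auto& w : workers) w.join();     // ~ WaitUntilCompactionCompleted()
  for (auto& spaces : per_task) {       // merge back compacted + unused memory
    old_space->Merge(&spaces.old_space);
    code_space->Merge(&spaces.code_space);
  }
}
```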