Commit 7a0a0b8b authored by mlippautz, committed by Commit bot

Revert of [heap] Introduce parallel compaction algorithm. (patchset #9 id:160001 of https://codereview.chromium.org/1343333002/ )

Reason for revert:
Check failed: https://chromegw.corp.google.com/i/client.v8/builders/V8%20Win64/builds/5535/steps/Check%20%28flakes%29/logs/IndependentWeakHandle

Original issue's description:
> [heap] Introduce parallel compaction algorithm.
>
> - The number of parallel tasks is still 1, i.e., we only compact on the main
>   thread.
> - Remove emergency memory (PagedSpace and CodeRange).
> - Introduce partial compaction of pages.
> - Logic for multiple tasks is in place.
>
> BUG=chromium:524425
> LOG=N
>
> Committed: https://crrev.com/61ea4f55616d3f7bc2ce049a678f16f7475e03e0
> Cr-Commit-Position: refs/heads/master@{#30787}

TBR=hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1347873003

Cr-Commit-Position: refs/heads/master@{#30788}
parent 61ea4f55
@@ -553,11 +553,8 @@ class MarkCompactCollector {
// Synchronize sweeper threads.
base::Semaphore pending_sweeper_jobs_semaphore_;
// Synchronize compaction tasks.
base::Semaphore pending_compaction_tasks_semaphore_;
// Number of active compaction tasks (including main thread).
intptr_t concurrent_compaction_tasks_active_;
// Synchronize compaction threads.
base::Semaphore pending_compaction_jobs_semaphore_;
bool evacuation_;
@@ -715,16 +712,11 @@ class MarkCompactCollector {
void EvacuateNewSpace();
bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
void EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
void EvacuatePages(CompactionSpaceCollection* compaction_spaces);
void EvacuatePagesInParallel();
void EvacuatePages();
int NumberOfParallelCompactionTasks() {
// TODO(hpayer, mlippautz): Figure out some logic to determine the number
// of compaction tasks.
return 1;
}
void EvacuatePagesInParallel();
void WaitUntilCompactionCompleted();
@@ -80,7 +80,8 @@ CodeRange::CodeRange(Isolate* isolate)
code_range_(NULL),
free_list_(0),
allocation_list_(0),
current_allocation_block_index_(0) {}
current_allocation_block_index_(0),
emergency_block_() {}
bool CodeRange::SetUp(size_t requested) {
@@ -139,6 +140,7 @@ bool CodeRange::SetUp(size_t requested) {
current_allocation_block_index_ = 0;
LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
ReserveEmergencyBlock();
return true;
}
@@ -274,6 +276,24 @@ void CodeRange::ReleaseBlock(const FreeBlock* block) {
}
void CodeRange::ReserveEmergencyBlock() {
const size_t requested_size = MemoryAllocator::CodePageAreaSize();
if (emergency_block_.size == 0) {
ReserveBlock(requested_size, &emergency_block_);
} else {
DCHECK(emergency_block_.size >= requested_size);
}
}
void CodeRange::ReleaseEmergencyBlock() {
if (emergency_block_.size != 0) {
ReleaseBlock(&emergency_block_);
emergency_block_.size = 0;
}
}
// -----------------------------------------------------------------------------
// MemoryAllocator
//
@@ -472,7 +492,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->set_parallel_sweeping(SWEEPING_DONE);
chunk->parallel_compaction_state().SetValue(kCompactingDone);
chunk->mutex_ = NULL;
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
@@ -955,7 +974,8 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
: Space(heap, space, executable),
free_list_(this),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
end_of_unswept_pages_(NULL),
emergency_memory_(NULL) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
@@ -983,37 +1003,30 @@ void PagedSpace::TearDown() {
}
void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to
// (a) not waste the memory and
// (b) keep the rest of the chunk in an iterable state (filler is needed).
other->EmptyAllocationInfo();
// Move over the free list. Concatenate makes sure that the source free list
// gets properly reset after moving over all nodes.
intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
other->accounting_stats_.AllocateBytes(freed_bytes);
// We do not adjust accounting_stats_ for {this} as we treat the received
// memory as borrowed, i.e., the originating space keeps track of its
// capacity. Other stats, e.g. accounting_stats_.{size_,waste_} are properly
// maintained by allocating and freeing blocks.
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Unmerged fields:
// area_size_
// allocation_info_
// emergency_memory_
// end_of_unswept_pages_
// unswept_free_bytes_
// anchor_
MoveOverFreeMemory(other);
// It only makes sense to merge compatible spaces.
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to (a) not
// waste the memory and (b) keep the rest of the chunk in an iterable state
// (filler is needed).
int linear_size = static_cast<int>(other->limit() - other->top());
other->Free(other->top(), linear_size);
// Move over the free list.
free_list_.Concatenate(other->free_list());
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
other->accounting_stats_.Reset();
other->accounting_stats_.Clear();
// Move over pages.
PageIterator it(other);
@@ -1097,6 +1110,9 @@ bool PagedSpace::Expand() {
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
DCHECK(heap()->CommittedOldGenerationMemory() <=
heap()->MaxOldGenerationSize() +
PagedSpace::MaxEmergencyMemoryAllocated());
p->InsertAfter(anchor_.prev_page());
@@ -1166,6 +1182,51 @@ void PagedSpace::ReleasePage(Page* page) {
}
intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
// New space and large object space.
static const int spaces_without_emergency_memory = 2;
static const int spaces_with_emergency_memory =
LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
return Page::kPageSize * spaces_with_emergency_memory;
}
void PagedSpace::CreateEmergencyMemory() {
if (identity() == CODE_SPACE) {
// Make the emergency block available to the allocator.
CodeRange* code_range = heap()->isolate()->code_range();
if (code_range != NULL && code_range->valid()) {
code_range->ReleaseEmergencyBlock();
}
DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
}
emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
AreaSize(), AreaSize(), executable(), this);
}
void PagedSpace::FreeEmergencyMemory() {
Page* page = static_cast<Page*>(emergency_memory_);
DCHECK(page->LiveBytes() == 0);
DCHECK(AreaSize() == page->area_size());
DCHECK(!free_list_.ContainsPageFreeListItems(page));
heap()->isolate()->memory_allocator()->Free(page);
emergency_memory_ = NULL;
}
void PagedSpace::UseEmergencyMemory() {
// Page::Initialize makes the chunk into a real page and adds it to the
// accounting for this space. Unlike PagedSpace::Expand, we don't check
// CanExpand first, so we can go over the limits a little here. That's OK,
// because we are in the process of compacting which will free up at least as
// much memory as it allocates.
Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
page->InsertAfter(anchor_.prev_page());
emergency_memory_ = NULL;
}
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -268,19 +268,6 @@ class SlotsBuffer;
// any heap object.
class MemoryChunk {
public:
// |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
// |kCompactingInProgress|: Parallel compaction is currently in progress.
// |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
// be finalized.
// |kCompactingAborted|: Parallel compaction has been aborted, which should
// for now only happen in OOM scenarios.
enum ParallelCompactingState {
kCompactingDone,
kCompactingInProgress,
kCompactingFinalize,
kCompactingAborted,
};
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
@@ -471,10 +458,6 @@ class MemoryChunk {
base::Release_Store(&parallel_sweeping_, state);
}
AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
return parallel_compaction_;
}
bool TryLock() { return mutex_->TryLock(); }
base::Mutex* mutex() { return mutex_; }
@@ -583,7 +566,6 @@ class MemoryChunk {
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::Mutex* mutex_
+ kPointerSize // base::AtomicWord parallel_sweeping_
+ kPointerSize // AtomicValue parallel_compaction_
+ 5 * kPointerSize // AtomicNumber free-list statistics
+ kPointerSize // base::AtomicWord next_chunk_
+ kPointerSize; // base::AtomicWord prev_chunk_
@@ -744,7 +726,6 @@ class MemoryChunk {
base::Mutex* mutex_;
base::AtomicWord parallel_sweeping_;
AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.
AtomicNumber<intptr_t> available_in_small_free_list_;
@@ -1005,6 +986,9 @@ class CodeRange {
bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
void ReserveEmergencyBlock();
void ReleaseEmergencyBlock();
private:
// Frees the range of virtual memory, and frees the data structures used to
// manage it.
@@ -1047,6 +1031,12 @@ class CodeRange {
List<FreeBlock> allocation_list_;
int current_allocation_block_index_;
// The emergency block guarantees that we can always allocate a page for
// evacuation candidates when code space is compacted. It is reserved
// immediately after GC and released immediately before allocating a page
// for evacuation.
FreeBlock emergency_block_;
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
@@ -1979,12 +1969,17 @@ class PagedSpace : public Space {
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return area_size_; }
void CreateEmergencyMemory();
void FreeEmergencyMemory();
void UseEmergencyMemory();
intptr_t MaxEmergencyMemoryAllocated();
bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
void MoveOverFreeMemory(PagedSpace* other);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -2045,6 +2040,12 @@ class PagedSpace : public Space {
// end_of_unswept_pages_ page.
Page* end_of_unswept_pages_;
// Emergency memory is the memory of a full page for a given space, allocated
// conservatively before evacuating a page. If compaction fails due to an
// out-of-memory error, the emergency memory can be used to complete
// compaction. If not used, the emergency memory is released after compaction.
MemoryChunk* emergency_memory_;
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
@@ -2744,32 +2745,6 @@ class CompactionSpace : public PagedSpace {
};
// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
case OLD_SPACE:
return &old_space_;
case CODE_SPACE:
return &code_space_;
default:
UNREACHABLE();
}
UNREACHABLE();
return nullptr;
}
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
};
// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)