Commit 5eff5420 authored by mlippautz, committed by Commit bot

[heap] Cleanup: Remove WAS_SWEPT flag.

- Completely rely on the concurrent sweeping state for SweepingCompleted()
- Rename the state accordingly.

CQ_EXTRA_TRYBOTS=tryserver.v8:v8_linux_arm64_gc_stress_dbg,v8_linux_gc_stress_dbg,v8_mac_gc_stress_dbg,v8_linux64_asan_rel,v8_linux64_tsan_rel,v8_mac64_asan_rel
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/1614953002

Cr-Commit-Position: refs/heads/master@{#33490}
parent 9602f4b2
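
Before the per-file hunks, a condensed view of what the cleanup amounts to: the WAS_SWEPT page flag and the parallel sweeping state tracked two overlapping notions of "swept", and after this change the atomic state is the single source of truth. The sketch below is illustrative only; PageSketch is a made-up stand-in for V8's Page/MemoryChunk, and std::atomic approximates V8's AtomicValue.

#include <atomic>

// Illustrative sketch, not V8 code: the page life cycle reduced to the
// sweeping state that this commit makes authoritative.
enum ConcurrentSweepingState {
  kSweepingDone,        // Swept, or must not be swept at all.
  kSweepingPending,     // Queued, ready for (parallel) sweeping.
  kSweepingInProgress,  // A sweeper thread currently owns the page.
};

class PageSketch {
 public:
  bool SweepingDone() const {
    return state_.load(std::memory_order_acquire) == kSweepingDone;
  }
  bool IsPending() const {
    return state_.load(std::memory_order_acquire) == kSweepingPending;
  }
  void MarkPending() { state_.store(kSweepingPending, std::memory_order_release); }
  void StartSweeping() { state_.store(kSweepingInProgress, std::memory_order_release); }
  void FinishSweeping() { state_.store(kSweepingDone, std::memory_order_release); }

 private:
  // Pages start out "done": a fresh page has nothing to sweep. This is why
  // the removed chunk->SetFlag(WAS_SWEPT) in MemoryChunk::Initialize becomes
  // concurrent_sweeping_state().SetValue(kSweepingDone) in the diff below.
  std::atomic<ConcurrentSweepingState> state_{kSweepingDone};
};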
@@ -3100,7 +3100,7 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
   // (3) the page was already concurrently swept. This case is an optimization
   // for concurrent sweeping. The WasSwept predicate for concurrently swept
   // pages is set after sweeping all pages.
-  return !InOldSpace(address) || page->WasSwept() || page->SweepingCompleted();
+  return !InOldSpace(address) || page->SweepingDone();
 }
@@ -540,9 +540,9 @@ void MarkCompactCollector::StartSweeperThreads() {
 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
   PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-  if (!page->SweepingCompleted()) {
+  if (!page->SweepingDone()) {
     SweepInParallel(page, owner);
-    if (!page->SweepingCompleted()) {
+    if (!page->SweepingDone()) {
       // We were not able to sweep that page, i.e., a concurrent
       // sweeper thread currently owns this page. Wait for the sweeper
       // thread to be done with this page.
@@ -721,14 +721,14 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
       continue;
     }
     // Invariant: Evacuation candidates are just created when marking is
-    // started. At the end of a GC all evacuation candidates are cleared and
-    // their slot buffers are released.
+    // started. This means that sweeping has finished. Furthermore, at the end
+    // of a GC all evacuation candidates are cleared and their slot buffers are
+    // released.
     CHECK(!p->IsEvacuationCandidate());
-    CHECK(p->slots_buffer() == NULL);
+    CHECK(p->slots_buffer() == nullptr);
+    CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
-    int live_bytes =
-        p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
-    pages.push_back(std::make_pair(live_bytes, p));
+    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
   }
   int candidate_count = 0;
@@ -3224,7 +3224,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         break;
       case MemoryChunk::kCompactingFinalize:
         DCHECK(p->IsEvacuationCandidate());
-        p->SetWasSwept();
+        DCHECK(p->SweepingDone());
         p->Unlink();
         break;
       case MemoryChunk::kCompactingDone:
@@ -3290,8 +3290,7 @@ void MarkCompactCollector::EvacuatePages(
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
-           MemoryChunk::kSweepingDone);
+    DCHECK(p->SweepingDone());
     if (p->parallel_compaction_state().TrySetValue(
             MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
       if (p->IsEvacuationCandidate()) {
@@ -3365,7 +3364,7 @@ template <SweepingMode sweeping_mode,
           FreeSpaceTreatmentMode free_space_mode>
 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
                  ObjectVisitor* v) {
-  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
+  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
@@ -3428,14 +3427,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
     freed_bytes = Free<parallelism>(space, free_list, free_start, size);
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   }
-  if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-    // When concurrent sweeping is active, the page will be marked after
-    // sweeping by the main thread.
-    p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
-  } else {
-    p->SetWasSwept();
-  }
+  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
@@ -3554,6 +3546,7 @@ void MarkCompactCollector::SweepAbortedPages() {
     Page* p = evacuation_candidates_[i];
     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
       p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       PagedSpace* space = static_cast<PagedSpace*>(p->owner());
       switch (space->identity()) {
         case OLD_SPACE:
@@ -3716,6 +3709,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
     }
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
     switch (space->identity()) {
       case OLD_SPACE:
@@ -3766,8 +3760,8 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
     space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
     p->ResetLiveBytes();
-    CHECK(p->WasSwept());
-    space->ReleasePage(p);
+    CHECK(p->SweepingDone());
+    space->ReleasePage(p, true);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
@@ -3802,12 +3796,11 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
   int max_freed = 0;
   if (page->TryLock()) {
     // If this page was already swept in the meantime, we can return here.
-    if (page->parallel_sweeping_state().Value() !=
-        MemoryChunk::kSweepingPending) {
+    if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
       page->mutex()->Unlock();
       return 0;
     }
-    page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
+    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
     FreeList* free_list;
     FreeList private_free_list(space);
     if (space->identity() == OLD_SPACE) {
@@ -3827,6 +3820,7 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
                          IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
     }
     free_list->Concatenate(&private_free_list);
+    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
     page->mutex()->Unlock();
   }
   return max_freed;
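
The two SweepInParallel hunks above implement a claim protocol: TryLock() elects a single sweeper, the pending check under the lock resolves the race with other sweepers, and kSweepingDone is published before the mutex is released. A minimal sketch of that protocol, reusing the hypothetical PageSketch from the overview above; TrySweepPage and do_sweep are invented names, not V8 API.

#include <mutex>

// Sketch of the claiming protocol (simplified from SweepInParallel).
int TrySweepPage(PageSketch* page, std::mutex* page_mutex,
                 int (*do_sweep)(PageSketch*)) {
  int max_freed = 0;
  if (page_mutex->try_lock()) {
    // Re-check under the lock: another thread may have swept the page
    // between the unsynchronized state read and acquiring the mutex.
    if (!page->IsPending()) {
      page_mutex->unlock();
      return 0;
    }
    page->StartSweeping();
    max_freed = do_sweep(page);  // Stand-in for the Sweep<> instantiation.
    // Publish completion before releasing the mutex, so a waiter that
    // performs Lock(); Unlock(); observes kSweepingDone afterwards.
    page->FinishSweeping();
    page_mutex->unlock();
  }
  return max_freed;
}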
@@ -3843,10 +3837,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   while (it.has_next()) {
     Page* p = it.next();
-    DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
-
-    // Clear sweeping flags indicating that marking bits are still intact.
-    p->ClearWasSwept();
+    DCHECK(p->SweepingDone());
 
     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
         p->IsEvacuationCandidate()) {
@@ -3860,6 +3851,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
       // that this adds unusable memory into the free list that is later on
       // (in the free list) dropped again. Since we only use the flag for
       // testing this is fine.
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
             IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
       continue;
@@ -3871,14 +3863,14 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
        if (FLAG_gc_verbose) {
          PrintIsolate(isolate(), "sweeping: released page: %p", p);
        }
-       space->ReleasePage(p);
+       space->ReleasePage(p, false);
        continue;
      }
      unused_page_present = true;
    }
 
+    p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
     sweeping_list(space).push_back(p);
-    p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
     int to_sweep = p->area_size() - p->LiveBytes();
     space->accounting_stats_.ShrinkSpace(to_sweep);
     will_be_swept++;
@@ -3940,22 +3932,7 @@ void MarkCompactCollector::SweepSpaces() {
 }
 
-void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
-  for (Page* p : sweeping_list(space)) {
-    if (p->parallel_sweeping_state().Value() ==
-        MemoryChunk::kSweepingFinalize) {
-      p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
-      p->SetWasSwept();
-    }
-    DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
-  }
-}
-
 void MarkCompactCollector::ParallelSweepSpacesComplete() {
-  ParallelSweepSpaceComplete(heap()->old_space());
-  ParallelSweepSpaceComplete(heap()->code_space());
-  ParallelSweepSpaceComplete(heap()->map_space());
   sweeping_list(heap()->old_space()).clear();
   sweeping_list(heap()->code_space()).clear();
   sweeping_list(heap()->map_space()).clear();
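
With the flag gone, the finalize pass is no longer needed: a parallel sweeper publishes kSweepingDone itself at the end of Sweep() (see the hunk above), instead of entering kSweepingFinalize and waiting for the main thread to set WAS_SWEPT. ParallelSweepSpaceComplete existed only for that promotion, so ParallelSweepSpacesComplete reduces to clearing the sweeping lists.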
@@ -749,8 +749,6 @@ class MarkCompactCollector {
   // swept in parallel.
   void ParallelSweepSpacesComplete();
 
-  void ParallelSweepSpaceComplete(PagedSpace* space);
-
   // Updates store buffer and slot buffer for a pointer in a migrating object.
   void RecordMigratedSlot(Object* value, Address slot,
                           SlotsBuffer** evacuation_slots_buffer);
@@ -35,7 +35,7 @@ HeapObjectIterator::HeapObjectIterator(Page* page) {
          owner == page->heap()->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
              page->area_end(), kOnePageOnly);
-  DCHECK(page->WasSwept() || page->SweepingCompleted());
+  DCHECK(page->SweepingDone());
 }
@@ -66,7 +66,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
                               cur_page);
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
-  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
+  DCHECK(cur_page->SweepingDone());
   return true;
 }
@@ -469,7 +469,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
-  chunk->parallel_sweeping_state().SetValue(kSweepingDone);
+  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
   chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
@@ -480,7 +480,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT);
   chunk->set_next_chunk(nullptr);
   chunk->set_prev_chunk(nullptr);
@@ -923,7 +922,7 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
 void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
     static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
   }
   chunk->IncrementLiveBytes(by);
@@ -1225,11 +1224,11 @@ void PagedSpace::IncreaseCapacity(int size) {
 }
 
-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
   DCHECK(page->LiveBytes() == 0);
   DCHECK(AreaSize() == page->area_size());
 
-  if (page->WasSwept()) {
+  if (evict_free_list_items) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
     DCHECK_EQ(AreaSize(), static_cast<int>(size));
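
The new evict_free_list_items parameter makes the caller state what WasSwept() used to imply. As the two call sites in this diff show, ReleaseEvacuationCandidates() passes true because the candidate's memory was just returned to the free list and must be evicted again, while StartSweepSpace() passes false for an unused page that was never re-added to the free list in this cycle.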
@@ -1275,7 +1274,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
-    CHECK(page->WasSwept());
+    CHECK(page->SweepingDone());
     HeapObjectIterator it(page);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
@@ -308,10 +308,6 @@ class MemoryChunk {
     NEVER_EVACUATE,  // May contain immortal immutables.
     POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.
 
-    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
-    // otherwise marking bits are still intact.
-    WAS_SWEPT,
-
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
     // Even if the mutator writes to them they will be kept black and a white
@@ -353,16 +349,14 @@ class MemoryChunk {
   };
 
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
-  // not be performed on that page.
-  // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
-  // not touch the page memory anymore.
-  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+  // not be performed on that page. Sweeper threads that are done with their
+  // work will set this value and not touch the page anymore.
   // |kSweepingPending|: This page is ready for parallel sweeping.
-  enum ParallelSweepingState {
+  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+  enum ConcurrentSweepingState {
     kSweepingDone,
-    kSweepingFinalize,
+    kSweepingPending,
     kSweepingInProgress,
-    kSweepingPending
   };
 
   // Every n write barrier invocations we go to runtime even though
@@ -556,8 +550,8 @@ class MemoryChunk {
   // Return all current flags.
   intptr_t GetFlags() { return flags_; }
 
-  AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
-    return parallel_sweeping_;
+  AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
+    return concurrent_sweeping_;
   }
 
   AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
@@ -568,19 +562,6 @@ class MemoryChunk {
 
   base::Mutex* mutex() { return mutex_; }
 
-  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
-  // progress. In particular, when we know that right before this call a
-  // sweeper thread was sweeping this page.
-  void WaitUntilSweepingCompleted() {
-    mutex_->Lock();
-    mutex_->Unlock();
-    DCHECK(SweepingCompleted());
-  }
-
-  bool SweepingCompleted() {
-    return parallel_sweeping_state().Value() <= kSweepingFinalize;
-  }
-
   // Manage live byte count (count of bytes known to be live,
   // because they are marked black).
   void ResetLiveBytes() {
@@ -759,7 +740,7 @@ class MemoryChunk {
   AtomicValue<intptr_t> high_water_mark_;
 
   base::Mutex* mutex_;
 
-  AtomicValue<ParallelSweepingState> parallel_sweeping_;
+  AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
   AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
@@ -865,9 +846,18 @@ class Page : public MemoryChunk {
   void InitializeAsAnchor(PagedSpace* owner);
 
-  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
-  void SetWasSwept() { SetFlag(WAS_SWEPT); }
-  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
+  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
+  // progress. In particular, when we know that right before this call a
+  // sweeper thread was sweeping this page.
+  void WaitUntilSweepingCompleted() {
+    mutex_->Lock();
+    mutex_->Unlock();
+    DCHECK(SweepingDone());
+  }
+
+  bool SweepingDone() {
+    return concurrent_sweeping_state().Value() == kSweepingDone;
+  }
 
   void ResetFreeListStatistics();
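
The Lock()/Unlock() pair in WaitUntilSweepingCompleted is the counterpart of the claiming protocol sketched earlier: a sweeper holds the page mutex for the entire kSweepingInProgress window and stores kSweepingDone before unlocking, so merely acquiring and then releasing the mutex blocks until sweeping is finished. Continuing the hypothetical PageSketch sketch (WaitUntilSwept is an invented name):

#include <cassert>
#include <mutex>

// Valid only when a sweeper was actually working on the page, as the
// comment in the diff above requires.
void WaitUntilSwept(PageSketch* page, std::mutex* page_mutex) {
  page_mutex->lock();    // Cannot succeed while a sweeper owns the page.
  page_mutex->unlock();
  assert(page->SweepingDone());
}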
@@ -2077,7 +2067,7 @@ class PagedSpace : public Space {
   void IncreaseCapacity(int size);
 
   // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page);
+  void ReleasePage(Page* page, bool evict_free_list_items);
 
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
@@ -2104,13 +2094,6 @@ class PagedSpace : public Space {
   static void ResetCodeStatistics(Isolate* isolate);
 #endif
 
-  // Evacuation candidates are swept by evacuator. Needs to return a valid
-  // result before _and_ after evacuation has finished.
-  static bool ShouldBeSweptBySweeperThreads(Page* p) {
-    return !p->IsEvacuationCandidate() &&
-           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
-  }
-
   // This function tries to steal size_in_bytes memory from the sweeper threads
   // free-lists. If it does not succeed stealing enough memory, it will wait
   // for the sweeper threads to finish sweeping.
@@ -476,7 +476,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
         Page* page = reinterpret_cast<Page*>(chunk);
         PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
         if (owner == heap_->map_space()) {
-          DCHECK(page->WasSwept());
+          DCHECK(page->SweepingDone());
           HeapObjectIterator iterator(page);
           for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                heap_object = iterator.Next()) {
@@ -5571,33 +5571,6 @@ TEST(Regress507979) {
 }
 
-TEST(ArrayShiftSweeping) {
-  i::FLAG_expose_gc = true;
-  CcTest::InitializeVM();
-  v8::HandleScope scope(CcTest::isolate());
-  Isolate* isolate = CcTest::i_isolate();
-  Heap* heap = isolate->heap();
-
-  v8::Local<v8::Value> result = CompileRun(
-      "var array = new Array(400);"
-      "var tmp = new Array(1000);"
-      "array[0] = 10;"
-      "gc();"
-      "gc();"
-      "array.shift();"
-      "array;");
-
-  Handle<JSObject> o = Handle<JSObject>::cast(
-      v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result)));
-  CHECK(heap->InOldSpace(o->elements()));
-  CHECK(heap->InOldSpace(*o));
-  Page* page = Page::FromAddress(o->elements()->address());
-  CHECK(page->parallel_sweeping_state().Value() <=
-        MemoryChunk::kSweepingFinalize ||
-        Marking::IsBlack(Marking::MarkBitFrom(o->elements())));
-}
-
 UNINITIALIZED_TEST(PromotionQueue) {
   i::FLAG_expose_gc = true;
   i::FLAG_max_semi_space_size = 2 * (Page::kPageSize / MB);
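
The deleted ArrayShiftSweeping test asserted that a page was at most in kSweepingFinalize, a state this commit removes, so the test is deleted together with the state it checked.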