Commit 20107bf2 authored by hpayer@chromium.org

Remove lazy sweeping.

BUG=
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/254603002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20966 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 2dd4b5f9
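For readers skimming the diff: lazy sweeping let the main thread sweep old-space pages a few at a time (AdvanceSweeper / AdvanceSweepers); with it removed, the allocation slow paths either steal memory from the concurrent sweeper threads or simply wait for them to finish (WaitUntilSweepingCompleted). A minimal toy sketch of that shift follows; the types and behaviour are illustrative stand-ins, not V8 code.

```cpp
// Toy model of the change (illustrative only; not V8 code).
// Before: the allocating thread itself swept a bounded number of pages
// ("lazy sweeping"). After: sweeping runs on background threads and the
// allocating thread, as a last resort, just waits for them to finish.
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>

struct Page { bool swept = false; };

class Space {
 public:
  explicit Space(int pages) : pages_(pages) {}

  // Old approach (removed by this commit): sweep up to `budget` pages on
  // the calling thread and report whether everything is swept.
  bool AdvanceSweeper(int budget) {
    for (Page& p : pages_) {
      if (budget == 0) break;
      if (!p.swept) { p.swept = true; --budget; }
    }
    return IsSweepingComplete();
  }

  // New approach: a background "sweeper thread" sweeps all pages; the
  // allocating thread blocks until it is done.
  void StartConcurrentSweeping() {
    sweeper_ = std::thread([this] {
      for (Page& p : pages_) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
        p.swept = true;
      }
    });
  }
  void WaitUntilSweepingCompleted() {
    if (sweeper_.joinable()) sweeper_.join();
  }

  bool IsSweepingComplete() const {
    for (const Page& p : pages_) if (!p.swept) return false;
    return true;
  }

 private:
  std::vector<Page> pages_;
  std::thread sweeper_;
};

int main() {
  Space space(16);
  space.StartConcurrentSweeping();
  // Allocation slow path after this commit: no incremental AdvanceSweeper
  // calls, just wait for the sweeper threads before the last-ditch retry.
  space.WaitUntilSweepingCompleted();
  std::cout << "sweeping complete: " << space.IsSweepingComplete() << "\n";
}
```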
@@ -565,8 +565,6 @@ DEFINE_bool(native_code_counters, false,
 // mark-compact.cc
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(lazy_sweeping, true,
-            "Use lazy sweeping for old pointer and data spaces")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
 DEFINE_bool(compact_code_space, true,
...
@@ -4600,17 +4600,8 @@ bool Heap::IdleNotification(int hint) {
   // An incremental GC progresses as follows:
   // 1. many incremental marking steps,
   // 2. one old space mark-sweep-compact,
-  // 3. many lazy sweep steps.
   // Use mark-sweep-compact events to count incremental GCs in a round.
 
-  if (incremental_marking()->IsStopped()) {
-    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
-        !IsSweepingComplete() &&
-        !AdvanceSweepers(static_cast<int>(step_size))) {
-      return false;
-    }
-  }
-
   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
     if (EnoughGarbageSinceLastIdleRound()) {
       StartIdleRound();
@@ -5334,14 +5325,6 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
 }
 
 
-bool Heap::AdvanceSweepers(int step_size) {
-  ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
-  bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
-  sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
-  return sweeping_complete;
-}
-
-
 int64_t Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
...
@@ -1553,14 +1553,6 @@ class Heap {
     return &incremental_marking_;
   }
 
-  bool IsSweepingComplete() {
-    return !mark_compact_collector()->IsConcurrentSweepingInProgress() &&
-           old_data_space()->IsLazySweepingComplete() &&
-           old_pointer_space()->IsLazySweepingComplete();
-  }
-
-  bool AdvanceSweepers(int step_size);
-
   bool EnsureSweepersProgressed(int step_size) {
     bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
     sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size);
...
@@ -565,7 +565,7 @@ void IncrementalMarking::Start(CompactionFlag flag) {
   ResetStepCounters();
 
-  if (heap_->IsSweepingComplete()) {
+  if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
     StartMarking(flag);
   } else {
     if (FLAG_trace_incremental_marking) {
...
@@ -4152,7 +4152,6 @@ void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
 
 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
-                                      sweeper == LAZY_CONSERVATIVE ||
                                       sweeper == PARALLEL_CONSERVATIVE ||
                                       sweeper == CONCURRENT_CONSERVATIVE);
   space->ClearStats();
@@ -4160,7 +4159,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   PageIterator it(space);
 
   int pages_swept = 0;
-  bool lazy_sweeping_active = false;
   bool unused_page_present = false;
   bool parallel_sweeping_active = false;
@@ -4206,25 +4204,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         pages_swept++;
         break;
       }
-      case LAZY_CONSERVATIVE: {
-        if (lazy_sweeping_active) {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          space->IncreaseUnsweptFreeBytes(p);
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
-          pages_swept++;
-          space->SetPagesToSweep(p->next_page());
-          lazy_sweeping_active = true;
-        }
-        break;
-      }
       case CONCURRENT_CONSERVATIVE:
       case PARALLEL_CONSERVATIVE: {
         if (!parallel_sweeping_active) {
@@ -4285,8 +4264,7 @@ void MarkCompactCollector::SweepSpaces() {
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
-  SweeperType how_to_sweep =
-      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+  SweeperType how_to_sweep = CONSERVATIVE;
   if (AreSweeperThreadsActivated()) {
     if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
     if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
...
@@ -594,7 +594,6 @@ class MarkCompactCollector {
 
   enum SweeperType {
     CONSERVATIVE,
-    LAZY_CONSERVATIVE,
     PARALLEL_CONSERVATIVE,
     CONCURRENT_CONSERVATIVE,
     PRECISE
...
@@ -953,7 +953,6 @@ PagedSpace::PagedSpace(Heap* heap,
     : Space(heap, id, executable),
       free_list_(this),
       was_swept_conservatively_(false),
-      first_unswept_page_(Page::FromAddress(NULL)),
       unswept_free_bytes_(0) {
   if (id == CODE_SPACE) {
     area_size_ = heap->isolate()->memory_allocator()->
@@ -1131,14 +1130,6 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
   ASSERT(page->LiveBytes() == 0);
   ASSERT(AreaSize() == page->area_size());
 
-  // Adjust list of unswept pages if the page is the head of the list.
-  if (first_unswept_page_ == page) {
-    first_unswept_page_ = page->next_page();
-    if (first_unswept_page_ == anchor()) {
-      first_unswept_page_ = Page::FromAddress(NULL);
-    }
-  }
-
   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
@@ -2555,24 +2546,8 @@ void PagedSpace::PrepareForMarkCompact() {
   // on the first allocation after the sweep.
   EmptyAllocationInfo();
 
-  // Stop lazy sweeping and clear marking bits for unswept pages.
-  if (first_unswept_page_ != NULL) {
-    Page* p = first_unswept_page_;
-    do {
-      // Do not use ShouldBeSweptLazily predicate here.
-      // New evacuation candidates were selected but they still have
-      // to be swept before collection starts.
-      if (!p->WasSwept()) {
-        Bitmap::Clear(p);
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-      }
-      p = p->next_page();
-    } while (p != anchor());
-  }
-  first_unswept_page_ = Page::FromAddress(NULL);
+  // This counter will be increased for pages which will be swept by the
+  // sweeper threads.
   unswept_free_bytes_ = 0;
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
@@ -2581,7 +2556,8 @@ void PagedSpace::PrepareForMarkCompact() {
 
 
 intptr_t PagedSpace::SizeOfObjects() {
-  ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
+  ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
+         (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
 }
@@ -2595,39 +2571,6 @@ void PagedSpace::RepairFreeListsAfterBoot() {
 }
 
 
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
-  if (IsLazySweepingComplete()) return true;
-
-  intptr_t freed_bytes = 0;
-  Page* p = first_unswept_page_;
-  do {
-    Page* next_page = p->next_page();
-    if (ShouldBeSweptLazily(p)) {
-      if (FLAG_gc_verbose) {
-        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
-               reinterpret_cast<intptr_t>(p));
-      }
-      DecreaseUnsweptFreeBytes(p);
-      freed_bytes +=
-          MarkCompactCollector::
-              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
-                  this, NULL, p);
-    }
-    p = next_page;
-  } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
-  if (p == anchor()) {
-    first_unswept_page_ = Page::FromAddress(NULL);
-  } else {
-    first_unswept_page_ = p;
-  }
-
-  heap()->FreeQueuedChunks();
-
-  return IsLazySweepingComplete();
-}
-
-
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
@@ -2656,17 +2599,15 @@ bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
       }
       return false;
     }
-    return true;
-  } else {
-    return AdvanceSweeper(size_in_bytes);
   }
+  return true;
 }
 
 
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
-  // If there are unswept pages advance lazy sweeper a bounded number of times
+  // If there are unswept pages advance sweeping a bounded number of times
   // until we find a size_in_bytes contiguous piece of memory
   const int kMaxSweepingTries = 5;
   bool sweeping_complete = false;
@@ -2693,10 +2634,9 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
 
-  // Last ditch, sweep all the remaining pages to try to find space. This may
-  // cause a pause.
-  if (!IsLazySweepingComplete()) {
-    EnsureSweeperProgress(kMaxInt);
+  // Last ditch, sweep all the remaining pages to try to find space.
+  if (heap()->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+    heap()->mark_compact_collector()->WaitUntilSweepingCompleted();
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
...
@@ -1783,8 +1783,9 @@ class PagedSpace : public Space {
   intptr_t Available() { return free_list_.available(); }
 
   // Allocated bytes in this space. Garbage bytes that were not found due to
-  // lazy sweeping are counted as being allocated! The bytes in the current
-  // linear allocation area (between top and limit) are also counted here.
+  // concurrent sweeping are counted as being allocated! The bytes in the
+  // current linear allocation area (between top and limit) are also counted
+  // here.
   virtual intptr_t Size() { return accounting_stats_.Size(); }
 
   // As size, but the bytes in lazily swept pages are estimated and the bytes
@@ -1885,24 +1886,18 @@ class PagedSpace : public Space {
 
   // Evacuation candidates are swept by evacuator. Needs to return a valid
   // result before _and_ after evacuation has finished.
-  static bool ShouldBeSweptLazily(Page* p) {
+  static bool ShouldBeSweptBySweeperThreads(Page* p) {
     return !p->IsEvacuationCandidate() &&
            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
            !p->WasSweptPrecisely();
   }
 
-  void SetPagesToSweep(Page* first) {
-    ASSERT(unswept_free_bytes_ == 0);
-    if (first == &anchor_) first = NULL;
-    first_unswept_page_ = first;
-  }
-
   void IncrementUnsweptFreeBytes(intptr_t by) {
     unswept_free_bytes_ += by;
   }
 
   void IncreaseUnsweptFreeBytes(Page* p) {
-    ASSERT(ShouldBeSweptLazily(p));
+    ASSERT(ShouldBeSweptBySweeperThreads(p));
     unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
   }
@@ -1911,7 +1906,7 @@ class PagedSpace : public Space {
   }
 
   void DecreaseUnsweptFreeBytes(Page* p) {
-    ASSERT(ShouldBeSweptLazily(p));
+    ASSERT(ShouldBeSweptBySweeperThreads(p));
     unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
   }
@@ -1919,17 +1914,12 @@ class PagedSpace : public Space {
     unswept_free_bytes_ = 0;
   }
 
-  bool AdvanceSweeper(intptr_t bytes_to_sweep);
-
-  // When parallel sweeper threads are active and the main thread finished
-  // its sweeping phase, this function waits for them to complete, otherwise
-  // AdvanceSweeper with size_in_bytes is called.
+  // This function tries to steal size_in_bytes memory from the sweeper threads
+  // free-lists. If it does not succeed stealing enough memory, it will wait
+  // for the sweeper threads to finish sweeping.
+  // It returns true when sweeping is completed and false otherwise.
   bool EnsureSweeperProgress(intptr_t size_in_bytes);
 
-  bool IsLazySweepingComplete() {
-    return !first_unswept_page_->is_valid();
-  }
-
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
@@ -1969,13 +1959,9 @@ class PagedSpace : public Space {
   bool was_swept_conservatively_;
 
-  // The first page to be swept when the lazy sweeper advances. Is set
-  // to NULL when all pages have been swept.
-  Page* first_unswept_page_;
-
   // The number of free bytes which could be reclaimed by advancing the
-  // lazy sweeper. This is only an estimation because lazy sweeping is
-  // done conservatively.
+  // concurrent sweeper threads. This is only an estimation because concurrent
+  // sweeping is done conservatively.
   intptr_t unswept_free_bytes_;
 
   // Expands the space by allocating a fixed number of pages. Returns false if
...
@@ -508,9 +508,9 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
 // This function iterates over all the pointers in a paged space in the heap,
 // looking for pointers into new space. Within the pages there may be dead
 // objects that have not been overwritten by free spaces or fillers because of
-// lazy sweeping. These dead objects may not contain pointers to new space.
-// The garbage areas that have been swept properly (these will normally be the
-// large ones) will be marked with free space and filler map words. In
+// concurrent sweeping. These dead objects may not contain pointers to new
+// space. The garbage areas that have been swept properly (these will normally
+// be the large ones) will be marked with free space and filler map words. In
 // addition any area that has never been used at all for object allocation must
 // be marked with a free space or filler. Because the free space and filler
 // maps do not move we can always recognize these even after a compaction.
...
@@ -1606,12 +1606,16 @@ TEST(TestSizeOfObjects) {
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
-  CHECK(CcTest::heap()->old_pointer_space()->IsLazySweepingComplete());
+  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
+  }
   int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
 
   {
     // Allocate objects on several different old-space pages so that
-    // lazy sweeping kicks in for subsequent GC runs.
+    // concurrent sweeper threads will be busy sweeping the old space on
+    // subsequent GC runs.
     AlwaysAllocateScope always_allocate(CcTest::i_isolate());
     int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
     for (int i = 1; i <= 100; i++) {
@@ -1629,11 +1633,11 @@ TEST(TestSizeOfObjects) {
   CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
 
-  // Advancing the sweeper step-wise should not change the heap size.
-  while (!CcTest::heap()->old_pointer_space()->IsLazySweepingComplete()) {
-    CcTest::heap()->old_pointer_space()->AdvanceSweeper(KB);
-    CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
+  // Waiting for sweeper threads should not change heap size.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
   }
+  CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
 }
@@ -4199,11 +4203,8 @@ TEST(Regress357137) {
 }
 
 
-TEST(ArrayShiftLazySweeping) {
+TEST(ArrayShiftSweeping) {
   i::FLAG_expose_gc = true;
-  i::FLAG_parallel_sweeping = false;
-  i::FLAG_concurrent_sweeping = false;
-  i::FLAG_lazy_sweeping = true;
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
   Isolate* isolate = CcTest::i_isolate();
...
@@ -27,6 +27,10 @@
 
 // Regression test of a very rare corner case where left-trimming an
 // array caused invalid marking bit patterns on lazily swept pages.
+//
+// Lazy sweeping was deprecated. We are keeping the test case to make
+// sure that concurrent sweeping, which relies on similar assumptions
+// as lazy sweeping works correctly.
 
 // Flags: --expose-gc --noincremental-marking --max-new-space-size 1000
@@ -34,7 +38,7 @@
 var head = new Array(1);
 var tail = head;
 
-// Fill heap to increase old-space size and trigger lazy sweeping on
+// Fill heap to increase old-space size and trigger concurrent sweeping on
 // some of the old-space pages.
 for (var i = 0; i < 200; i++) {
   tail[1] = new Array(1000);
@@ -44,7 +48,7 @@
 gc(); gc();
 
 // At this point "array" should have been promoted to old-space and be
-// located in a lazy swept page with intact marking bits. Now shift
+// located in a concurrently swept page with intact marking bits. Now shift
 // the array to trigger left-trimming operations.
 assertEquals(100, array.length);
 for (var i = 0; i < 50; i++) {
@@ -54,7 +58,7 @@
 
 // At this point "array" should have been trimmed from the left with
 // marking bits being correctly transfered to the new object start.
-// Scavenging operations cause lazy sweeping to advance and verify
+// Scavenging operations cause concurrent sweeping to advance and verify
 // that marking bit patterns are still sane.
 for (var i = 0; i < 200; i++) {
   tail[1] = new Array(1000);
...