Commit 1b595838 authored by dslomov@chromium.org

Revert "Remove sequential sweeping mode and perform lazy sweeping when no...

Revert "Remove sequential sweeping mode and perform lazy sweeping when no sweeper threads are active."

This reverts commit r22346 for breaking GC stress tests.

TBR=hpayer@chromium.org

Review URL: https://codereview.chromium.org/386943003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22349 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4db1f680
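For readers of the diff below: the change being reverted (r22346) had folded main-thread ("lazy") sweeping into the completion path, so that EnsureSweepingCompleted could finish sweeping even when no sweeper threads are active, while the restored WaitUntilSweepingCompleted only waits for whatever sweeper threads exist. The following is a minimal, self-contained sketch of that behavioral difference; SketchCollector and its fields are illustrative stand-ins, not V8's actual types or logic.

```cpp
// Illustrative stand-in (not V8 code): models the two completion strategies
// that this revert switches between.
#include <cstdio>

class SketchCollector {
 public:
  SketchCollector(int sweeper_threads, int unswept_pages)
      : sweeper_threads_(sweeper_threads),
        unswept_pages_(unswept_pages),
        sweeping_pending_(true) {}

  // Restored behavior (cf. WaitUntilSweepingCompleted): wait for whatever
  // sweeper threads exist, then declare sweeping done.  With zero threads,
  // nothing here actually sweeps the remaining pages.
  void WaitUntilSweepingCompleted() {
    for (int i = 0; i < sweeper_threads_; i++) {
      unswept_pages_ = 0;  // stands in for joining a thread that swept them
    }
    sweeping_pending_ = false;
  }

  // Reverted behavior (cf. EnsureSweepingCompleted): if no sweeper threads
  // are active, sweep the leftover pages on the main thread before declaring
  // sweeping done, so completion is guaranteed either way.
  void EnsureSweepingCompleted() {
    if (sweeper_threads_ == 0) {
      unswept_pages_ = 0;  // stands in for SweepInParallel(...) on this thread
    }
    WaitUntilSweepingCompleted();
  }

  int unswept_pages() const { return unswept_pages_; }
  bool sweeping_pending() const { return sweeping_pending_; }

 private:
  int sweeper_threads_;
  int unswept_pages_;
  bool sweeping_pending_;
};

int main() {
  SketchCollector wait_only(/*sweeper_threads=*/0, /*unswept_pages=*/3);
  wait_only.WaitUntilSweepingCompleted();
  std::printf("wait-only, no threads: pending=%d unswept=%d\n",
              wait_only.sweeping_pending(), wait_only.unswept_pages());

  SketchCollector ensured(/*sweeper_threads=*/0, /*unswept_pages=*/3);
  ensured.EnsureSweepingCompleted();
  std::printf("ensure,    no threads: pending=%d unswept=%d\n",
              ensured.sweeping_pending(), ensured.unswept_pages());
  return 0;
}
```

In the sketch, only the ensure-style call guarantees that no pages remain unswept when there are zero sweeper threads, which mirrors the comment in the removed EnsureSweepingCompleted body further down in the diff.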
@@ -3312,7 +3312,8 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
// pages is set after sweeping all pages.
return (!is_in_old_pointer_space && !is_in_old_data_space) ||
page->WasSwept() ||
(page->parallel_sweeping() <=
(mark_compact_collector()->AreSweeperThreadsActivated() &&
page->parallel_sweeping() <=
MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
@@ -4338,8 +4339,8 @@ bool Heap::IdleNotification(int hint) {
// If the IdleNotification is called with a large hint, we will wait for
// the sweeper threads here.
if (hint >= kMinHintForFullGC &&
mark_compact_collector()->sweeping_in_progress()) {
mark_compact_collector()->EnsureSweepingCompleted();
mark_compact_collector()->IsConcurrentSweepingInProgress()) {
mark_compact_collector()->WaitUntilSweepingCompleted();
}
return false;
......
@@ -536,7 +536,7 @@ void IncrementalMarking::Start(CompactionFlag flag) {
ResetStepCounters();
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
@@ -883,11 +883,11 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
}
if (state_ == SWEEPING) {
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
if (heap_->mark_compact_collector()->IsConcurrentSweepingInProgress() &&
heap_->mark_compact_collector()->IsSweepingCompleted()) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
heap_->mark_compact_collector()->WaitUntilSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
......
@@ -1558,8 +1558,8 @@ void Isolate::Deinit() {
sweeper_thread_ = NULL;
if (FLAG_job_based_sweeping &&
heap_.mark_compact_collector()->sweeping_in_progress()) {
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
}
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
......
@@ -45,7 +45,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
marking_parity_(ODD_MARKING_PARITY),
compacting_(false),
was_marked_incrementally_(false),
sweeping_in_progress_(false),
sweeping_pending_(false),
pending_sweeper_jobs_semaphore_(0),
sequential_sweeping_(false),
tracer_(NULL),
@@ -573,7 +573,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
void MarkCompactCollector::StartSweeperThreads() {
ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
ASSERT(free_list_old_data_space_.get()->IsEmpty());
sweeping_in_progress_ = true;
sweeping_pending_ = true;
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
@@ -588,17 +588,8 @@ void MarkCompactCollector::StartSweeperThreads() {
}
void MarkCompactCollector::EnsureSweepingCompleted() {
ASSERT(sweeping_in_progress_ == true);
// If sweeping is not completed, we try to complete it here. If we do not
// have sweeper threads, we have to complete the sweeping here since we do
// not have a good indicator for a swept space in that case.
if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
}
void MarkCompactCollector::WaitUntilSweepingCompleted() {
ASSERT(sweeping_pending_ == true);
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
@@ -608,7 +599,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
pending_sweeper_jobs_semaphore_.Wait();
}
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
sweeping_pending_ = false;
RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
@@ -622,7 +613,6 @@ bool MarkCompactCollector::IsSweepingCompleted() {
return false;
}
}
if (FLAG_job_based_sweeping) {
if (!pending_sweeper_jobs_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
@@ -630,7 +620,6 @@
}
pending_sweeper_jobs_semaphore_.Signal();
}
return true;
}
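The job-based branch in the hunk above probes the pending-sweeper-jobs semaphore with a zero timeout and, if the probe succeeds, immediately re-signals it so the count is left unchanged. A hedged sketch of that poll-and-resignal idiom follows, using C++20 std::counting_semaphore as a stand-in for V8's base::Semaphore:

```cpp
// Sketch only: V8 uses its own base::Semaphore; std::counting_semaphore is a
// stand-in with the same try-acquire / release shape.
#include <cstdio>
#include <semaphore>

std::counting_semaphore<> pending_sweeper_jobs(0);

// Returns true once the queued sweeper job has signalled completion.
bool IsSweepingCompletedSketch() {
  // Non-blocking probe: succeeds only if a completion signal is available.
  if (!pending_sweeper_jobs.try_acquire()) {
    return false;
  }
  // Put the signal back so a later, real wait still sees it.
  pending_sweeper_jobs.release();
  return true;
}

int main() {
  std::printf("before signal: %d\n", IsSweepingCompletedSketch());
  pending_sweeper_jobs.release();  // a sweeper job reports completion
  std::printf("after signal:  %d\n", IsSweepingCompletedSketch());
  return 0;
}
```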
@@ -659,6 +648,12 @@ bool MarkCompactCollector::AreSweeperThreadsActivated() {
}
bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) {
return (space == NULL || space->is_swept_concurrently()) &&
sweeping_pending_;
}
void Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
@@ -964,9 +959,9 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
if (sweeping_in_progress()) {
if (IsConcurrentSweepingInProgress()) {
// Instead of waiting we could also abort the sweeper threads here.
EnsureSweepingCompleted();
WaitUntilSweepingCompleted();
}
// Clear marking bits if incremental marking is aborted.
@@ -4102,6 +4097,7 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_is_iterable(sweeper == PRECISE);
space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4146,6 +4142,15 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
}
switch (sweeper) {
case CONSERVATIVE: {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
reinterpret_cast<intptr_t>(p));
}
SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
pages_swept++;
break;
}
case CONCURRENT_CONSERVATIVE:
case PARALLEL_CONSERVATIVE: {
if (!parallel_sweeping_active) {
@@ -4207,10 +4212,11 @@ void MarkCompactCollector::SweepSpaces() {
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
SweeperType how_to_sweep = CONSERVATIVE;
if (AreSweeperThreadsActivated()) {
if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
}
if (sweep_precisely_) how_to_sweep = PRECISE;
MoveEvacuationCandidatesToEndOfPagesList();
@@ -4232,7 +4238,7 @@
}
if (how_to_sweep == PARALLEL_CONSERVATIVE) {
EnsureSweepingCompleted();
WaitUntilSweepingCompleted();
}
}
RemoveDeadInvalidatedCode();
......
@@ -570,6 +570,7 @@ class MarkCompactCollector {
void EnableCodeFlushing(bool enable);
enum SweeperType {
CONSERVATIVE,
PARALLEL_CONSERVATIVE,
CONCURRENT_CONSERVATIVE,
PRECISE
@@ -664,19 +665,18 @@
// then the whole given space is swept.
int SweepInParallel(PagedSpace* space, int required_freed_bytes);
void EnsureSweepingCompleted();
void WaitUntilSweepingCompleted();
// If sweeper threads are not active, this method will return true. If
// this is a latency issue, we should be smarter here. Otherwise, it will
// return true if the sweeper threads are done processing the pages.
bool IsSweepingCompleted();
void RefillFreeList(PagedSpace* space);
bool AreSweeperThreadsActivated();
// Checks if sweeping is in progress right now on any space.
bool sweeping_in_progress() { return sweeping_in_progress_; }
// If a paged space is passed in, this method checks if the given space is
// swept concurrently. Otherwise, this method checks if concurrent sweeping
// is in progress right now on any space.
bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL);
void set_sequential_sweeping(bool sequential_sweeping) {
sequential_sweeping_ = sequential_sweeping;
@@ -739,7 +739,7 @@ class MarkCompactCollector {
bool was_marked_incrementally_;
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_in_progress_;
bool sweeping_pending_;
base::Semaphore pending_sweeper_jobs_semaphore_;
......
@@ -936,6 +936,7 @@ PagedSpace::PagedSpace(Heap* heap,
: Space(heap, id, executable),
free_list_(this),
is_iterable_(true),
is_swept_concurrently_(false),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
@@ -2546,8 +2547,8 @@ void PagedSpace::PrepareForMarkCompact() {
intptr_t PagedSpace::SizeOfObjects() {
ASSERT(heap()->mark_compact_collector()->sweeping_in_progress() ||
(unswept_free_bytes_ == 0));
ASSERT(heap()->mark_compact_collector()->
IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
@@ -2577,12 +2578,24 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
}
HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
HeapObject* PagedSpace::EnsureSweepingProgress(
int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
if (collector->IsConcurrentSweepingInProgress(this)) {
// If sweeping is still in progress try to sweep pages on the main thread.
int free_chunk =
collector->SweepInParallel(this, size_in_bytes);
if (free_chunk >= size_in_bytes) {
HeapObject* object = free_list_.Allocate(size_in_bytes);
// We should be able to allocate an object here since we just freed that
// much memory.
ASSERT(object != NULL);
if (object != NULL) return object;
}
// Wait for the sweeper threads here and complete the sweeping phase.
collector->EnsureSweepingCompleted();
collector->WaitUntilSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
// entries.
@@ -2595,28 +2608,14 @@ HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
// If sweeper threads are active, try to re-fill the free-lists.
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
if (collector->IsConcurrentSweepingInProgress(this)) {
collector->RefillFreeList(this);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
int free_chunk =
collector->SweepInParallel(this, size_in_bytes);
collector->RefillFreeList(this);
if (free_chunk >= size_in_bytes) {
HeapObject* object = free_list_.Allocate(size_in_bytes);
// We should be able to allocate an object here since we just freed that
// much memory.
ASSERT(object != NULL);
if (object != NULL) return object;
}
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -2626,7 +2625,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
&& heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists.
HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
HeapObject* object = EnsureSweepingProgress(size_in_bytes);
if (object != NULL) return object;
}
@@ -2639,7 +2638,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail, which
// would indicate that there is not enough memory for the given allocation.
return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
return EnsureSweepingProgress(size_in_bytes);
}
......
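To summarize the allocation slow path that the spaces.cc hunks above restore: when free-list allocation fails while concurrent sweeping is in progress, the code first refills the free list from pages the sweepers have already finished, retries, then sweeps pages on the main thread and retries again, and only waits for the sweeper threads as a last resort. Below is a condensed, self-contained sketch of that ordering; FreeListModel and its methods are hypothetical stand-ins for the free list, RefillFreeList, SweepInParallel and WaitUntilSweepingCompleted, not V8's API.

```cpp
// Hedged sketch of the allocation slow path restored above.
#include <cstdio>

struct FreeListModel {
  int free_bytes = 0;          // bytes currently linked into the free list
  int swept_but_unlinked = 0;  // freed by concurrent sweepers, not linked yet
  int unswept_bytes = 0;       // reclaimable only by sweeping more pages

  void Refill() {              // cf. RefillFreeList
    free_bytes += swept_but_unlinked;
    swept_but_unlinked = 0;
  }
  int SweepOnMainThread(int required) {  // cf. SweepInParallel
    int freed = unswept_bytes < required ? unswept_bytes : required;
    unswept_bytes -= freed;
    swept_but_unlinked += freed;
    return freed;
  }
  void WaitForSweepers() {     // cf. WaitUntilSweepingCompleted
    swept_but_unlinked += unswept_bytes;
    unswept_bytes = 0;
  }
  bool Allocate(int size) {    // cf. free_list_.Allocate
    if (free_bytes < size) return false;
    free_bytes -= size;
    return true;
  }
};

// Mirrors the restored SlowAllocateRaw ordering: refill, retry, sweep on the
// main thread, retry, and only then block on the sweeper threads.
bool SlowAllocateSketch(FreeListModel* space, int size) {
  space->Refill();
  if (space->Allocate(size)) return true;

  int freed = space->SweepOnMainThread(size);
  space->Refill();
  if (freed >= size && space->Allocate(size)) return true;

  space->WaitForSweepers();
  space->Refill();
  return space->Allocate(size);
}

int main() {
  FreeListModel space;
  space.unswept_bytes = 4096;  // everything still held by unswept pages
  std::printf("allocated: %d\n", SlowAllocateSketch(&space, 1024));
  return 0;
}
```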
@@ -1905,6 +1905,9 @@ class PagedSpace : public Space {
bool is_iterable() { return is_iterable_; }
void set_is_iterable(bool b) { is_iterable_ = b; }
bool is_swept_concurrently() { return is_swept_concurrently_; }
void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; }
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
static bool ShouldBeSweptBySweeperThreads(Page* p) {
@@ -1989,6 +1992,9 @@ class PagedSpace : public Space {
// This space was swept precisely, hence it is iterable.
bool is_iterable_;
// This space is currently swept by sweeper threads.
bool is_swept_concurrently_;
// The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimation because concurrent
// sweeping is done conservatively.
@@ -2011,8 +2017,7 @@ class PagedSpace : public Space {
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
// allocation.
MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
int size_in_bytes);
MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
......
@@ -44,8 +44,8 @@ using namespace v8::internal;
static void SimulateIncrementalMarking() {
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
if (collector->IsConcurrentSweepingInProgress()) {
collector->WaitUntilSweepingCompleted();
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
@@ -1595,8 +1595,8 @@ TEST(TestSizeOfObjects) {
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
if (collector->IsConcurrentSweepingInProgress()) {
collector->WaitUntilSweepingCompleted();
}
int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
@@ -1622,8 +1622,8 @@ TEST(TestSizeOfObjects) {
CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
// Waiting for sweeper threads should not change heap size.
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
if (collector->IsConcurrentSweepingInProgress()) {
collector->WaitUntilSweepingCompleted();
}
CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
}
......