Commit f28f6102 authored by titzer@chromium.org

Revert "Remove sequential sweeping mode and perform lazy sweeping when no...

Revert "Remove sequential sweeping mode and perform lazy sweeping when no sweeper threads are active."

Reason: broke win64 build

This reverts commit 221bfdd2da2b6f3c1cbe77c5d197f1ea626b0bd2.

TBR=hpayer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/393523002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22374 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f2536bf7
@@ -3312,8 +3312,9 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
   // pages is set after sweeping all pages.
   return (!is_in_old_pointer_space && !is_in_old_data_space) ||
          page->WasSwept() ||
-         (page->parallel_sweeping() <=
-          MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+         (mark_compact_collector()->AreSweeperThreadsActivated() &&
+          page->parallel_sweeping() <=
+              MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
 }
@@ -4338,8 +4339,8 @@ bool Heap::IdleNotification(int hint) {
   // If the IdleNotifcation is called with a large hint we will wait for
   // the sweepter threads here.
   if (hint >= kMinHintForFullGC &&
-      mark_compact_collector()->sweeping_in_progress()) {
-    mark_compact_collector()->EnsureSweepingCompleted();
+      mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+    mark_compact_collector()->WaitUntilSweepingCompleted();
   }
   return false;
...
@@ -536,7 +536,7 @@ void IncrementalMarking::Start(CompactionFlag flag) {
   ResetStepCounters();
-  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+  if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
     StartMarking(flag);
   } else {
     if (FLAG_trace_incremental_marking) {
@@ -883,11 +883,11 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
   }
   if (state_ == SWEEPING) {
-    if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+    if (heap_->mark_compact_collector()->IsConcurrentSweepingInProgress() &&
         heap_->mark_compact_collector()->IsSweepingCompleted()) {
-      heap_->mark_compact_collector()->EnsureSweepingCompleted();
+      heap_->mark_compact_collector()->WaitUntilSweepingCompleted();
     }
-    if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+    if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
       bytes_scanned_ = 0;
       StartMarking(PREVENT_COMPACTION);
     }
...
@@ -1559,8 +1559,8 @@ void Isolate::Deinit() {
   sweeper_thread_ = NULL;
   if (FLAG_job_based_sweeping &&
-      heap_.mark_compact_collector()->sweeping_in_progress()) {
-    heap_.mark_compact_collector()->EnsureSweepingCompleted();
+      heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+    heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
   }
   if (FLAG_hydrogen_stats) GetHStatistics()->Print();
...
@@ -45,7 +45,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) :  // NOLINT
       marking_parity_(ODD_MARKING_PARITY),
       compacting_(false),
       was_marked_incrementally_(false),
-      sweeping_in_progress_(false),
+      sweeping_pending_(false),
       pending_sweeper_jobs_semaphore_(0),
       sequential_sweeping_(false),
       tracer_(NULL),
@@ -573,7 +573,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
 void MarkCompactCollector::StartSweeperThreads() {
   ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
   ASSERT(free_list_old_data_space_.get()->IsEmpty());
-  sweeping_in_progress_ = true;
+  sweeping_pending_ = true;
   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
     isolate()->sweeper_threads()[i]->StartSweeping();
   }
@@ -588,17 +588,8 @@ void MarkCompactCollector::StartSweeperThreads() {
 }
-void MarkCompactCollector::EnsureSweepingCompleted() {
-  ASSERT(sweeping_in_progress_ == true);
-  // If sweeping is not completed, we try to complete it here. If we do not
-  // have sweeper threads we have to complete since we do not have a good
-  // indicator for a swept space in that case.
-  if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
-    SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
-    SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
-  }
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+  ASSERT(sweeping_pending_ == true);
   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
     isolate()->sweeper_threads()[i]->WaitForSweeperThread();
   }
@@ -608,7 +599,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
     pending_sweeper_jobs_semaphore_.Wait();
   }
   ParallelSweepSpacesComplete();
-  sweeping_in_progress_ = false;
+  sweeping_pending_ = false;
   RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
   RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
   heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
@@ -622,7 +613,6 @@ bool MarkCompactCollector::IsSweepingCompleted() {
       return false;
     }
   }
-
   if (FLAG_job_based_sweeping) {
     if (!pending_sweeper_jobs_semaphore_.WaitFor(
             base::TimeDelta::FromSeconds(0))) {
@@ -630,7 +620,6 @@ bool MarkCompactCollector::IsSweepingCompleted() {
     }
     pending_sweeper_jobs_semaphore_.Signal();
   }
-
   return true;
 }
@@ -659,6 +648,12 @@ bool MarkCompactCollector::AreSweeperThreadsActivated() {
 }
+
+bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) {
+  return (space == NULL || space->is_swept_concurrently()) &&
+         sweeping_pending_;
+}
+
 void Marking::TransferMark(Address old_start, Address new_start) {
   // This is only used when resizing an object.
   ASSERT(MemoryChunk::FromAddress(old_start) ==
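
Note on the predicate re-added in the hunk above: a NULL argument asks whether concurrent sweeping is in progress on any space at all, while a concrete space additionally requires that this particular space is one of the spaces currently being swept. A minimal, self-contained sketch of that behaviour (the structs are simplified stand-ins for illustration, not the real V8 declarations):

    #include <cstddef>

    // Simplified stand-in for PagedSpace, illustration only.
    struct PagedSpace {
      bool is_swept_concurrently_ = false;
      bool is_swept_concurrently() { return is_swept_concurrently_; }
    };

    // Simplified stand-in for MarkCompactCollector, illustration only.
    struct MarkCompactCollector {
      bool sweeping_pending_ = false;
      bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL) {
        // NULL means "any space"; a non-NULL space must itself be under
        // concurrent sweeping, and sweeping must still be pending overall.
        return (space == NULL || space->is_swept_concurrently()) &&
               sweeping_pending_;
      }
    };
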
@@ -964,9 +959,9 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
   ASSERT(!FLAG_never_compact || !FLAG_always_compact);
-  if (sweeping_in_progress()) {
+  if (IsConcurrentSweepingInProgress()) {
     // Instead of waiting we could also abort the sweeper threads here.
-    EnsureSweepingCompleted();
+    WaitUntilSweepingCompleted();
   }
   // Clear marking bits if incremental marking is aborted.
@@ -4011,7 +4006,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
                                static_cast<int>(size));
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     ASSERT_EQ(0, p->LiveBytes());
-    return free_list->GuaranteedAllocatable(max_freed_bytes);
+    return freed_bytes;
   }
   // Grow the size of the start-of-page free space a little to get up to the
@@ -4068,7 +4063,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   }
   p->ResetLiveBytes();
-  return free_list->GuaranteedAllocatable(max_freed_bytes);
+  return max_freed_bytes;
 }
@@ -4102,6 +4097,7 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,
 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_is_iterable(sweeper == PRECISE);
+  space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE);
   space->ClearStats();
   // We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4146,6 +4142,15 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
     }
     switch (sweeper) {
+      case CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
+        pages_swept++;
+        break;
+      }
       case CONCURRENT_CONSERVATIVE:
       case PARALLEL_CONSERVATIVE: {
         if (!parallel_sweeping_active) {
@@ -4207,10 +4212,11 @@ void MarkCompactCollector::SweepSpaces() {
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
-  SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
-  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
-  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+  SweeperType how_to_sweep = CONSERVATIVE;
+  if (AreSweeperThreadsActivated()) {
+    if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+    if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+  }
   if (sweep_precisely_) how_to_sweep = PRECISE;
   MoveEvacuationCandidatesToEndOfPagesList();
@@ -4232,7 +4238,7 @@ void MarkCompactCollector::SweepSpaces() {
     }
     if (how_to_sweep == PARALLEL_CONSERVATIVE) {
-      EnsureSweepingCompleted();
+      WaitUntilSweepingCompleted();
     }
   }
   RemoveDeadInvalidatedCode();
...
@@ -570,6 +570,7 @@ class MarkCompactCollector {
   void EnableCodeFlushing(bool enable);
   enum SweeperType {
+    CONSERVATIVE,
     PARALLEL_CONSERVATIVE,
     CONCURRENT_CONSERVATIVE,
     PRECISE
@@ -664,19 +665,18 @@ class MarkCompactCollector {
   // then the whole given space is swept.
   int SweepInParallel(PagedSpace* space, int required_freed_bytes);
-  void EnsureSweepingCompleted();
+  void WaitUntilSweepingCompleted();
-  // If sweeper threads are not active this method will return true. If
-  // this is a latency issue we should be smarter here. Otherwise, it will
-  // return true if the sweeper threads are done processing the pages.
   bool IsSweepingCompleted();
   void RefillFreeList(PagedSpace* space);
   bool AreSweeperThreadsActivated();
-  // Checks if sweeping is in progress right now on any space.
-  bool sweeping_in_progress() { return sweeping_in_progress_; }
+  // If a paged space is passed in, this method checks if the given space is
+  // swept concurrently. Otherwise, this method checks if concurrent sweeping
+  // is in progress right now on any space.
+  bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL);
   void set_sequential_sweeping(bool sequential_sweeping) {
     sequential_sweeping_ = sequential_sweeping;
@@ -739,7 +739,7 @@ class MarkCompactCollector {
   bool was_marked_incrementally_;
   // True if concurrent or parallel sweeping is currently in progress.
-  bool sweeping_in_progress_;
+  bool sweeping_pending_;
   base::Semaphore pending_sweeper_jobs_semaphore_;
...
@@ -936,6 +936,7 @@ PagedSpace::PagedSpace(Heap* heap,
     : Space(heap, id, executable),
       free_list_(this),
       is_iterable_(true),
+      is_swept_concurrently_(false),
      unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL) {
   if (id == CODE_SPACE) {
@@ -2546,8 +2547,8 @@ void PagedSpace::PrepareForMarkCompact() {
 intptr_t PagedSpace::SizeOfObjects() {
-  ASSERT(heap()->mark_compact_collector()->sweeping_in_progress() ||
-         (unswept_free_bytes_ == 0));
+  ASSERT(heap()->mark_compact_collector()->
+         IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
 }
@@ -2577,12 +2578,24 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+HeapObject* PagedSpace::EnsureSweepingProgress(
     int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  if (collector->sweeping_in_progress()) {
+  if (collector->IsConcurrentSweepingInProgress(this)) {
+    // If sweeping is still in progress try to sweep pages on the main thread.
+    int free_chunk =
+        collector->SweepInParallel(this, size_in_bytes);
+    if (free_chunk >= size_in_bytes) {
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      // We should be able to allocate an object here since we just freed that
+      // much memory.
+      ASSERT(object != NULL);
+      if (object != NULL) return object;
+    }
     // Wait for the sweeper threads here and complete the sweeping phase.
-    collector->EnsureSweepingCompleted();
+    collector->WaitUntilSweepingCompleted();
     // After waiting for the sweeper threads, there may be new free-list
     // entries.
@@ -2595,28 +2608,14 @@ HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
+  // If sweeper threads are active, try to re-fill the free-lists.
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  // Sweeping is still in progress.
-  if (collector->sweeping_in_progress()) {
-    // First try to refill the free-list, concurrent sweeper threads
-    // may have freed some objects in the meantime.
+  if (collector->IsConcurrentSweepingInProgress(this)) {
     collector->RefillFreeList(this);
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
-
-    // If sweeping is still in progress try to sweep pages on the main thread.
-    int free_chunk =
-        collector->SweepInParallel(this, size_in_bytes);
-    collector->RefillFreeList(this);
-    if (free_chunk >= size_in_bytes) {
-      HeapObject* object = free_list_.Allocate(size_in_bytes);
-      // We should be able to allocate an object here since we just freed that
-      // much memory.
-      ASSERT(object != NULL);
-      if (object != NULL) return object;
-    }
   }
   // Free list allocation failed and there is no next page. Fail if we have
@@ -2626,7 +2625,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       && heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements form their free-lists.
-    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    HeapObject* object = EnsureSweepingProgress(size_in_bytes);
     if (object != NULL) return object;
   }
@@ -2639,7 +2638,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // If sweeper threads are active, wait for them at that point and steal
   // elements form their free-lists. Allocation may still fail their which
   // would indicate that there is not enough memory for the given allocation.
-  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+  return EnsureSweepingProgress(size_in_bytes);
 }
...
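
Taken together, the spaces.cc hunks above restore the split where the main-thread sweeping fallback lives in EnsureSweepingProgress rather than in SlowAllocateRaw. A hedged, abridged paraphrase of the restored slow path follows; it is not the literal V8 source, and the final retry is only implied by the "new free-list entries" comment in the context lines:

    HeapObject* PagedSpace::EnsureSweepingProgress(int size_in_bytes) {
      MarkCompactCollector* collector = heap()->mark_compact_collector();
      if (collector->IsConcurrentSweepingInProgress(this)) {
        // 1. Try to free at least size_in_bytes on the main thread first.
        if (collector->SweepInParallel(this, size_in_bytes) >= size_in_bytes) {
          HeapObject* object = free_list_.Allocate(size_in_bytes);
          if (object != NULL) return object;
        }
        // 2. Otherwise block until the sweeper threads have finished.
        collector->WaitUntilSweepingCompleted();
      }
      // 3. Retry from the (possibly refilled) free list.
      return free_list_.Allocate(size_in_bytes);
    }
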
@@ -1619,21 +1619,6 @@ class FreeList {
   // aligned, and the size should be a non-zero multiple of the word size.
   int Free(Address start, int size_in_bytes);
-  // This method returns how much memory can be allocated after freeing
-  // maximum_freed memory.
-  int GuaranteedAllocatable(int maximum_freed) {
-    if (maximum_freed < kSmallListMin) {
-      return 0;
-    } else if (maximum_freed <= kSmallListMax) {
-      return kSmallAllocationMax;
-    } else if (maximum_freed <= kMediumListMax) {
-      return kMediumAllocationMax;
-    } else if (maximum_freed <= kLargeListMax) {
-      return kLargeAllocationMax;
-    }
-    return maximum_freed;
-  }
   // Allocate a block of size 'size_in_bytes' from the free list. The block
   // is unitialized. A failure is returned if no block is available. The
   // number of bytes lost to fragmentation is returned in the output parameter
@@ -1920,6 +1905,9 @@ class PagedSpace : public Space {
   bool is_iterable() { return is_iterable_; }
   void set_is_iterable(bool b) { is_iterable_ = b; }
+  bool is_swept_concurrently() { return is_swept_concurrently_; }
+  void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; }
+
   // Evacuation candidates are swept by evacuator. Needs to return a valid
   // result before _and_ after evacuation has finished.
   static bool ShouldBeSweptBySweeperThreads(Page* p) {
@@ -2004,6 +1992,9 @@ class PagedSpace : public Space {
   // This space was swept precisely, hence it is iterable.
   bool is_iterable_;
+  // This space is currently swept by sweeper threads.
+  bool is_swept_concurrently_;
+
   // The number of free bytes which could be reclaimed by advancing the
   // concurrent sweeper threads. This is only an estimation because concurrent
   // sweeping is done conservatively.
@@ -2026,8 +2017,7 @@ class PagedSpace : public Space {
   // If sweeping is still in progress try to sweep unswept pages. If that is
   // not successful, wait for the sweeper threads and re-try free-list
   // allocation.
-  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
-      int size_in_bytes);
+  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
...
@@ -44,8 +44,8 @@ using namespace v8::internal;
 static void SimulateIncrementalMarking() {
   MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
   IncrementalMarking* marking = CcTest::heap()->incremental_marking();
-  if (collector->sweeping_in_progress()) {
-    collector->EnsureSweepingCompleted();
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
   }
   CHECK(marking->IsMarking() || marking->IsStopped());
   if (marking->IsStopped()) {
@@ -1595,8 +1595,8 @@ TEST(TestSizeOfObjects) {
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
-  if (collector->sweeping_in_progress()) {
-    collector->EnsureSweepingCompleted();
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
   }
   int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
@@ -1622,8 +1622,8 @@ TEST(TestSizeOfObjects) {
   CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
   // Waiting for sweeper threads should not change heap size.
-  if (collector->sweeping_in_progress()) {
-    collector->EnsureSweepingCompleted();
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
   }
   CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
 }
...