Commit 6067ca6a authored by hpayer@chromium.org

Concurrent/parallel precise sweeping.

BUG=
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/398333002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22471 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent d09d81f9
......@@ -516,6 +516,7 @@ DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
"track object counts and memory usage")
DEFINE_BOOL(always_precise_sweeping, false, "always sweep precisely")
DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
DEFINE_INT(sweeper_threads, 0,
......
......@@ -2600,12 +2600,12 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
CHECK(debug_heap->old_data_space()->is_iterable());
CHECK(debug_heap->old_pointer_space()->is_iterable());
CHECK(debug_heap->code_space()->is_iterable());
CHECK(debug_heap->cell_space()->is_iterable());
CHECK(debug_heap->property_cell_space()->is_iterable());
CHECK(debug_heap->map_space()->is_iterable());
CHECK(debug_heap->old_data_space()->swept_precisely());
CHECK(debug_heap->old_pointer_space()->swept_precisely());
CHECK(debug_heap->code_space()->swept_precisely());
CHECK(debug_heap->cell_space()->swept_precisely());
CHECK(debug_heap->property_cell_space()->swept_precisely());
CHECK(debug_heap->map_space()->swept_precisely());
#endif
#ifdef VERIFY_HEAP
......
......@@ -1297,7 +1297,7 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
// The old data space was normally swept conservatively so that the iterator
// doesn't work, so we normally skip the next bit.
if (heap->old_data_space()->is_iterable()) {
if (heap->old_data_space()->swept_precisely()) {
HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next();
object != NULL; object = data_it.Next())
......@@ -3304,8 +3304,7 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
// pages is set after sweeping all pages.
return (!is_in_old_pointer_space && !is_in_old_data_space) ||
page->WasSwept() ||
(page->parallel_sweeping() <=
MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
(page->parallel_sweeping() <= MemoryChunk::SWEEPING_FINALIZE);
}
......@@ -4209,8 +4208,8 @@ STRUCT_LIST(MAKE_CASE)
bool Heap::IsHeapIterable() {
return (old_pointer_space()->is_iterable() &&
old_data_space()->is_iterable() &&
return (old_pointer_space()->swept_precisely() &&
old_data_space()->swept_precisely() &&
new_space_top_after_last_gc_ == new_space()->top());
}
......
This diff is collapsed.
......@@ -567,6 +567,8 @@ class MarkCompactCollector {
enum SweeperType {
PARALLEL_CONSERVATIVE,
CONCURRENT_CONSERVATIVE,
PARALLEL_PRECISE,
CONCURRENT_PRECISE,
PRECISE
};
......@@ -586,7 +588,7 @@ class MarkCompactCollector {
// Sweep a single page from the given space conservatively.
// Returns the size of the biggest continuous freed memory chunk in bytes.
template<SweepingParallelism type>
static intptr_t SweepConservatively(PagedSpace* space,
static int SweepConservatively(PagedSpace* space,
FreeList* free_list,
Page* p);
......
......@@ -67,7 +67,7 @@ void HeapObjectIterator::Initialize(PagedSpace* space,
HeapObjectIterator::PageMode mode,
HeapObjectCallback size_f) {
// Check that we actually can iterate this space.
ASSERT(space->is_iterable());
ASSERT(space->swept_precisely());
space_ = space;
cur_addr_ = cur;
......@@ -479,7 +479,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
chunk->set_parallel_sweeping(SWEEPING_DONE);
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
......@@ -935,7 +935,7 @@ PagedSpace::PagedSpace(Heap* heap,
Executability executable)
: Space(heap, id, executable),
free_list_(this),
is_iterable_(true),
swept_precisely_(true),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
......@@ -1157,7 +1157,7 @@ void PagedSpace::Print() { }
#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
// We can only iterate over the pages if they were swept precisely.
if (!is_iterable_) return;
if (!swept_precisely_) return;
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
......@@ -2775,7 +2775,7 @@ void PagedSpace::ReportStatistics() {
", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
if (!is_iterable_) return;
if (!swept_precisely_) return;
ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
......
......@@ -450,18 +450,17 @@ class MemoryChunk {
intptr_t GetFlags() { return flags_; }
// PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
// sweeping must not be performed on that page.
// PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
// page and will not touch the page memory anymore.
// PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
// sweeper thread.
// PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
// SWEEPING_DONE - The page state when sweeping is complete or sweeping must
// not be performed on that page.
// SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
// not touch the page memory anymore.
// SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
// SWEEPING_PENDING - This page is ready for parallel sweeping.
enum ParallelSweepingState {
PARALLEL_SWEEPING_DONE,
PARALLEL_SWEEPING_FINALIZE,
PARALLEL_SWEEPING_IN_PROGRESS,
PARALLEL_SWEEPING_PENDING
SWEEPING_DONE,
SWEEPING_FINALIZE,
SWEEPING_IN_PROGRESS,
SWEEPING_PENDING
};
ParallelSweepingState parallel_sweeping() {
......@@ -475,8 +474,8 @@ class MemoryChunk {
bool TryParallelSweeping() {
return base::Acquire_CompareAndSwap(
&parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
&parallel_sweeping_, SWEEPING_PENDING, SWEEPING_IN_PROGRESS) ==
SWEEPING_PENDING;
}
// Manage live byte count (count of bytes known to be live,
......@@ -1917,8 +1916,8 @@ class PagedSpace : public Space {
static void ResetCodeStatistics(Isolate* isolate);
#endif
bool is_iterable() { return is_iterable_; }
void set_is_iterable(bool b) { is_iterable_ = b; }
bool swept_precisely() { return swept_precisely_; }
void set_swept_precisely(bool b) { swept_precisely_ = b; }
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
......@@ -2002,7 +2001,7 @@ class PagedSpace : public Space {
AllocationInfo allocation_info_;
// This space was swept precisely, hence it is iterable.
bool is_iterable_;
bool swept_precisely_;
// The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimation because concurrent
......
......@@ -4321,7 +4321,7 @@ TEST(ArrayShiftSweeping) {
CHECK(heap->InOldPointerSpace(o->elements()));
CHECK(heap->InOldPointerSpace(*o));
Page* page = Page::FromAddress(o->elements()->address());
CHECK(page->parallel_sweeping() <= MemoryChunk::PARALLEL_SWEEPING_FINALIZE ||
CHECK(page->parallel_sweeping() <= MemoryChunk::SWEEPING_FINALIZE ||
Marking::IsBlack(Marking::MarkBitFrom(o->elements())));
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment