Commit 65c9c2a2 authored by hpayer@chromium.org

Remove conservative sweeping.

BUG=
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/479113004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23283 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 53fdf75b
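For context: precise sweeping turns every dead region between live objects into an explicit free-list entry of known size, so a linear walk of the page afterwards only ever sees live objects and free entries, and the page stays iterable. Conservative sweeping roughly only recovered large free regions and left small garbage in place, which is why conservatively swept pages could not be iterated. The following is an illustrative, standalone sketch of that idea in plain C++ (not V8 code, and none of its names come from this CL):

#include <cstdio>
#include <vector>

struct Cell { bool live; int size; };       // stand-in for an object on a page
struct FreeEntry { int start; int size; };  // stand-in for a free-list entry

// "Precise sweep" of one fake page: every run of dead cells becomes an
// explicit free entry, so a later linear walk can step over it.
std::vector<FreeEntry> SweepPrecisely(const std::vector<Cell>& page) {
  std::vector<FreeEntry> free_list;
  int addr = 0;
  int free_start = -1;
  for (const Cell& c : page) {
    if (c.live) {
      if (free_start >= 0) {  // close the current dead run
        free_list.push_back({free_start, addr - free_start});
        free_start = -1;
      }
    } else if (free_start < 0) {
      free_start = addr;      // open a new dead run
    }
    addr += c.size;
  }
  if (free_start >= 0) free_list.push_back({free_start, addr - free_start});
  return free_list;
}

int main() {
  // live(8) dead(4) dead(4) live(16) dead(8)
  std::vector<Cell> page = {
      {true, 8}, {false, 4}, {false, 4}, {true, 16}, {false, 8}};
  for (const FreeEntry& e : SweepPrecisely(page))
    std::printf("free entry at offset %d, %d bytes\n", e.start, e.size);
  return 0;
}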
@@ -526,7 +526,6 @@ DEFINE_BOOL(trace_incremental_marking, false,
             "trace progress of the incremental marking")
 DEFINE_BOOL(track_gc_object_stats, false,
             "track object counts and memory usage")
-DEFINE_BOOL(always_precise_sweeping, true, "always sweep precisely")
 DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
 DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
 DEFINE_INT(sweeper_threads, 0,
...
@@ -2580,15 +2580,6 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
-  CHECK(debug_heap->old_data_space()->swept_precisely());
-  CHECK(debug_heap->old_pointer_space()->swept_precisely());
-  CHECK(debug_heap->code_space()->swept_precisely());
-  CHECK(debug_heap->cell_space()->swept_precisely());
-  CHECK(debug_heap->property_cell_space()->swept_precisely());
-  CHECK(debug_heap->map_space()->swept_precisely());
-#endif
-
-#ifdef VERIFY_HEAP
   debug_heap->Verify();
 #endif
...
@@ -1273,14 +1273,10 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
        object = code_it.Next())
     object->Iterate(&v);
 
-  // The old data space was normally swept conservatively so that the iterator
-  // doesn't work, so we normally skip the next bit.
-  if (heap->old_data_space()->swept_precisely()) {
   HeapObjectIterator data_it(heap->old_data_space());
   for (HeapObject* object = data_it.Next(); object != NULL;
        object = data_it.Next())
     object->Iterate(&v);
-  }
 }
 #endif  // VERIFY_HEAP
@@ -4242,9 +4238,7 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
 
 bool Heap::IsHeapIterable() {
   // TODO(hpayer): This function is not correct. Allocation folding in old
   // space breaks the iterability.
-  return (old_pointer_space()->swept_precisely() &&
-          old_data_space()->swept_precisely() &&
-          new_space_top_after_last_gc_ == new_space()->top());
+  return new_space_top_after_last_gc_ == new_space()->top();
 }
...
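With old pointer and old data space now always swept precisely, iterability only depends on whether new space has been allocated into since the last GC. A hedged sketch of the kind of caller this predicate serves, modeled on Heap::MakeHeapIterable in heap.cc (the exact body in the tree may differ):

void Heap::MakeHeapIterable() {
  if (!IsHeapIterable()) {
    // After this CL the mask only aborts incremental marking; there is no
    // separate "sweep precisely" request any more.
    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
  }
  if (mark_compact_collector()->sweeping_in_progress()) {
    mark_compact_collector()->EnsureSweepingCompleted();
  }
  DCHECK(IsHeapIterable());
}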
@@ -715,14 +715,11 @@ class Heap {
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
   static const int kNoGCFlags = 0;
-  static const int kSweepPreciselyMask = 1;
-  static const int kReduceMemoryFootprintMask = 2;
-  static const int kAbortIncrementalMarkingMask = 4;
+  static const int kReduceMemoryFootprintMask = 1;
+  static const int kAbortIncrementalMarkingMask = 2;
 
-  // Making the heap iterable requires us to sweep precisely and abort any
-  // incremental marking as well.
-  static const int kMakeHeapIterableMask =
-      kSweepPreciselyMask | kAbortIncrementalMarkingMask;
+  // Making the heap iterable requires us to abort incremental marking.
+  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
 
   // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
   // non-zero, then the slower precise sweeper is used, which leaves the heap
...
@@ -23,7 +23,6 @@ MarkBit Marking::MarkBitFrom(Address addr) {
 
 void MarkCompactCollector::SetFlags(int flags) {
-  sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
   reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
   abort_incremental_marking_ =
       ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
...
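A standalone worked example of the mask change (it mirrors the constants from heap.h and the decoding SetFlags performs above; sketch only, not code from this CL):

#include <cstdio>

// Mask values as defined in heap.h after this change.
static const int kReduceMemoryFootprintMask = 1;
static const int kAbortIncrementalMarkingMask = 2;
static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

int main() {
  // The same decoding MarkCompactCollector::SetFlags performs.
  int flags = kMakeHeapIterableMask;
  bool reduce_memory_footprint = (flags & kReduceMemoryFootprintMask) != 0;
  bool abort_incremental_marking = (flags & kAbortIncrementalMarkingMask) != 0;
  std::printf("reduce_memory_footprint=%d abort_incremental_marking=%d\n",
              reduce_memory_footprint, abort_incremental_marking);
  // Before this change, making the heap iterable also required setting the
  // now-removed kSweepPreciselyMask bit.
  return 0;
}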
This diff is collapsed.
@@ -544,11 +544,9 @@ class MarkCompactCollector {
   void EnableCodeFlushing(bool enable);
 
   enum SweeperType {
-    PARALLEL_CONSERVATIVE,
-    CONCURRENT_CONSERVATIVE,
-    PARALLEL_PRECISE,
-    CONCURRENT_PRECISE,
-    PRECISE
+    PARALLEL_SWEEPING,
+    CONCURRENT_SWEEPING,
+    SEQUENTIAL_SWEEPING
   };
 
   enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -561,12 +559,6 @@ class MarkCompactCollector {
   void VerifyOmittedMapChecks();
 #endif
 
-  // Sweep a single page from the given space conservatively.
-  // Returns the size of the biggest continuous freed memory chunk in bytes.
-  template <SweepingParallelism type>
-  static int SweepConservatively(PagedSpace* space, FreeList* free_list,
-                                 Page* p);
-
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
     return Page::FromAddress(reinterpret_cast<Address>(anchor))
         ->ShouldSkipEvacuationSlotRecording();
@@ -693,10 +685,6 @@ class MarkCompactCollector {
   CollectorState state_;
 #endif
 
-  // Global flag that forces sweeping to be precise, so we can traverse the
-  // heap.
-  bool sweep_precisely_;
-
   bool reduce_memory_footprint_;
   bool abort_incremental_marking_;
...
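The enum now only distinguishes how sweeping is scheduled (sequential, parallel, or concurrent), since every variant sweeps precisely. A standalone sketch of one plausible way the type could be chosen from the two remaining flags; the real selection logic lives in the collapsed part of this CL and may differ:

#include <cstdio>

// Mirrors the slimmed-down enum from mark-compact.h; the selection below is a
// guess driven by the flags in flag-definitions.h, not code from this CL.
enum SweeperType { PARALLEL_SWEEPING, CONCURRENT_SWEEPING, SEQUENTIAL_SWEEPING };

SweeperType SelectSweeper(bool parallel_sweeping, bool concurrent_sweeping) {
  if (concurrent_sweeping) return CONCURRENT_SWEEPING;  // flag default: true
  if (parallel_sweeping) return PARALLEL_SWEEPING;      // flag default: false
  return SEQUENTIAL_SWEEPING;
}

int main() {
  std::printf("%d\n", static_cast<int>(SelectSweeper(false, true)));
  return 0;
}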
@@ -47,18 +47,13 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
          owner == page->heap()->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
              page->area_end(), kOnePageOnly, size_func);
-  DCHECK(page->WasSweptPrecisely() ||
-         (static_cast<PagedSpace*>(owner)->swept_precisely() &&
-          page->SweepingCompleted()));
+  DCHECK(page->WasSwept() || page->SweepingCompleted());
 }
 
 
 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                     HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
-  // Check that we actually can iterate this space.
-  DCHECK(space->swept_precisely());
-
   space_ = space;
   cur_addr_ = cur;
   cur_end_ = end;
@@ -83,9 +78,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
   if (cur_page == space_->anchor()) return false;
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
-  DCHECK(cur_page->WasSweptPrecisely() ||
-         (static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() &&
-          cur_page->SweepingCompleted()));
+  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
   return true;
 }
@@ -459,7 +452,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT_PRECISELY);
+  chunk->SetFlag(WAS_SWEPT);
 
   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -886,7 +879,6 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
                        Executability executable)
     : Space(heap, id, executable),
       free_list_(this),
-      swept_precisely_(true),
       unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL),
       emergency_memory_(NULL) {
@@ -936,7 +928,7 @@ size_t PagedSpace::CommittedPhysicalMemory() {
 
 Object* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called on precisely swept spaces.
+  // Note: this function can only be called on iterable spaces.
   DCHECK(!heap()->mark_compact_collector()->in_use());
 
   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
@@ -1129,9 +1121,6 @@ void PagedSpace::Print() {}
 
 #ifdef VERIFY_HEAP
 void PagedSpace::Verify(ObjectVisitor* visitor) {
-  // We can only iterate over the pages if they were swept precisely.
-  if (!swept_precisely_) return;
-
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
   PageIterator page_iterator(this);
@@ -1141,7 +1130,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
-    CHECK(page->WasSweptPrecisely());
+    CHECK(page->WasSwept());
     HeapObjectIterator it(page, NULL);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
@@ -2737,7 +2726,6 @@ void PagedSpace::ReportStatistics() {
          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
-  if (!swept_precisely_) return;
   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
     heap()->mark_compact_collector()->EnsureSweepingCompleted();
   }
...
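The two DCHECK changes above encode the new iteration contract: a page can be walked object by object once it has been swept, or once the concurrent sweeper is done with it. A hypothetical helper spelling that out (it uses the accessors introduced by this CL but is not itself part of it):

// Hypothetical helper, not V8 code: the condition HeapObjectIterator asserts.
static bool CanIterate(Page* page) {
  // WAS_SWEPT is set once the sweeper has processed the page (and at chunk
  // initialization); SweepingCompleted() covers pages the concurrent sweeper
  // has just finished.
  return page->WasSwept() || page->SweepingCompleted();
}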
@@ -373,12 +373,9 @@ class MemoryChunk {
     EVACUATION_CANDIDATE,
     RESCAN_ON_EVACUATION,
 
-    // Pages swept precisely can be iterated, hitting only the live objects.
-    // Whereas those swept conservatively cannot be iterated over. Both flags
-    // indicate that marking bits have been cleared by the sweeper, otherwise
-    // marking bits are still intact.
-    WAS_SWEPT_PRECISELY,
-    WAS_SWEPT_CONSERVATIVELY,
+    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+    // otherwise marking bits are still intact.
+    WAS_SWEPT,
 
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
@@ -765,15 +762,9 @@ class Page : public MemoryChunk {
   void InitializeAsAnchor(PagedSpace* owner);
 
-  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
-  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
-  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
-
-  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
-  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
-  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
-  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
+  void SetWasSwept() { SetFlag(WAS_SWEPT); }
+  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
 
   void ResetFreeListStatistics();
@@ -1830,14 +1821,11 @@ class PagedSpace : public Space {
   static void ResetCodeStatistics(Isolate* isolate);
 #endif
 
-  bool swept_precisely() { return swept_precisely_; }
-  void set_swept_precisely(bool b) { swept_precisely_ = b; }
-
   // Evacuation candidates are swept by evacuator. Needs to return a valid
   // result before _and_ after evacuation has finished.
   static bool ShouldBeSweptBySweeperThreads(Page* p) {
     return !p->IsEvacuationCandidate() &&
-           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely();
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
   }
 
   void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
@@ -1907,12 +1895,8 @@ class PagedSpace : public Space {
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
-  // This space was swept precisely, hence it is iterable.
-  bool swept_precisely_;
-
   // The number of free bytes which could be reclaimed by advancing the
-  // concurrent sweeper threads. This is only an estimation because concurrent
-  // sweeping is done conservatively.
+  // concurrent sweeper threads.
   intptr_t unswept_free_bytes_;
 
   // The sweeper threads iterate over the list of pointer and data space pages
...
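Call sites migrate mechanically to the new Page API. An illustrative before/after (all names appear in the hunks above; the pairing of old and new calls is the obvious one, not something this CL spells out):

// Before this CL:
if (page->WasSweptPrecisely() || page->WasSweptConservatively()) return;
page->MarkSweptPrecisely();
// After this CL:
if (page->WasSwept()) return;
page->SetWasSwept();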
@@ -477,10 +477,8 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
       } else {
         Page* page = reinterpret_cast<Page*>(chunk);
         PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-        Address start = page->area_start();
-        Address end = page->area_end();
         if (owner == heap_->map_space()) {
-          DCHECK(page->WasSweptPrecisely());
+          DCHECK(page->WasSwept());
           HeapObjectIterator iterator(page, NULL);
           for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                heap_object = iterator.Next()) {
@@ -504,29 +502,22 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
               heap_->mark_compact_collector()->EnsureSweepingCompleted();
             }
           }
-          // TODO(hpayer): remove the special casing and merge map and pointer
-          // space handling as soon as we removed conservative sweeping.
           CHECK(page->owner() == heap_->old_pointer_space());
-          if (heap_->old_pointer_space()->swept_precisely()) {
-            HeapObjectIterator iterator(page, NULL);
-            for (HeapObject* heap_object = iterator.Next();
-                 heap_object != NULL; heap_object = iterator.Next()) {
-              // We iterate over objects that contain new space pointers only.
-              if (heap_object->MayContainNewSpacePointers()) {
-                FindPointersToNewSpaceInRegion(
-                    heap_object->address() + HeapObject::kHeaderSize,
-                    heap_object->address() + heap_object->Size(),
-                    slot_callback, clear_maps);
-              }
-            }
-          } else {
-            FindPointersToNewSpaceInRegion(start, end, slot_callback,
-                                           clear_maps);
-          }
+          HeapObjectIterator iterator(page, NULL);
+          for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+               heap_object = iterator.Next()) {
+            // We iterate over objects that contain new space pointers only.
+            if (heap_object->MayContainNewSpacePointers()) {
+              FindPointersToNewSpaceInRegion(
+                  heap_object->address() + HeapObject::kHeaderSize,
+                  heap_object->address() + heap_object->Size(), slot_callback,
+                  clear_maps);
+            }
+          }
         }
       }
     }
   }
   if (callback_ != NULL) {
     (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
   }
...
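The removed TODO asked for the map-space and old-pointer-space paths to be merged once conservative sweeping was gone; with pages always iterable, both now start from the same object-granular walk. A hypothetical shared helper for that walk (sketch only, not part of this CL; the helper name is invented, and the map-space branch may still do extra per-object work):

// Hypothetical refactoring sketch: the loop both branches above could share.
void StoreBuffer::IteratePointersOnPage(Page* page,
                                        ObjectSlotCallback slot_callback,
                                        bool clear_maps) {
  HeapObjectIterator iterator(page, NULL);
  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
       heap_object = iterator.Next()) {
    // Only objects that may contain new space pointers need scanning.
    if (heap_object->MayContainNewSpacePointers()) {
      FindPointersToNewSpaceInRegion(
          heap_object->address() + HeapObject::kHeaderSize,
          heap_object->address() + heap_object->Size(), slot_callback,
          clear_maps);
    }
  }
}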