Commit 65c9c2a2 authored by hpayer@chromium.org

Remove conservative sweeping.

BUG=
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/479113004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23283 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 53fdf75b
......@@ -526,7 +526,6 @@ DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
"track object counts and memory usage")
DEFINE_BOOL(always_precise_sweeping, true, "always sweep precisely")
DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
DEFINE_INT(sweeper_threads, 0,
......
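For orientation, a rough standalone sketch of what one of these DEFINE_BOOL lines provides once the always_precise_sweeping flag is gone; the macro expansion below is an illustrative assumption, not V8's actual flag machinery, and the remaining concurrent_sweeping flag is only used as the example name.

```cpp
// Rough sketch (assumed expansion, not the actual V8 macro) of what a
// DEFINE_BOOL line such as the ones above provides: a FLAG_* variable with
// its default value plus the associated help text.
#include <cstdio>

#define DEFINE_BOOL(name, default_value, comment)      \
  static bool FLAG_##name = default_value;             \
  static const char* FLAG_##name##_comment = comment;

DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")

int main() {
  std::printf("--concurrent_sweeping=%d (%s)\n", FLAG_concurrent_sweeping,
              FLAG_concurrent_sweeping_comment);
  return 0;
}
```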
......@@ -2580,15 +2580,6 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
CHECK(debug_heap->old_data_space()->swept_precisely());
CHECK(debug_heap->old_pointer_space()->swept_precisely());
CHECK(debug_heap->code_space()->swept_precisely());
CHECK(debug_heap->cell_space()->swept_precisely());
CHECK(debug_heap->property_cell_space()->swept_precisely());
CHECK(debug_heap->map_space()->swept_precisely());
#endif
#ifdef VERIFY_HEAP
debug_heap->Verify();
#endif
......
......@@ -1273,14 +1273,10 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
object = code_it.Next())
object->Iterate(&v);
// The old data space was normally swept conservatively so that the iterator
// doesn't work, so we normally skip the next bit.
if (heap->old_data_space()->swept_precisely()) {
HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next(); object != NULL;
object = data_it.Next())
object->Iterate(&v);
}
}
#endif // VERIFY_HEAP
......@@ -4242,9 +4238,7 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
bool Heap::IsHeapIterable() {
// TODO(hpayer): This function is not correct. Allocation folding in old
// space breaks the iterability.
return (old_pointer_space()->swept_precisely() &&
old_data_space()->swept_precisely() &&
new_space_top_after_last_gc_ == new_space()->top());
return new_space_top_after_last_gc_ == new_space()->top();
}
......
......@@ -715,14 +715,11 @@ class Heap {
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
static const int kNoGCFlags = 0;
static const int kSweepPreciselyMask = 1;
static const int kReduceMemoryFootprintMask = 2;
static const int kAbortIncrementalMarkingMask = 4;
// Making the heap iterable requires us to sweep precisely and abort any
// incremental marking as well.
static const int kMakeHeapIterableMask =
kSweepPreciselyMask | kAbortIncrementalMarkingMask;
static const int kReduceMemoryFootprintMask = 1;
static const int kAbortIncrementalMarkingMask = 2;
// Making the heap iterable requires us to abort incremental marking.
static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
......
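As a reference point, a minimal standalone sketch (not V8 code) of how the renumbered GC flag bits compose and decode once kSweepPreciselyMask is removed; the constant names and values mirror the hunk above, while the main() harness and local booleans are illustrative only.

```cpp
// Minimal standalone sketch of the renumbered GC flag bits after
// kSweepPreciselyMask is gone; constant names mirror the hunk above.
#include <cassert>

static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
static const int kAbortIncrementalMarkingMask = 2;
// Making the heap iterable now only requires aborting incremental marking.
static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

int main() {
  int flags = kMakeHeapIterableMask | kReduceMemoryFootprintMask;
  // Decoding mirrors MarkCompactCollector::SetFlags in the following hunk.
  bool reduce_memory_footprint = (flags & kReduceMemoryFootprintMask) != 0;
  bool abort_incremental_marking = (flags & kAbortIncrementalMarkingMask) != 0;
  assert(reduce_memory_footprint && abort_incremental_marking);
  return 0;
}
```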
......@@ -23,7 +23,6 @@ MarkBit Marking::MarkBitFrom(Address addr) {
void MarkCompactCollector::SetFlags(int flags) {
sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
abort_incremental_marking_ =
((flags & Heap::kAbortIncrementalMarkingMask) != 0);
......
......@@ -544,11 +544,9 @@ class MarkCompactCollector {
void EnableCodeFlushing(bool enable);
enum SweeperType {
PARALLEL_CONSERVATIVE,
CONCURRENT_CONSERVATIVE,
PARALLEL_PRECISE,
CONCURRENT_PRECISE,
PRECISE
PARALLEL_SWEEPING,
CONCURRENT_SWEEPING,
SEQUENTIAL_SWEEPING
};
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
......@@ -561,12 +559,6 @@ class MarkCompactCollector {
void VerifyOmittedMapChecks();
#endif
// Sweep a single page from the given space conservatively.
// Returns the size of the biggest continuous freed memory chunk in bytes.
template <SweepingParallelism type>
static int SweepConservatively(PagedSpace* space, FreeList* free_list,
Page* p);
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
return Page::FromAddress(reinterpret_cast<Address>(anchor))
->ShouldSkipEvacuationSlotRecording();
......@@ -693,10 +685,6 @@ class MarkCompactCollector {
CollectorState state_;
#endif
// Global flag that forces sweeping to be precise, so we can traverse the
// heap.
bool sweep_precisely_;
bool reduce_memory_footprint_;
bool abort_incremental_marking_;
......
......@@ -47,18 +47,13 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
page->area_end(), kOnePageOnly, size_func);
DCHECK(page->WasSweptPrecisely() ||
(static_cast<PagedSpace*>(owner)->swept_precisely() &&
page->SweepingCompleted()));
DCHECK(page->WasSwept() || page->SweepingCompleted());
}
void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
HeapObjectIterator::PageMode mode,
HeapObjectCallback size_f) {
// Check that we actually can iterate this space.
DCHECK(space->swept_precisely());
space_ = space;
cur_addr_ = cur;
cur_end_ = end;
......@@ -83,9 +78,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
if (cur_page == space_->anchor()) return false;
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->WasSweptPrecisely() ||
(static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() &&
cur_page->SweepingCompleted()));
DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
return true;
}
......@@ -459,7 +452,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
chunk->SetFlag(WAS_SWEPT_PRECISELY);
chunk->SetFlag(WAS_SWEPT);
DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
......@@ -886,7 +879,6 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
Executability executable)
: Space(heap, id, executable),
free_list_(this),
swept_precisely_(true),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL),
emergency_memory_(NULL) {
......@@ -936,7 +928,7 @@ size_t PagedSpace::CommittedPhysicalMemory() {
Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on precisely swept spaces.
// Note: this function can only be called on iterable spaces.
DCHECK(!heap()->mark_compact_collector()->in_use());
if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
......@@ -1129,9 +1121,6 @@ void PagedSpace::Print() {}
#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
// We can only iterate over the pages if they were swept precisely.
if (!swept_precisely_) return;
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
PageIterator page_iterator(this);
......@@ -1141,7 +1130,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (page == Page::FromAllocationTop(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->WasSweptPrecisely());
CHECK(page->WasSwept());
HeapObjectIterator it(page, NULL);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
......@@ -2737,7 +2726,6 @@ void PagedSpace::ReportStatistics() {
", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
if (!swept_precisely_) return;
if (heap()->mark_compact_collector()->sweeping_in_progress()) {
heap()->mark_compact_collector()->EnsureSweepingCompleted();
}
......
......@@ -373,12 +373,9 @@ class MemoryChunk {
EVACUATION_CANDIDATE,
RESCAN_ON_EVACUATION,
// Pages swept precisely can be iterated, hitting only the live objects.
// Whereas those swept conservatively cannot be iterated over. Both flags
// indicate that marking bits have been cleared by the sweeper, otherwise
// marking bits are still intact.
WAS_SWEPT_PRECISELY,
WAS_SWEPT_CONSERVATIVELY,
// WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
// otherwise marking bits are still intact.
WAS_SWEPT,
// Large objects can have a progress bar in their page header. These object
// are scanned in increments and will be kept black while being scanned.
......@@ -765,15 +762,9 @@ class Page : public MemoryChunk {
void InitializeAsAnchor(PagedSpace* owner);
bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
void SetWasSwept() { SetFlag(WAS_SWEPT); }
void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
void ResetFreeListStatistics();
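A simplified model (assumed shapes, not V8 code) of the single WAS_SWEPT page flag that replaces the precise/conservative pair; the accessor names mirror the Page methods in the hunk above, and the assertion mirrors the DCHECK(page->WasSwept() || page->SweepingCompleted()) check from the spaces.cc hunks.

```cpp
// Simplified model of the single WAS_SWEPT page flag; accessor names follow
// the Page methods shown above.
#include <cassert>
#include <cstdint>

struct Page {
  enum Flag : uint32_t { WAS_SWEPT = 1u << 0 };
  uint32_t flags_ = 0;
  bool sweeping_completed_ = false;

  bool IsFlagSet(Flag f) const { return (flags_ & f) != 0; }
  bool WasSwept() const { return IsFlagSet(WAS_SWEPT); }
  void SetWasSwept() { flags_ |= WAS_SWEPT; }
  void ClearWasSwept() { flags_ &= ~WAS_SWEPT; }
  bool SweepingCompleted() const { return sweeping_completed_; }
};

int main() {
  Page p;
  p.SetWasSwept();
  // A HeapObjectIterator over this page would now assert a single condition,
  // mirroring DCHECK(page->WasSwept() || page->SweepingCompleted()).
  assert(p.WasSwept() || p.SweepingCompleted());
  return 0;
}
```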
......@@ -1830,14 +1821,11 @@ class PagedSpace : public Space {
static void ResetCodeStatistics(Isolate* isolate);
#endif
bool swept_precisely() { return swept_precisely_; }
void set_swept_precisely(bool b) { swept_precisely_ = b; }
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
static bool ShouldBeSweptBySweeperThreads(Page* p) {
return !p->IsEvacuationCandidate() &&
!p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely();
!p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
}
void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
......@@ -1907,12 +1895,8 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;
// This space was swept precisely, hence it is iterable.
bool swept_precisely_;
// The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimation because concurrent
// sweeping is done conservatively.
// concurrent sweeper threads.
intptr_t unswept_free_bytes_;
// The sweeper threads iterate over the list of pointer and data space pages
......
......@@ -477,10 +477,8 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
} else {
Page* page = reinterpret_cast<Page*>(chunk);
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
Address start = page->area_start();
Address end = page->area_end();
if (owner == heap_->map_space()) {
DCHECK(page->WasSweptPrecisely());
DCHECK(page->WasSwept());
HeapObjectIterator iterator(page, NULL);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
......@@ -504,24 +502,17 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
}
// TODO(hpayer): remove the special casing and merge map and pointer
// space handling as soon as we removed conservative sweeping.
CHECK(page->owner() == heap_->old_pointer_space());
if (heap_->old_pointer_space()->swept_precisely()) {
HeapObjectIterator iterator(page, NULL);
for (HeapObject* heap_object = iterator.Next();
heap_object != NULL; heap_object = iterator.Next()) {
// We iterate over objects that contain new space pointers only.
if (heap_object->MayContainNewSpacePointers()) {
FindPointersToNewSpaceInRegion(
heap_object->address() + HeapObject::kHeaderSize,
heap_object->address() + heap_object->Size(),
slot_callback, clear_maps);
}
HeapObjectIterator iterator(page, NULL);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
// We iterate over objects that contain new space pointers only.
if (heap_object->MayContainNewSpacePointers()) {
FindPointersToNewSpaceInRegion(
heap_object->address() + HeapObject::kHeaderSize,
heap_object->address() + heap_object->Size(), slot_callback,
clear_maps);
}
} else {
FindPointersToNewSpaceInRegion(start, end, slot_callback,
clear_maps);
}
}
}
......
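Finally, a simplified sketch (assumed types, not V8 code) of the unconditional page walk the store buffer can now perform over old pointer space: with every swept page iterable, the swept_precisely() branch above disappears and only objects that may contain new space pointers get their bodies scanned.

```cpp
// Simplified stand-in for the store buffer's page walk after the change:
// iterate every object on a swept page and scan only those that may contain
// new space pointers.
#include <vector>

struct HeapObject {
  bool may_contain_new_space_pointers;
};

template <typename ScanObjectBody>
void IteratePageObjects(const std::vector<HeapObject*>& objects,
                        ScanObjectBody scan_object_body) {
  for (HeapObject* object : objects) {
    // Mirrors heap_object->MayContainNewSpacePointers() in the hunk above.
    if (object->may_contain_new_space_pointers) {
      // Stands in for FindPointersToNewSpaceInRegion over the object body.
      scan_object_body(object);
    }
  }
}

int main() {
  HeapObject a{true}, b{false};
  std::vector<HeapObject*> page_objects = {&a, &b};
  int scanned = 0;
  IteratePageObjects(page_objects, [&](HeapObject*) { ++scanned; });
  return scanned == 1 ? 0 : 1;
}
```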