Commit f0f707d8 authored by mlippautz, committed by Commit bot

Reland "[heap] Refactor evacuation for young and old gen into visitors."

Create a visitor for evacuating objects of the young and old generation. This
is the first step in preparing a task that processes both new-space and
old-space pages in parallel.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1499893002

Cr-Commit-Position: refs/heads/master@{#32617}
parent 747f455b
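The core of the change is splitting page-local evacuation into two parts: one shared walk over the live objects of a page, and a per-object policy (semi-space copy vs. compaction into old space) supplied as a visitor. A minimal standalone sketch of that shape, using simplified stand-in types rather than V8's own:

```cpp
#include <cstdio>
#include <vector>

// Stand-ins for the real V8 types; only the shape matters here.
struct HeapObject {
  int size;
};

// Mirrors the HeapObjectVisitor interface added by this CL: Visit() returns
// false to abort the walk (e.g. when an allocation fails mid-evacuation).
class HeapObjectVisitor {
 public:
  virtual ~HeapObjectVisitor() {}
  virtual bool Visit(HeapObject* object) = 0;
};

// Example policy: tally live bytes instead of evacuating.
class CountingVisitor : public HeapObjectVisitor {
 public:
  virtual bool Visit(HeapObject* object) {
    visited_bytes_ += object->size;
    return true;
  }
  int visited_bytes() const { return visited_bytes_; }

 private:
  int visited_bytes_ = 0;
};

// Stand-in for IterateLiveObjectsOnPage: the single iteration routine that
// both the new-space and old-space evacuation paths now share.
bool IterateLiveObjects(std::vector<HeapObject>& page,
                        HeapObjectVisitor* visitor) {
  for (HeapObject& object : page) {
    if (!visitor->Visit(&object)) return false;  // caller handles the abort
  }
  return true;
}

int main() {
  std::vector<HeapObject> page = {{16}, {24}, {32}};
  CountingVisitor visitor;
  if (IterateLiveObjects(page, &visitor)) {
    std::printf("visited %d bytes\n", visitor.visited_bytes());
  }
  return 0;
}
```

Because the iteration no longer knows which generation it is walking, the same loop can later be handed to a task that processes new-space and old-space pages in parallel.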
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -236,6 +236,11 @@ static void VerifyEvacuation(Heap* heap) {


 void MarkCompactCollector::SetUp() {
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
   free_list_code_space_.Reset(new FreeList(heap_->code_space()));
   free_list_map_space_.Reset(new FreeList(heap_->map_space()));
@@ -1521,69 +1526,119 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
 }


-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-    NewSpace* new_space, NewSpacePage* p) {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  MarkBit::CellType* cells = p->markbits()->cells();
-  int survivors_size = 0;
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
-
-    int offset = 0;
-    while (current_cell != 0) {
-      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
-      current_cell >>= trailing_zeros;
-      offset += trailing_zeros;
-      Address address = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(address);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      survivors_size += size;
-
-      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
-
-      offset += 2;
-      current_cell >>= 2;
-
-      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-      if (heap()->ShouldBePromoted(object->address(), size) &&
-          TryPromoteObject(object, size)) {
-        continue;
-      }
-
-      AllocationAlignment alignment = object->RequiredAlignment();
-      AllocationResult allocation = new_space->AllocateRaw(size, alignment);
-      if (allocation.IsRetry()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room unless we are in an OOM situation.
-          FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-        }
-        allocation = new_space->AllocateRaw(size, alignment);
-        DCHECK(!allocation.IsRetry());
-      }
-      Object* target = allocation.ToObjectChecked();
-
-      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-      if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-        heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-      }
-      heap()->IncrementSemiSpaceCopiedObjectSize(size);
-    }
-    *cells = 0;
-  }
-
-  return survivors_size;
-}
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+  virtual ~HeapObjectVisitor() {}
+
+  virtual bool Visit(HeapObject* object) = 0;
+};
+
+
+class MarkCompactCollector::EvacuateNewSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
+
+  virtual bool Visit(HeapObject* object) {
+    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+    int size = object->Size();
+
+    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+    if (heap_->ShouldBePromoted(object->address(), size) &&
+        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+      return true;
+    }
+
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRaw(size, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPage()) {
+        // Shouldn't happen. We are sweeping linearly, and to-space
+        // has the same number of pages as from-space, so there is
+        // always room unless we are in an OOM situation.
+        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+      }
+      allocation = heap_->new_space()->AllocateRaw(size, alignment);
+      DCHECK(!allocation.IsRetry());
+    }
+    Object* target = allocation.ToObjectChecked();
+
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+class MarkCompactCollector::EvacuateOldSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateOldSpaceVisitor(Heap* heap,
+                          CompactionSpaceCollection* compaction_spaces,
+                          SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap),
+        compaction_spaces_(compaction_spaces),
+        evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+  virtual bool Visit(HeapObject* object) {
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    HeapObject* target_object = nullptr;
+    AllocationSpace id =
+        Page::FromAddress(object->address())->owner()->identity();
+    AllocationResult allocation =
+        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
+    if (!allocation.To(&target_object)) {
+      return false;
+    }
+    heap_->mark_compact_collector()->MigrateObject(
+        target_object, object, size, id, evacuation_slots_buffer_);
+    DCHECK(object->map_word().IsForwardingAddress());
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+  CompactionSpaceCollection* compaction_spaces_;
+  SlotsBuffer** evacuation_slots_buffer_;
+};
+
+
+bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
+                                                    HeapObjectVisitor* visitor,
+                                                    IterationMode mode) {
+  Address offsets[16];
+  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      HeapObject* object = HeapObject::FromAddress(offsets[i]);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      if (!visitor->Visit(object)) {
+        if ((mode == kClearMarkbits) && (i > 0)) {
+          page->markbits()->ClearRange(
+              page->AddressToMarkbitIndex(page->area_start()),
+              page->AddressToMarkbitIndex(offsets[i]));
+        }
+        return false;
+      }
+    }
+    if (mode == kClearMarkbits) {
+      *cell = 0;
+    }
+  }
+  return true;
+}
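Both the removed loop above and the new MarkWordToObjectStarts-based iteration rely on the same mark-bit encoding: each object start owns a pair of mark bits, black is "10", so on a fully-marked page every set bit that begins a pair is a live object start, and one 32-bit cell can describe at most 16 objects (hence `Address offsets[16]`). An illustrative decoder, simplified from the removed loop (it uses the GCC/Clang builtin where V8 calls base::bits::CountTrailingZeros32, and is not the actual MarkWordToObjectStarts implementation):

```cpp
#include <cstdint>
#include <cstdio>

static const int kPointerSize = 8;  // assumption: 64-bit build

// Decode one mark cell into object start addresses; returns the count.
int DecodeMarkCell(uint32_t cell, uintptr_t cell_base, uintptr_t* starts) {
  int count = 0;
  int offset = 0;  // offset in words from cell_base
  while (cell != 0) {
    int trailing_zeros = __builtin_ctz(cell);  // skip to the next set bit
    cell >>= trailing_zeros;
    offset += trailing_zeros;
    starts[count++] = cell_base + offset * kPointerSize;
    cell >>= 2;   // consume this object's two-bit ("10") mark
    offset += 2;
  }
  return count;
}

int main() {
  uintptr_t starts[16];
  // Bits 0 and 4 set: two live objects, at 0x1000 and 0x1000 + 4 * 8.
  int n = DecodeMarkCell(0x11, 0x1000, starts);
  for (int i = 0; i < n; i++) {
    std::printf("object at %#lx\n", static_cast<unsigned long>(starts[i]));
  }
  return 0;
}
```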
@@ -3133,9 +3188,13 @@ void MarkCompactCollector::EvacuateNewSpace() {
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
+  EvacuateNewSpaceVisitor new_space_visitor(heap());
   while (it.has_next()) {
     NewSpacePage* p = it.next();
-    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+    survivors_size += p->LiveBytes();
+    bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
+    USE(ok);
+    DCHECK(ok);
   }

   heap_->IncrementYoungSurvivorsCounter(survivors_size);
@@ -3150,50 +3209,6 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
 }


-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
-    Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
-  AlwaysAllocateScope always_allocate(isolate());
-  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-  Address starts[16];
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts);
-    for (int i = 0; i < live_objects; i++) {
-      HeapObject* object = HeapObject::FromAddress(starts[i]);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      AllocationAlignment alignment = object->RequiredAlignment();
-      HeapObject* target_object = nullptr;
-      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
-      if (!allocation.To(&target_object)) {
-        // We need to abort compaction for this page. Make sure that we reset
-        // the mark bits for objects that have already been migrated.
-        if (i > 0) {
-          p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
-                                    p->AddressToMarkbitIndex(starts[i]));
-        }
-        return false;
-      }
-
-      MigrateObject(target_object, object, size, target_space->identity(),
-                    evacuation_slots_buffer);
-      DCHECK(object->map_word().IsForwardingAddress());
-    }
-    // Clear marking bits for current cell.
-    *cell = 0;
-  }
-  p->ResetLiveBytes();
-  return true;
-}
-
-
 int MarkCompactCollector::NumberOfParallelCompactionTasks() {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
@@ -3363,6 +3378,8 @@ void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
 void MarkCompactCollector::EvacuatePages(
     CompactionSpaceCollection* compaction_spaces,
     SlotsBuffer** evacuation_slots_buffer) {
+  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+                                  evacuation_slots_buffer);
   for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
@@ -3376,9 +3393,9 @@ void MarkCompactCollector::EvacuatePages(
           MemoryChunk::kCompactingInProgress);
       double start = heap()->MonotonicallyIncreasingTimeInMs();
       intptr_t live_bytes = p->LiveBytes();
-      if (EvacuateLiveObjectsFromPage(
-              p, compaction_spaces->Get(p->owner()->identity()),
-              evacuation_slots_buffer)) {
+      AlwaysAllocateScope always_allocate(isolate());
+      if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) {
+        p->ResetLiveBytes();
         p->parallel_compaction_state().SetValue(
             MemoryChunk::kCompactingFinalize);
         compaction_spaces->ReportCompactionProgress(
...
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -314,6 +314,15 @@ class ThreadLocalTop;

 // Mark-Compact collector
 class MarkCompactCollector {
  public:
+  enum IterationMode {
+    kKeepMarking,
+    kClearMarkbits,
+  };
+
+  class EvacuateNewSpaceVisitor;
+  class EvacuateOldSpaceVisitor;
+  class HeapObjectVisitor;
+
   static void Initialize();

   void SetUp();
@@ -695,14 +704,13 @@ class MarkCompactCollector {
   // regions to each space's free list.
   void SweepSpaces();

-  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
-                                            NewSpacePage* p);
+  // Iterates through all live objects on a page using marking information.
+  // Returns whether all objects have successfully been visited.
+  bool IterateLiveObjectsOnPage(MemoryChunk* page, HeapObjectVisitor* visitor,
+                                IterationMode mode);

   void EvacuateNewSpace();

-  bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space,
-                                   SlotsBuffer** evacuation_slots_buffer);
-
   void AddEvacuationSlotsBufferSynchronized(
       SlotsBuffer* evacuation_slots_buffer);
...
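Since IterateLiveObjectsOnPage only clears mark cells when called with kClearMarkbits, the new IterationMode also permits non-destructive passes. A hypothetical read-only visitor inside the collector (illustrative only; the class and the checks below are not part of this CL) could re-derive a page's live-byte count while leaving the marking information intact for a later evacuation pass:

```cpp
// Hypothetical, written against the API this CL adds.
class LiveBytesCheckingVisitor : public MarkCompactCollector::HeapObjectVisitor {
 public:
  virtual bool Visit(HeapObject* object) {
    live_bytes_ += object->Size();
    return true;  // never aborts, so the walk always completes
  }
  intptr_t live_bytes() const { return live_bytes_; }

 private:
  intptr_t live_bytes_ = 0;
};

// Usage sketch: kKeepMarking leaves every mark cell intact.
//   LiveBytesCheckingVisitor checking_visitor;
//   CHECK(IterateLiveObjectsOnPage(p, &checking_visitor, kKeepMarking));
//   CHECK_EQ(p->LiveBytes(), checking_visitor.live_bytes());
```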