Commit 120b640d authored by hpayer, committed by Commit bot

Reland of [heap] Refactor evacuation for young and old gen into visitors.

Reland of [heap] Refactor evacuation for young and old gen into visitors. (patchset #1 id:1 of https://codereview.chromium.org/1483393002/ )

Reason for revert:
Reland after fixing the potential root cause of the canary crasher.

Original issue's description:
> Revert of [heap] Refactor evacuation for young and old gen into visitors. (patchset #5 id:80001 of https://codereview.chromium.org/1470253002/ )
>
> Reason for revert:
> Still investigating bad canary.
>
> Original issue's description:
> > [heap] Refactor evacuation for young and old gen into visitors.
> >
> > Create a visitor for evacuating objects for young and old generation. This is
> > the first step in preparing a task to process both new-space and old-space
> > pages in parallel.
> >
> > BUG=chromium:524425
> > LOG=N
> >
> > Committed: https://crrev.com/138d9bae5d7014e0d205634a49b5eac3697744c8
> > Cr-Commit-Position: refs/heads/master@{#32349}
>
> TBR=mlippautz@chromium.org
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=chromium:524425
>
> Committed: https://crrev.com/aa24a3135ec308e1f84bce334844caf0cae2437a
> Cr-Commit-Position: refs/heads/master@{#32462}

TBR=mlippautz@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1493523003

Cr-Commit-Position: refs/heads/master@{#32500}
parent 60d77c8a
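
In outline, this CL replaces two hand-rolled mark-bit scanning loops (one for new-space evacuation, one for old-space compaction) with a single page-iteration routine driven by a per-object visitor. The following is a minimal standalone sketch of that shape only, with simplified stand-in types rather than V8's; the real Visit() implementations copy objects into to-space or into compaction spaces, as the diff below shows:

```cpp
#include <cstdio>
#include <vector>

// Stand-in for V8's HeapObject; the real visitor receives HeapObject*.
struct Object { int size; };

// The abstraction the CL introduces: one virtual hook per live object.
// Visit() returns false to abort iteration (old-space evacuation can fail
// when a compaction space runs out of memory).
class HeapObjectVisitor {
 public:
  virtual ~HeapObjectVisitor() = default;
  virtual bool Visit(Object* object) = 0;
};

// Stand-in for IterateLiveObjectsOnPage: walks the live objects of a "page"
// and applies the visitor, stopping early if the visitor fails.
bool IterateLiveObjects(std::vector<Object>& page, HeapObjectVisitor* visitor) {
  for (Object& o : page) {
    if (!visitor->Visit(&o)) return false;
  }
  return true;
}

// One concrete visitor per generation; both can now be driven by the same
// loop, which is what makes handing pages to parallel tasks feasible.
class CountingVisitor : public HeapObjectVisitor {
 public:
  bool Visit(Object* object) override {
    live_bytes_ += object->size;
    return true;
  }
  int live_bytes() const { return live_bytes_; }

 private:
  int live_bytes_ = 0;
};

int main() {
  std::vector<Object> page = {{8}, {16}, {24}};
  CountingVisitor v;
  if (IterateLiveObjects(page, &v)) {
    std::printf("live bytes: %d\n", v.live_bytes());  // live bytes: 48
  }
}
```
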
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -236,6 +236,11 @@ static void VerifyEvacuation(Heap* heap) {
 void MarkCompactCollector::SetUp() {
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
   free_list_code_space_.Reset(new FreeList(heap_->code_space()));
   free_list_map_space_.Reset(new FreeList(heap_->map_space()));
@@ -1521,69 +1526,119 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
 }

-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-    NewSpace* new_space, NewSpacePage* p) {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  MarkBit::CellType* cells = p->markbits()->cells();
-  int survivors_size = 0;
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
-
-    int offset = 0;
-    while (current_cell != 0) {
-      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
-      current_cell >>= trailing_zeros;
-      offset += trailing_zeros;
-      Address address = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(address);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      survivors_size += size;
-
-      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
-
-      offset += 2;
-      current_cell >>= 2;
-
-      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-      if (heap()->ShouldBePromoted(object->address(), size) &&
-          TryPromoteObject(object, size)) {
-        continue;
-      }
-
-      AllocationAlignment alignment = object->RequiredAlignment();
-      AllocationResult allocation = new_space->AllocateRaw(size, alignment);
-      if (allocation.IsRetry()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room unless we are in an OOM situation.
-          FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-        }
-        allocation = new_space->AllocateRaw(size, alignment);
-        DCHECK(!allocation.IsRetry());
-      }
-      Object* target = allocation.ToObjectChecked();
-
-      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-      if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-        heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-      }
-      heap()->IncrementSemiSpaceCopiedObjectSize(size);
-    }
-    *cells = 0;
-  }
-  return survivors_size;
-}
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+  virtual ~HeapObjectVisitor() {}
+  virtual bool Visit(HeapObject* object) = 0;
+};
+
+class MarkCompactCollector::EvacuateNewSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
+
+  virtual bool Visit(HeapObject* object) {
+    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+    int size = object->Size();
+
+    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+    if (heap_->ShouldBePromoted(object->address(), size) &&
+        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+      return true;
+    }
+
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRaw(size, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPage()) {
+        // Shouldn't happen. We are sweeping linearly, and to-space
+        // has the same number of pages as from-space, so there is
+        // always room unless we are in an OOM situation.
+        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+      }
+      allocation = heap_->new_space()->AllocateRaw(size, alignment);
+      DCHECK(!allocation.IsRetry());
+    }
+    Object* target = allocation.ToObjectChecked();
+
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+class MarkCompactCollector::EvacuateOldSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateOldSpaceVisitor(Heap* heap,
+                          CompactionSpaceCollection* compaction_spaces,
+                          SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap),
+        compaction_spaces_(compaction_spaces),
+        evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+  virtual bool Visit(HeapObject* object) {
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    HeapObject* target_object = nullptr;
+    AllocationSpace id =
+        Page::FromAddress(object->address())->owner()->identity();
+    AllocationResult allocation =
+        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
+    if (!allocation.To(&target_object)) {
+      return false;
+    }
+    heap_->mark_compact_collector()->MigrateObject(
+        target_object, object, size, id, evacuation_slots_buffer_);
+    DCHECK(object->map_word().IsForwardingAddress());
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+  CompactionSpaceCollection* compaction_spaces_;
+  SlotsBuffer** evacuation_slots_buffer_;
+};
+
+bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
+                                                    HeapObjectVisitor* visitor,
+                                                    IterationMode mode) {
+  Address offsets[16];
+  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      HeapObject* object = HeapObject::FromAddress(offsets[i]);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      if (!visitor->Visit(object)) {
+        if ((mode == kClearMarkbits) && (i > 0)) {
+          page->markbits()->ClearRange(
+              page->AddressToMarkbitIndex(page->area_start()),
+              page->AddressToMarkbitIndex(offsets[i]));
+        }
+        return false;
+      }
+    }
+    if (mode == kClearMarkbits) {
+      *cell = 0;
+    }
+  }
+  return true;
+}
@@ -3105,9 +3160,13 @@ void MarkCompactCollector::EvacuateNewSpace() {
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
+  EvacuateNewSpaceVisitor new_space_visitor(heap());
   while (it.has_next()) {
     NewSpacePage* p = it.next();
-    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+    survivors_size += p->LiveBytes();
+    bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
+    USE(ok);
+    DCHECK(ok);
   }

   heap_->IncrementYoungSurvivorsCounter(survivors_size);
@@ -3122,50 +3181,6 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
 }

-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
-    Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
-  AlwaysAllocateScope always_allocate(isolate());
-  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-
-  Address starts[16];
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts);
-    for (int i = 0; i < live_objects; i++) {
-      HeapObject* object = HeapObject::FromAddress(starts[i]);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      AllocationAlignment alignment = object->RequiredAlignment();
-      HeapObject* target_object = nullptr;
-      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
-      if (!allocation.To(&target_object)) {
-        // We need to abort compaction for this page. Make sure that we reset
-        // the mark bits for objects that have already been migrated.
-        if (i > 0) {
-          p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
-                                    p->AddressToMarkbitIndex(starts[i]));
-        }
-        return false;
-      }
-
-      MigrateObject(target_object, object, size, target_space->identity(),
-                    evacuation_slots_buffer);
-      DCHECK(object->map_word().IsForwardingAddress());
-    }
-
-    // Clear marking bits for current cell.
-    *cell = 0;
-  }
-  p->ResetLiveBytes();
-  return true;
-}
-
 int MarkCompactCollector::NumberOfParallelCompactionTasks() {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
@@ -3335,6 +3350,8 @@ void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
 void MarkCompactCollector::EvacuatePages(
     CompactionSpaceCollection* compaction_spaces,
     SlotsBuffer** evacuation_slots_buffer) {
+  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+                                  evacuation_slots_buffer);
   for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
@@ -3348,9 +3365,8 @@ void MarkCompactCollector::EvacuatePages(
           MemoryChunk::kCompactingInProgress);
       double start = heap()->MonotonicallyIncreasingTimeInMs();
       intptr_t live_bytes = p->LiveBytes();
-      if (EvacuateLiveObjectsFromPage(
-              p, compaction_spaces->Get(p->owner()->identity()),
-              evacuation_slots_buffer)) {
+      if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) {
+        p->ResetLiveBytes();
        p->parallel_compaction_state().SetValue(
            MemoryChunk::kCompactingFinalize);
        compaction_spaces->ReportCompactionProgress(
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -320,6 +320,15 @@ class ThreadLocalTop;
 // Mark-Compact collector
 class MarkCompactCollector {
  public:
+  enum IterationMode {
+    kKeepMarking,
+    kClearMarkbits,
+  };
+
+  class EvacuateNewSpaceVisitor;
+  class EvacuateOldSpaceVisitor;
+  class HeapObjectVisitor;
+
   static void Initialize();

   void SetUp();
@@ -698,14 +707,13 @@ class MarkCompactCollector {
   // regions to each space's free list.
   void SweepSpaces();

-  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
-                                            NewSpacePage* p);
+  // Iterates through all live objects on a page using marking information.
+  // Returns whether all objects have successfully been visited.
+  bool IterateLiveObjectsOnPage(MemoryChunk* page, HeapObjectVisitor* visitor,
+                                IterationMode mode);

   void EvacuateNewSpace();

-  bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space,
-                                   SlotsBuffer** evacuation_slots_buffer);
-
   void AddEvacuationSlotsBufferSynchronized(
       SlotsBuffer* evacuation_slots_buffer);
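
The subtlest part of the new iteration routine is its abort path: EvacuateOldSpaceVisitor::Visit can fail when a compaction space cannot allocate, and the mark bits of objects that were already migrated must then be reset so compaction for the page can be aborted consistently. The following is a toy model of that contract only, with illustrative names and a boolean array standing in for V8's per-cell mark bitmaps:

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

enum IterationMode { kKeepMarking, kClearMarkbits };

struct Page {
  std::vector<bool> mark_bits;  // one bit per object slot
};

class HeapObjectVisitor {
 public:
  virtual ~HeapObjectVisitor() = default;
  virtual bool Visit(int object_index) = 0;
};

// Mirrors the shape of IterateLiveObjectsOnPage: on visitor failure, clear
// the bits of objects migrated so far (the ClearRange() call in the real
// code), leaving the failed object's bit set since it was not migrated.
bool IterateLiveObjectsOnPage(Page& page, HeapObjectVisitor* visitor,
                              IterationMode mode) {
  for (size_t i = 0; i < page.mark_bits.size(); i++) {
    if (!page.mark_bits[i]) continue;
    if (!visitor->Visit(static_cast<int>(i))) {
      if (mode == kClearMarkbits) {
        for (size_t j = 0; j < i; j++) page.mark_bits[j] = false;
      }
      return false;
    }
  }
  if (mode == kClearMarkbits) {
    page.mark_bits.assign(page.mark_bits.size(), false);
  }
  return true;
}

// A visitor that "runs out of space" after two successful migrations.
class FailingVisitor : public HeapObjectVisitor {
 public:
  bool Visit(int) override { return ++visited_ <= 2; }

 private:
  int visited_ = 0;
};

int main() {
  Page page{{true, true, true, true}};
  FailingVisitor v;
  bool ok = IterateLiveObjectsOnPage(page, &v, kClearMarkbits);
  assert(!ok);  // aborted on the third object
  std::printf("first two mark bits cleared: %d %d\n",
              (int)page.mark_bits[0], (int)page.mark_bits[1]);  // 0 0
}
```
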