Commit 2263ee9b authored by mlippautz, committed by Commit bot

Revert of [heap] Add page evacuation mode for new->new (patchset #18 id:440001 of https://codereview.chromium.org/1957323003/ )

Reason for revert:
Fragmentation of LABs (local allocation buffers) could result in memory usage (pages) increasing instead of shrinking.

BUG=chromium:620320
LOG=N

Original issue's description:
> [heap] Add page evacuation mode for new->new
>
> Adds an evacuation mode that allows moving pages within new space without
> copying objects.
>
> Basic idea:
> a) Move page within new space
> b) Sweep page to make iterable and process ArrayBuffers
> c) Finish sweep till next scavenge
>
> Threshold is currently 70% live bytes, i.e., the same threshold we use
> to determine fragmented pages.
>
> BUG=chromium:581412
> LOG=N
> CQ_EXTRA_TRYBOTS=tryserver.v8:v8_linux_arm64_gc_stress_dbg,v8_linux_gc_stress_dbg,v8_mac_gc_stress_dbg,v8_linux64_tsan_rel,v8_mac64_asan_rel
>
> Committed: https://crrev.com/49b23201671b25092a3c22eb85783f39b95a5f87
> Cr-Commit-Position: refs/heads/master@{#36990}
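
For orientation, a minimal self-contained sketch of the page selection logic the original CL added, condensed from the EvacuatePagesInParallel() hunk further down. The struct, the constants, and the Classify() helper are stand-ins invented for this sketch, not V8 names; in the real code the threshold is FLAG_page_promotion_threshold percent of Page::kAllocatableMemory.

// Sketch only: stand-in types, not V8 code.
struct PageInfo {
  int live_bytes;          // page->LiveBytes()
  bool never_evacuate;     // page->NeverEvacuate()
  bool below_age_mark;     // MemoryChunk::NEW_SPACE_BELOW_AGE_MARK is set
  bool contains_age_mark;  // page->Contains(new_space->age_mark())
};

enum class PageEvacuation { kNone, kNewToOld, kNewToNew };

constexpr int kAllocatableBytes = 500 * 1024;  // assumed usable area per page
constexpr int kThresholdPercent = 70;          // threshold from the description above

inline PageEvacuation Classify(const PageInfo& p) {
  const int threshold_bytes = kThresholdPercent * kAllocatableBytes / 100;
  if (p.never_evacuate || p.live_bytes <= threshold_bytes || p.contains_age_mark)
    return PageEvacuation::kNone;  // fall back to evacuation by copying objects
  return p.below_age_mark ? PageEvacuation::kNewToOld   // MoveToOldSpace()
                          : PageEvacuation::kNewToNew;  // MoveToToSpace()
}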

TBR=ulan@chromium.org
# Not skipping CQ checks because the original CL landed more than 1 day ago.
BUG=chromium:581412

Review-Url: https://codereview.chromium.org/2063013005
Cr-Commit-Position: refs/heads/master@{#37042}
parent b60da28c
......@@ -1611,8 +1611,6 @@ void Heap::Scavenge() {
// Pause the inline allocation steps.
PauseAllocationObserversScope pause_observers(this);
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
......
......@@ -471,13 +471,13 @@ class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
private:
// v8::Task overrides.
void Run() override {
DCHECK_GE(space_to_start_, FIRST_SPACE);
DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
const int offset = space_to_start_ - FIRST_SPACE;
const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
const int offset = space_to_start_ - FIRST_PAGED_SPACE;
const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
for (int i = 0; i < num_spaces; i++) {
const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
DCHECK_GE(space_id, FIRST_SPACE);
const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
DCHECK_GE(space_id, FIRST_PAGED_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
}
......@@ -515,9 +515,9 @@ void MarkCompactCollector::Sweeper::StartSweepingHelper(
void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
Page* page) {
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
if (!page->SweepingDone()) {
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
ParallelSweepPage(page, owner->identity());
ParallelSweepPage(page, owner);
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
......@@ -560,27 +560,12 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
}
}
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) {
swept_list_[NEW_SPACE].Clear();
}
DCHECK(sweeping_list_[space].empty());
});
ForAllSweepingSpaces(
[this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); });
late_pages_ = false;
sweeping_in_progress_ = false;
}
void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
if (!sweeping_in_progress_) return;
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
NewSpacePageIterator pit(heap_->new_space());
while (pit.has_next()) {
Page* page = pit.next();
SweepOrWaitUntilSweepingCompleted(page);
}
}
}
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper().sweeping_in_progress()) return;
......@@ -1894,17 +1879,13 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final
: public MarkCompactCollector::HeapObjectVisitor {
public:
explicit EvacuateNewSpacePageVisitor(Heap* heap)
: heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
static void MoveToOldSpace(Page* page, PagedSpace* owner) {
page->Unlink();
Page* new_page = Page::ConvertNewToOld(page, owner);
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
}
: heap_(heap), promoted_size_(0) {}
static void MoveToToSpace(Page* page) {
page->heap()->new_space()->MovePageFromSpaceToSpace(page);
page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
static void TryMoveToOldSpace(Page* page, PagedSpace* owner) {
if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) {
Page* new_page = Page::ConvertNewToOld(page, owner);
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
}
}
inline bool Visit(HeapObject* object) {
......@@ -1915,16 +1896,10 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final
}
intptr_t promoted_size() { return promoted_size_; }
intptr_t semispace_copied_size() { return semispace_copied_size_; }
void account_semispace_copied(intptr_t copied) {
semispace_copied_size_ += copied;
}
private:
Heap* heap_;
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
};
class MarkCompactCollector::EvacuateOldSpaceVisitor final
......@@ -3064,26 +3039,12 @@ void MarkCompactCollector::EvacuateNewSpacePrologue() {
new_space->ResetAllocationInfo();
}
void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
newspace_evacuation_candidates_.Rewind(0);
}
class MarkCompactCollector::Evacuator : public Malloced {
public:
enum EvacuationMode {
kObjectsNewToOld,
kPageNewToOld,
kObjectsOldToOld,
kPageNewToNew,
};
static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
// Note: The order of checks is important in this function.
if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
return kPageNewToOld;
if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
return kPageNewToNew;
if (chunk->InNewSpace()) return kObjectsNewToOld;
DCHECK(chunk->IsEvacuationCandidate());
return kObjectsOldToOld;
}
// NewSpacePages with more live bytes than this threshold qualify for fast
// evacuation.
static int PageEvacuationThreshold() {
......@@ -3113,15 +3074,33 @@ class MarkCompactCollector::Evacuator : public Malloced {
CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
private:
enum EvacuationMode {
kObjectsNewToOld,
kPageNewToOld,
kObjectsOldToOld,
};
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
inline Heap* heap() { return collector_->heap(); }
inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
// Note: The order of checks is important in this function.
if (chunk->InNewSpace()) return kObjectsNewToOld;
if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
return kPageNewToOld;
DCHECK(chunk->IsEvacuationCandidate());
return kObjectsOldToOld;
}
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
duration_ += duration;
bytes_compacted_ += bytes_compacted;
}
template <IterationMode mode, class Visitor>
inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
MarkCompactCollector* collector_;
// Locally cached collector data.
......@@ -3138,78 +3117,87 @@ class MarkCompactCollector::Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
template <MarkCompactCollector::IterationMode mode, class Visitor>
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
Visitor* visitor) {
bool success = false;
DCHECK(page->SweepingDone());
int saved_live_bytes = page->LiveBytes();
double evacuation_time = 0.0;
Heap* heap = page->heap();
DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
int saved_live_bytes = p->LiveBytes();
double evacuation_time;
{
AlwaysAllocateScope always_allocate(heap->isolate());
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
success = collector_->VisitLiveObjects(page, &new_space_visitor_,
kClearMarkbits);
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
DCHECK(success);
break;
case kPageNewToOld:
success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
kKeepMarking);
// ArrayBufferTracker will be updated during sweeping.
DCHECK(success);
break;
case kPageNewToNew:
new_space_page_visitor.account_semispace_copied(page->LiveBytes());
// ArrayBufferTracker will be updated during sweeping.
success = true;
break;
case kObjectsOldToOld:
success = collector_->VisitLiveObjects(page, &old_space_visitor_,
kClearMarkbits);
if (!success) {
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in the first place.
// Note: We mark the page as aborted here to be able to record slots
// for code objects in |RecordMigratedSlotVisitor|.
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
success =
collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
DCHECK(success);
// We need to return failure here to indicate that we want this page
// added to the sweeper.
success = false;
} else {
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
}
break;
default:
UNREACHABLE();
}
success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
PrintIsolate(heap->isolate(),
"evacuation[%p]: page=%p new_space=%d "
"page_evacuation=%d executable=%d contains_age_mark=%d "
"live_bytes=%d time=%f\n",
static_cast<void*>(this), static_cast<void*>(page),
page->InNewSpace(),
page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
page->Contains(heap->new_space()->age_mark()),
saved_live_bytes, evacuation_time);
const char age_mark_tag =
!p->InNewSpace()
? 'x'
: !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
? '>'
: !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
: '#';
PrintIsolate(heap()->isolate(),
"evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
"page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
static_cast<void*>(this), static_cast<void*>(p),
p->InNewSpace(), age_mark_tag,
p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
evacuation_time);
}
if (success) {
ReportCompactionProgress(evacuation_time, saved_live_bytes);
}
return success;
}
bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
bool result = false;
DCHECK(page->SweepingDone());
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
DCHECK(result);
USE(result);
break;
case kPageNewToOld:
result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
// ArrayBufferTracker will be updated during sweeping.
DCHECK(result);
USE(result);
break;
case kObjectsOldToOld:
result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
if (!result) {
// Aborted compaction page. We have to record slots here, since we might
// not have recorded them in the first place.
// Note: We mark the page as aborted here to be able to record slots
// for code objects in |RecordMigratedSlotVisitor|.
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
DCHECK(result);
USE(result);
// We need to return failure here to indicate that we want this page
// added to the sweeper.
return false;
}
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
break;
default:
UNREACHABLE();
}
return result;
}
void MarkCompactCollector::Evacuator::Finalize() {
heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
......@@ -3218,13 +3206,11 @@ void MarkCompactCollector::Evacuator::Finalize() {
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
new_space_page_visitor.promoted_size());
heap()->IncrementSemiSpaceCopiedObjectSize(
new_space_visitor_.semispace_copied_size() +
new_space_page_visitor.semispace_copied_size());
new_space_visitor_.semispace_copied_size());
heap()->IncrementYoungSurvivorsCounter(
new_space_visitor_.promoted_size() +
new_space_visitor_.semispace_copied_size() +
new_space_page_visitor.promoted_size() +
new_space_page_visitor.semispace_copied_size());
new_space_page_visitor.promoted_size());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}
......@@ -3272,33 +3258,30 @@ class EvacuationJobTraits {
static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
bool success, PerPageData data) {
using Evacuator = MarkCompactCollector::Evacuator;
Page* p = static_cast<Page*>(chunk);
switch (Evacuator::ComputeEvacuationMode(p)) {
case Evacuator::kPageNewToOld:
break;
case Evacuator::kPageNewToNew:
DCHECK(success);
break;
case Evacuator::kObjectsNewToOld:
DCHECK(success);
break;
case Evacuator::kObjectsOldToOld:
if (success) {
DCHECK(p->IsEvacuationCandidate());
DCHECK(p->SweepingDone());
p->Unlink();
} else {
// We have partially compacted the page, i.e., some objects may have
// moved, others are still in place.
p->ClearEvacuationCandidate();
// Slots have already been recorded so we just need to add it to the
// sweeper, which will happen after updating pointers.
*data += 1;
}
break;
default:
UNREACHABLE();
if (chunk->InNewSpace()) {
DCHECK(success);
} else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
DCHECK(success);
Page* p = static_cast<Page*>(chunk);
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
heap->mark_compact_collector()->sweeper().AddLatePage(
p->owner()->identity(), p);
} else {
Page* p = static_cast<Page*>(chunk);
if (success) {
DCHECK(p->IsEvacuationCandidate());
DCHECK(p->SweepingDone());
p->Unlink();
} else {
// We have partially compacted the page, i.e., some objects may have
// moved, others are still in place.
p->ClearEvacuationCandidate();
// Slots have already been recorded so we just need to add it to the
// sweeper.
*data += 1;
}
}
}
};
......@@ -3320,14 +3303,10 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
live_bytes += page->LiveBytes();
if (!page->NeverEvacuate() &&
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
!page->Contains(age_mark)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
} else {
EvacuateNewSpacePageVisitor::MoveToToSpace(page);
}
EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space());
}
job.AddPage(page, &abandoned_pages);
}
DCHECK_GE(job.NumberOfPages(), 1);
......@@ -3381,14 +3360,13 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode,
MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
ObjectVisitor* v) {
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) ||
(skip_list_mode == REBUILD_SKIP_LIST));
DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
space->identity() == CODE_SPACE);
DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
......@@ -3421,13 +3399,8 @@ int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, size,
ClearRecordedSlots::kNo);
}
freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
......@@ -3454,16 +3427,10 @@ int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, size,
ClearRecordedSlots::kNo);
}
freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
}
......@@ -3579,15 +3546,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuateNewSpacePrologue();
EvacuatePagesInParallel();
EvacuateNewSpaceEpilogue();
heap()->new_space()->set_age_mark(heap()->new_space()->top());
}
UpdatePointersAfterEvacuation();
if (!heap()->new_space()->Rebalance()) {
FatalProcessOutOfMemory("NewSpace::Rebalance");
}
// Give pages that are queued to be freed back to the OS. Note that filtering
// slots only handles old space (for unboxed doubles), and thus map space can
// still contain stale pointers. We only free the chunks after pointer updates
......@@ -3597,19 +3561,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
for (Page* p : newspace_evacuation_candidates_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
sweeper().AddLatePage(p->owner()->identity(), p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
sweeper().AddLatePage(p->owner()->identity(), p);
}
}
newspace_evacuation_candidates_.Rewind(0);
for (Page* p : evacuation_candidates_) {
// Important: skip list should be cleared only after roots were updated
// because root iteration traverses the stack and might have to find
......@@ -3704,15 +3655,6 @@ class PointerUpdateJobTraits {
if (heap->InToSpace(*slot)) {
return KEEP_SLOT;
}
} else if (heap->InToSpace(*slot)) {
DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address())
->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
// Slots can be in "to" space after a page has been moved. Since there is
// no forwarding information present we need to check the markbits to
// determine liveness.
if (Marking::IsBlack(
Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
return KEEP_SLOT;
} else {
DCHECK(!heap->InNewSpace(*slot));
}
......@@ -3745,24 +3687,6 @@ class ToSpacePointerUpdateJobTraits {
static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk, PerPageData limits) {
if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// New->new promoted pages contain garbage so they require iteration
// using markbits.
ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
} else {
ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
}
return true;
}
static const bool NeedSequentialFinalization = false;
static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
}
private:
static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk,
PerPageData limits) {
for (Address cur = limits.first; cur < limits.second;) {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
......@@ -3770,18 +3694,10 @@ class ToSpacePointerUpdateJobTraits {
object->IterateBody(map->instance_type(), size, visitor);
cur += size;
}
return true;
}
static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk,
PerPageData limits) {
LiveObjectIterator<kBlackObjects> it(chunk);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
Map* map = object->map();
int size = object->SizeFromMap(map);
object->IterateBody(map->instance_type(), size, visitor);
}
static const bool NeedSequentialFinalization = false;
static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
}
};
......@@ -3857,7 +3773,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
int freed = ParallelSweepPage(page, identity);
int freed = ParallelSweepPage(page, heap_->paged_space(identity));
pages_freed += 1;
DCHECK_GE(freed, 0);
max_freed = Max(max_freed, freed);
......@@ -3869,7 +3785,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
}
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
AllocationSpace identity) {
PagedSpace* space) {
int max_freed = 0;
if (page->mutex()->TryLock()) {
// If this page was already swept in the meantime, we can return here.
......@@ -3878,25 +3794,19 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
return 0;
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
if (identity == NEW_SPACE) {
RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
} else if (identity == OLD_SPACE) {
if (space->identity() == OLD_SPACE) {
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
heap_->paged_space(identity), page, nullptr);
} else if (identity == CODE_SPACE) {
IGNORE_FREE_SPACE>(space, page, NULL);
} else if (space->identity() == CODE_SPACE) {
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
heap_->paged_space(identity), page, nullptr);
IGNORE_FREE_SPACE>(space, page, NULL);
} else {
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
heap_->paged_space(identity), page, nullptr);
IGNORE_FREE_SPACE>(space, page, NULL);
}
{
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[identity].Add(page);
swept_list_[space->identity()].Add(page);
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
page->mutex()->Unlock();
......@@ -3922,8 +3832,7 @@ void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
Page* page) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
int to_sweep = page->area_size() - page->LiveBytes();
if (space != NEW_SPACE)
heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
}
Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
......@@ -3987,8 +3896,8 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST,
Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
space, p, nullptr);
continue;
}
......
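
The mark-compact hunks above also show why the CL needed a dedicated PAGE_NEW_NEW_PROMOTION flag at pointer-update time: a page moved wholesale into to-space still contains dead objects, so it cannot be walked linearly. Below is a self-contained sketch of that dispatch, mirroring ToSpacePointerUpdateJobTraits::ProcessPageInParallel and its markbit-based live iteration; FakePage and FakeObject are stand-ins invented for the sketch, not V8 types.

#include <functional>
#include <vector>

// Sketch only: stand-in types, not V8 code.
struct FakeObject { bool marked; };   // "black" in the real markbits
struct FakePage {
  bool new_to_new_promoted;           // PAGE_NEW_NEW_PROMOTION
  std::vector<FakeObject> objects;    // stand-in for the page payload
};

inline void UpdatePointersOnPage(
    const FakePage& page, const std::function<void(const FakeObject&)>& visit) {
  if (page.new_to_new_promoted) {
    // Garbage is still present: visit only live (marked) objects, as
    // LiveObjectIterator<kBlackObjects> does in ProcessPageInParallelVisitLive.
    for (const FakeObject& o : page.objects)
      if (o.marked) visit(o);
  } else {
    // Regularly evacuated to-space pages are densely packed and fully live,
    // so a linear walk (ProcessPageInParallelVisitAll) is enough.
    for (const FakeObject& o : page.objects) visit(o);
  }
}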
......@@ -408,7 +408,6 @@ class MarkCompactCollector {
enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
......@@ -417,7 +416,6 @@ class MarkCompactCollector {
template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
SkipListRebuildingMode skip_list_mode,
FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode>
static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);
......@@ -436,12 +434,11 @@ class MarkCompactCollector {
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity);
int ParallelSweepPage(Page* page, PagedSpace* space);
void StartSweeping();
void StartSweepingHelper(AllocationSpace space_to_start);
void EnsureCompleted();
void EnsureNewSpaceCompleted();
bool IsSweepingCompleted();
void SweepOrWaitUntilSweepingCompleted(Page* page);
......@@ -794,6 +791,7 @@ class MarkCompactCollector {
void SweepSpaces();
void EvacuateNewSpacePrologue();
void EvacuateNewSpaceEpilogue();
void EvacuatePagesInParallel();
......
......@@ -1386,6 +1386,7 @@ void NewSpace::TearDown() {
from_space_.TearDown();
}
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
......@@ -1431,48 +1432,6 @@ void NewSpace::Shrink() {
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
bool NewSpace::Rebalance() {
CHECK(heap()->promotion_queue()->is_empty());
// Order here is important to make use of the page pool.
return to_space_.EnsureCurrentCapacity() &&
from_space_.EnsureCurrentCapacity();
}
bool SemiSpace::EnsureCurrentCapacity() {
if (is_committed()) {
const int expected_pages = current_capacity_ / Page::kPageSize;
int actual_pages = 0;
Page* current_page = anchor()->next_page();
while (current_page != anchor()) {
actual_pages++;
current_page = current_page->next_page();
if (actual_pages > expected_pages) {
Page* to_remove = current_page->prev_page();
// Make sure we don't overtake the actual top pointer.
DCHECK_NE(to_remove, current_page_);
to_remove->Unlink();
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
to_remove);
}
}
while (actual_pages < expected_pages) {
actual_pages++;
current_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
Page::kAllocatableMemory, this, executable());
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
current_page->InsertAfter(anchor());
Bitmap::Clear(current_page);
current_page->SetFlags(anchor()->prev_page()->GetFlags(),
Page::kCopyAllFlags);
heap()->CreateFillerObjectAt(current_page->area_start(),
current_page->area_size(),
ClearRecordedSlots::kNo);
}
}
return true;
}
void LocalAllocationBuffer::Close() {
if (IsValid()) {
......@@ -1915,17 +1874,21 @@ void SemiSpace::Reset() {
current_page_ = anchor_.next_page();
}
void SemiSpace::RemovePage(Page* page) {
if (current_page_ == page) {
current_page_ = page->prev_page();
}
page->Unlink();
}
void SemiSpace::PrependPage(Page* page) {
page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
page->set_owner(this);
page->InsertAfter(anchor());
bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
// TODO(mlippautz): We do not have to get a new page here when the semispace
// is uncommitted later on.
Page* new_page = heap()->memory_allocator()->AllocatePage(
Page::kAllocatableMemory, this, executable());
if (new_page == nullptr) return false;
Bitmap::Clear(new_page);
new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
new_page->set_next_page(old_page->next_page());
new_page->set_prev_page(old_page->prev_page());
old_page->next_page()->set_prev_page(new_page);
old_page->prev_page()->set_next_page(new_page);
heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
ClearRecordedSlots::kNo);
return true;
}
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
......
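
To make the new->new move itself concrete, here is a self-contained sketch using std::list as a stand-in for V8's anchored page lists. It mirrors NewSpace::MovePageFromSpaceToSpace from the hunk above (the CL's code), which relinks a page from from-space into to-space without touching its contents; the restored code instead swaps in a freshly allocated empty page via ReplaceWithEmptyPage.

#include <cassert>
#include <iterator>
#include <list>
#include <string>

// Sketch only: stand-in types, not V8 code.
using Page = std::string;            // stand-in for v8::internal::Page*
using SemiSpace = std::list<Page>;   // stand-in for the anchored page list

void MovePageFromSpaceToSpace(SemiSpace& from, SemiSpace& to,
                              SemiSpace::iterator page) {
  // splice() only relinks the node; no page contents are copied, which is the
  // whole point of the new->new page evacuation mode.
  to.splice(to.begin(), from, page);
}

int main() {
  SemiSpace from_space{"p1", "p2", "p3"};
  SemiSpace to_space;
  MovePageFromSpaceToSpace(from_space, to_space, std::next(from_space.begin()));
  assert(to_space.front() == "p2");
  assert(from_space.size() == 2);
  return 0;
}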
......@@ -425,10 +425,6 @@ class MemoryChunk {
// from new to old space during evacuation.
PAGE_NEW_OLD_PROMOTION,
// |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
// within the new space during evacuation.
PAGE_NEW_NEW_PROMOTION,
// A black page has all mark bits set to 1 (black). A black page currently
// cannot be iterated because it is not swept. Moreover live bytes are also
// not updated.
......@@ -2435,8 +2431,6 @@ class SemiSpace : public Space {
// than the current capacity.
bool ShrinkTo(int new_capacity);
bool EnsureCurrentCapacity();
// Returns the start address of the first page of the space.
Address space_start() {
DCHECK_NE(anchor_.next_page(), anchor());
......@@ -2465,8 +2459,7 @@ class SemiSpace : public Space {
// Resets the space to using the first page.
void Reset();
void RemovePage(Page* page);
void PrependPage(Page* page);
bool ReplaceWithEmptyPage(Page* page);
// Age mark accessors.
Address age_mark() { return age_mark_; }
......@@ -2542,9 +2535,8 @@ class SemiSpace : public Space {
Page anchor_;
Page* current_page_;
friend class NewSpace;
friend class NewSpacePageIterator;
friend class SemiSpaceIterator;
friend class NewSpacePageIterator;
};
......@@ -2723,15 +2715,12 @@ class NewSpace : public Space {
return static_cast<size_t>(allocated);
}
void MovePageFromSpaceToSpace(Page* page) {
bool ReplaceWithEmptyPage(Page* page) {
// This method is called after flipping the semispace.
DCHECK(page->InFromSpace());
from_space_.RemovePage(page);
to_space_.PrependPage(page);
pages_used_++;
return from_space_.ReplaceWithEmptyPage(page);
}
bool Rebalance();
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
......
......@@ -112,7 +112,6 @@
'heap/test-incremental-marking.cc',
'heap/test-lab.cc',
'heap/test-mark-compact.cc',
'heap/test-page-promotion.cc',
'heap/test-spaces.cc',
'libsampler/test-sampler.cc',
'print-extension.cc',
......
......@@ -6561,6 +6561,56 @@ HEAP_TEST(Regress589413) {
heap->CollectGarbage(OLD_SPACE);
}
UNINITIALIZED_TEST(PagePromotion) {
FLAG_page_promotion = true;
FLAG_page_promotion_threshold = 0; // %
i::FLAG_min_semi_space_size = 8 * (Page::kPageSize / MB);
// We cannot optimize for size as we require a new space with more than one
// page.
i::FLAG_optimize_for_size = false;
// Set max_semi_space_size because it could've been initialized by an
// implication of optimize_for_size.
i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
// Clean up any left over objects from cctest initialization.
heap->CollectAllGarbage();
heap->CollectAllGarbage();
std::vector<Handle<FixedArray>> handles;
heap::SimulateFullSpace(heap->new_space(), &handles);
heap->CollectGarbage(NEW_SPACE);
CHECK_GT(handles.size(), 0u);
// First object in handle should be on the first page.
Handle<FixedArray> first_object = handles.front();
Page* first_page = Page::FromAddress(first_object->address());
// The age mark should not be on the first page.
CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
// To perform a sanity check on live bytes we need to mark the heap.
heap::SimulateIncrementalMarking(heap, true);
// Sanity check that the page meets the requirements for promotion.
const int threshold_bytes =
FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
CHECK_GE(first_page->LiveBytes(), threshold_bytes);
// Actual checks: The page is in new space first, but is moved to old space
// during a full GC.
CHECK(heap->new_space()->ContainsSlow(first_page->address()));
CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
heap->CollectGarbage(OLD_SPACE);
CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
CHECK(heap->old_space()->ContainsSlow(first_page->address()));
}
}
TEST(Regress598319) {
// This test ensures that no white objects can cross the progress bar of large
// objects during incremental marking. It checks this by using Shift() during
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/array-buffer-tracker.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
namespace {
v8::Isolate* NewIsolateForPagePromotion() {
i::FLAG_page_promotion = true;
i::FLAG_page_promotion_threshold = 0; // %
i::FLAG_min_semi_space_size = 8 * (i::Page::kPageSize / i::MB);
// We cannot optimize for size as we require a new space with more than one
// page.
i::FLAG_optimize_for_size = false;
// Set max_semi_space_size because it could've been initialized by an
// implication of optimize_for_size.
i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
return isolate;
}
} // namespace
namespace v8 {
namespace internal {
UNINITIALIZED_TEST(PagePromotion_NewToOld) {
v8::Isolate* isolate = NewIsolateForPagePromotion();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
std::vector<Handle<FixedArray>> handles;
heap::SimulateFullSpace(heap->new_space(), &handles);
heap->CollectGarbage(NEW_SPACE);
CHECK_GT(handles.size(), 0u);
// First object in handle should be on the first page.
Handle<FixedArray> first_object = handles.front();
Page* first_page = Page::FromAddress(first_object->address());
// To perform a sanity check on live bytes we need to mark the heap.
heap::SimulateIncrementalMarking(heap, true);
// Sanity check that the page meets the requirements for promotion.
const int threshold_bytes =
FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
CHECK_GE(first_page->LiveBytes(), threshold_bytes);
// Actual checks: The page is in new space first, but is moved to old space
// during a full GC.
CHECK(heap->new_space()->ContainsSlow(first_page->address()));
CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
heap::GcAndSweep(heap, OLD_SPACE);
CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
CHECK(heap->old_space()->ContainsSlow(first_page->address()));
}
}
UNINITIALIZED_TEST(PagePromotion_NewToNew) {
v8::Isolate* isolate = NewIsolateForPagePromotion();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
std::vector<Handle<FixedArray>> handles;
heap::SimulateFullSpace(heap->new_space(), &handles);
CHECK_GT(handles.size(), 0u);
// Last object in handles should definitely be on the last page which does
// not contain the age mark.
Handle<FixedArray> last_object = handles.back();
Page* to_be_promoted_page = Page::FromAddress(last_object->address());
CHECK(to_be_promoted_page->Contains(last_object->address()));
CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
heap::GcAndSweep(heap, OLD_SPACE);
CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
CHECK(to_be_promoted_page->Contains(last_object->address()));
}
}
UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
// Test makes sure JSArrayBuffer backing stores are still tracked after
// new-to-new promotion.
v8::Isolate* isolate = NewIsolateForPagePromotion();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
// Fill the current page which potentially contains the age mark.
heap::FillCurrentPage(heap->new_space());
// Allocate a buffer we would like to check against.
Handle<JSArrayBuffer> buffer =
i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100);
std::vector<Handle<FixedArray>> handles;
// Simulate a full space, filling the interesting page with live objects.
heap::SimulateFullSpace(heap->new_space(), &handles);
CHECK_GT(handles.size(), 0u);
// Last object in handles should definitely be on the last page which does
// not contain the age mark.
Handle<FixedArray> first_object = handles.front();
Page* to_be_promoted_page = Page::FromAddress(first_object->address());
CHECK(to_be_promoted_page->Contains(first_object->address()));
CHECK(to_be_promoted_page->Contains(buffer->address()));
CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address()));
CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address()));
heap::GcAndSweep(heap, OLD_SPACE);
CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address()));
CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address()));
CHECK(to_be_promoted_page->Contains(first_object->address()));
CHECK(to_be_promoted_page->Contains(buffer->address()));
CHECK(ArrayBufferTracker::IsTracked(*buffer));
}
}
} // namespace internal
} // namespace v8