Commit fc5765ce authored by Michael Lippautz, committed by Commit Bot

[heap] Simplify PageParallelJob

Remove FinalizePageSequentially as it had only a single use case that
was tied to the full collector.

Bug: chromium:651354
Change-Id: I03299ddbd439ea273e02dd33f12c005371694130
Reviewed-on: https://chromium-review.googlesource.com/504508
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45280}
parent dd9ac62c
...@@ -3575,18 +3575,17 @@ bool Evacuator::EvacuatePage(Page* page) { ...@@ -3575,18 +3575,17 @@ bool Evacuator::EvacuatePage(Page* page) {
} }
ReportCompactionProgress(evacuation_time, saved_live_bytes); ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) { if (FLAG_trace_evacuation) {
PrintIsolate( PrintIsolate(heap()->isolate(),
heap()->isolate(),
"evacuation[%p]: page=%p new_space=%d " "evacuation[%p]: page=%p new_space=%d "
"page_evacuation=%d executable=%d contains_age_mark=%d " "page_evacuation=%d executable=%d contains_age_mark=%d "
"live_bytes=%" V8PRIdPTR " time=%f page_promotion_qualifies=%d\n", "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(), static_cast<void*>(this), static_cast<void*>(page),
page->InNewSpace(),
page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
page->Contains(heap()->new_space()->age_mark()), saved_live_bytes, page->Contains(heap()->new_space()->age_mark()),
evacuation_time, saved_live_bytes, evacuation_time, success);
saved_live_bytes > Evacuator::PageEvacuationThreshold());
} }
return success; return success;
} }
...@@ -3657,7 +3656,8 @@ bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) { ...@@ -3657,7 +3656,8 @@ bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
// Aborted compaction page. We have to record slots here, since we // Aborted compaction page. We have to record slots here, since we
// might not have recorded them in first place. // might not have recorded them in first place.
// Note: We mark the page as aborted here to be able to record slots // Note: We mark the page as aborted here to be able to record slots
// for code objects in |RecordMigratedSlotVisitor|. // for code objects in |RecordMigratedSlotVisitor| and to be able
// to identify the page later on for post processing.
page->SetFlag(Page::COMPACTION_WAS_ABORTED); page->SetFlag(Page::COMPACTION_WAS_ABORTED);
EvacuateRecordOnlyVisitor record_visitor(heap()); EvacuateRecordOnlyVisitor record_visitor(heap());
success = object_visitor.VisitBlackObjects( success = object_visitor.VisitBlackObjects(
...@@ -3665,8 +3665,6 @@ bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) { ...@@ -3665,8 +3665,6 @@ bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
ArrayBufferTracker::ProcessBuffers( ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedKeepOthers); page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
DCHECK(success); DCHECK(success);
// We need to return failure here to indicate that we want this page
// added to the sweeper.
success = false; success = false;
} else { } else {
ArrayBufferTracker::ProcessBuffers( ArrayBufferTracker::ProcessBuffers(
...@@ -3739,49 +3737,15 @@ bool YoungGenerationEvacuator::RawEvacuatePage(Page* page, ...@@ -3739,49 +3737,15 @@ bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
class EvacuationJobTraits { class EvacuationJobTraits {
public: public:
struct PageData { struct PageData {
int* abandoned_pages; // Pointer to number of aborted pages.
MarkingState marking_state; MarkingState marking_state;
}; };
typedef PageData PerPageData; typedef PageData PerPageData;
typedef Evacuator* PerTaskData; typedef Evacuator* PerTaskData;
static const bool NeedSequentialFinalization = true; static void ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
MemoryChunk* chunk, PerPageData) { MemoryChunk* chunk, PerPageData) {
return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
}
static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
bool success, PerPageData data) {
Page* p = static_cast<Page*>(chunk);
switch (Evacuator::ComputeEvacuationMode(p)) {
case Evacuator::kPageNewToOld:
break;
case Evacuator::kPageNewToNew:
DCHECK(success);
break;
case Evacuator::kObjectsNewToOld:
DCHECK(success);
break;
case Evacuator::kObjectsOldToOld:
if (success) {
DCHECK(p->IsEvacuationCandidate());
DCHECK(p->SweepingDone());
p->Unlink();
} else {
// We have partially compacted the page, i.e., some objects may have
// moved, others are still in place.
p->ClearEvacuationCandidate();
// Slots have already been recorded so we just need to add it to the
// sweeper, which will happen after updating pointers.
*data.abandoned_pages += 1;
}
break;
default:
UNREACHABLE();
}
} }
}; };
...@@ -3789,8 +3753,7 @@ template <class Evacuator, class Collector> ...@@ -3789,8 +3753,7 @@ template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
Collector* collector, PageParallelJob<EvacuationJobTraits>* job, Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
RecordMigratedSlotVisitor* record_visitor, RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes, MigrationObserver* migration_observer, const intptr_t live_bytes) {
const int& abandoned_pages) {
// Used for trace summary. // Used for trace summary.
double compaction_speed = 0; double compaction_speed = 0;
if (FLAG_trace_evacuation) { if (FLAG_trace_evacuation) {
...@@ -3830,11 +3793,11 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( ...@@ -3830,11 +3793,11 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
if (FLAG_trace_evacuation) { if (FLAG_trace_evacuation) {
PrintIsolate(isolate(), PrintIsolate(isolate(),
"%8.0f ms: evacuation-summary: parallel=%s pages=%d " "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
"aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS "wanted_tasks=%d tasks=%d cores=%" PRIuS
" live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
isolate()->time_millis_since_init(), isolate()->time_millis_since_init(),
FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(), FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(),
abandoned_pages, wanted_num_tasks, job->NumberOfTasks(), wanted_num_tasks, job->NumberOfTasks(),
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
live_bytes, compaction_speed); live_bytes, compaction_speed);
} }
...@@ -3852,12 +3815,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() { ...@@ -3852,12 +3815,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
PageParallelJob<EvacuationJobTraits> job( PageParallelJob<EvacuationJobTraits> job(
heap_, heap_->isolate()->cancelable_task_manager(), heap_, heap_->isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_); &page_parallel_job_semaphore_);
int abandoned_pages = 0;
intptr_t live_bytes = 0; intptr_t live_bytes = 0;
for (Page* page : old_space_evacuation_pages_) { for (Page* page : old_space_evacuation_pages_) {
live_bytes += MarkingState::Internal(page).live_bytes(); live_bytes += MarkingState::Internal(page).live_bytes();
job.AddPage(page, {&abandoned_pages, marking_state(page)}); job.AddPage(page, {marking_state(page)});
} }
for (Page* page : new_space_evacuation_pages_) { for (Page* page : new_space_evacuation_pages_) {
...@@ -3870,20 +3832,20 @@ void MarkCompactCollector::EvacuatePagesInParallel() { ...@@ -3870,20 +3832,20 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
} }
} }
job.AddPage(page, {&abandoned_pages, marking_state(page)}); job.AddPage(page, {marking_state(page)});
} }
DCHECK_GE(job.NumberOfPages(), 1); DCHECK_GE(job.NumberOfPages(), 1);
RecordMigratedSlotVisitor record_visitor(this); RecordMigratedSlotVisitor record_visitor(this);
CreateAndExecuteEvacuationTasks<FullEvacuator>( CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor,
this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages); nullptr, live_bytes);
PostProcessEvacuationCandidates();
} }
void MinorMarkCompactCollector::EvacuatePagesInParallel() { void MinorMarkCompactCollector::EvacuatePagesInParallel() {
PageParallelJob<EvacuationJobTraits> job( PageParallelJob<EvacuationJobTraits> job(
heap_, heap_->isolate()->cancelable_task_manager(), heap_, heap_->isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_); &page_parallel_job_semaphore_);
int abandoned_pages = 0;
intptr_t live_bytes = 0; intptr_t live_bytes = 0;
for (Page* page : new_space_evacuation_pages_) { for (Page* page : new_space_evacuation_pages_) {
...@@ -3896,7 +3858,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() { ...@@ -3896,7 +3858,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
} }
} }
job.AddPage(page, {&abandoned_pages, marking_state(page)}); job.AddPage(page, {marking_state(page)});
} }
DCHECK_GE(job.NumberOfPages(), 1); DCHECK_GE(job.NumberOfPages(), 1);
...@@ -3905,7 +3867,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() { ...@@ -3905,7 +3867,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
YoungGenerationRecordMigratedSlotVisitor record_visitor( YoungGenerationRecordMigratedSlotVisitor record_visitor(
heap()->mark_compact_collector()); heap()->mark_compact_collector());
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>( CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); this, &job, &record_visitor, &observer, live_bytes);
} }
class EvacuationWeakObjectRetainer : public WeakObjectRetainer { class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
...@@ -4221,14 +4183,10 @@ class PointerUpdateJobTraits { ...@@ -4221,14 +4183,10 @@ class PointerUpdateJobTraits {
typedef int PerPageData; // Per page data is not used in this job. typedef int PerPageData; // Per page data is not used in this job.
typedef const MarkCompactCollectorBase* PerTaskData; typedef const MarkCompactCollectorBase* PerTaskData;
static bool ProcessPageInParallel(Heap* heap, PerTaskData task_data, static void ProcessPageInParallel(Heap* heap, PerTaskData task_data,
MemoryChunk* chunk, PerPageData) { MemoryChunk* chunk, PerPageData) {
UpdateUntypedPointers(heap, chunk, task_data); UpdateUntypedPointers(heap, chunk, task_data);
UpdateTypedPointers(heap, chunk, task_data); UpdateTypedPointers(heap, chunk, task_data);
return true;
}
static const bool NeedSequentialFinalization = false;
static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
} }
private: private:
...@@ -4346,7 +4304,7 @@ class ToSpacePointerUpdateJobTraits { ...@@ -4346,7 +4304,7 @@ class ToSpacePointerUpdateJobTraits {
typedef PageData PerPageData; typedef PageData PerPageData;
typedef PointersUpdatingVisitor* PerTaskData; typedef PointersUpdatingVisitor* PerTaskData;
static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, static void ProcessPageInParallel(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk, PerPageData page_data) { MemoryChunk* chunk, PerPageData page_data) {
if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// New->new promoted pages contain garbage so they require iteration // New->new promoted pages contain garbage so they require iteration
...@@ -4355,11 +4313,6 @@ class ToSpacePointerUpdateJobTraits { ...@@ -4355,11 +4313,6 @@ class ToSpacePointerUpdateJobTraits {
} else { } else {
ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data); ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data);
} }
return true;
}
static const bool NeedSequentialFinalization = false;
static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
} }
private: private:
...@@ -4486,6 +4439,24 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { ...@@ -4486,6 +4439,24 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
} }
} }
void MarkCompactCollector::PostProcessEvacuationCandidates() {
int aborted_pages = 0;
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
p->ClearEvacuationCandidate();
aborted_pages++;
} else {
DCHECK(p->IsEvacuationCandidate());
DCHECK(p->SweepingDone());
p->Unlink();
}
}
if (FLAG_trace_evacuation && (aborted_pages > 0)) {
PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
isolate()->time_millis_since_init(), aborted_pages);
}
}
void MarkCompactCollector::ReleaseEvacuationCandidates() { void MarkCompactCollector::ReleaseEvacuationCandidates() {
for (Page* p : old_space_evacuation_pages_) { for (Page* p : old_space_evacuation_pages_) {
if (!p->IsEvacuationCandidate()) continue; if (!p->IsEvacuationCandidate()) continue;
......
...@@ -322,8 +322,7 @@ class MarkCompactCollectorBase { ...@@ -322,8 +322,7 @@ class MarkCompactCollectorBase {
void CreateAndExecuteEvacuationTasks( void CreateAndExecuteEvacuationTasks(
Collector* collector, PageParallelJob<EvacuationJobTraits>* job, Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
RecordMigratedSlotVisitor* record_visitor, RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes, MigrationObserver* migration_observer, const intptr_t live_bytes);
const int& abandoned_pages);
// Returns whether this page should be moved according to heuristics. // Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes); bool ShouldMovePage(Page* p, intptr_t live_bytes);
...@@ -695,6 +694,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase { ...@@ -695,6 +694,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void UpdatePointersAfterEvacuation() override; void UpdatePointersAfterEvacuation() override;
void ReleaseEvacuationCandidates(); void ReleaseEvacuationCandidates();
void PostProcessEvacuationCandidates();
base::Semaphore page_parallel_job_semaphore_; base::Semaphore page_parallel_job_semaphore_;
......
...@@ -20,16 +20,10 @@ class Isolate; ...@@ -20,16 +20,10 @@ class Isolate;
// The JobTraits class needs to define: // The JobTraits class needs to define:
// - PerPageData type - state associated with each page. // - PerPageData type - state associated with each page.
// - PerTaskData type - state associated with each task. // - PerTaskData type - state associated with each task.
// - static bool ProcessPageInParallel(Heap* heap, // - static void ProcessPageInParallel(Heap* heap,
// PerTaskData task_data, // PerTaskData task_data,
// MemoryChunk* page, // MemoryChunk* page,
// PerPageData page_data) // PerPageData page_data)
// The function should return true iff processing succeeded.
// - static const bool NeedSequentialFinalization
// - static void FinalizePageSequentially(Heap* heap,
// bool processing_succeeded,
// MemoryChunk* page,
// PerPageData page_data)
template <typename JobTraits> template <typename JobTraits>
class PageParallelJob { class PageParallelJob {
public: public:
...@@ -108,21 +102,12 @@ class PageParallelJob { ...@@ -108,21 +102,12 @@ class PageParallelJob {
pending_tasks_->Wait(); pending_tasks_->Wait();
} }
} }
if (JobTraits::NeedSequentialFinalization) {
Item* item = items_;
while (item != nullptr) {
bool success = (item->state.Value() == kFinished);
JobTraits::FinalizePageSequentially(heap_, item->chunk, success,
item->data);
item = item->next;
}
}
} }
private: private:
static const int kMaxNumberOfTasks = 32; static const int kMaxNumberOfTasks = 32;
enum ProcessingState { kAvailable, kProcessing, kFinished, kFailed }; enum ProcessingState { kAvailable, kProcessing, kFinished };
struct Item : public Malloced { struct Item : public Malloced {
Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next) Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
...@@ -158,9 +143,9 @@ class PageParallelJob { ...@@ -158,9 +143,9 @@ class PageParallelJob {
} }
for (int i = 0; i < num_items_; i++) { for (int i = 0; i < num_items_; i++) {
if (current->state.TrySetValue(kAvailable, kProcessing)) { if (current->state.TrySetValue(kAvailable, kProcessing)) {
bool success = JobTraits::ProcessPageInParallel( JobTraits::ProcessPageInParallel(heap_, data_, current->chunk,
heap_, data_, current->chunk, current->data); current->data);
current->state.SetValue(success ? kFinished : kFailed); current->state.SetValue(kFinished);
} }
current = current->next; current = current->next;
// Wrap around if needed. // Wrap around if needed.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment