Commit 4cb6ef83 authored by Michael Lippautz, committed by Commit Bot

[heap] Move sweeper to separate file

Bug: 
Change-Id: Ie516167f047e48cda47a5dbfb156ea9ae164046c
Reviewed-on: https://chromium-review.googlesource.com/789878
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49656}
parent 888acb2f
@@ -1672,6 +1672,8 @@ v8_source_set("v8_base") {
"src/heap/spaces.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/heap/sweeper.cc",
"src/heap/sweeper.h",
"src/heap/worklist.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
@@ -43,6 +43,7 @@
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap/sweeper.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/object-macros.h"
#include "src/objects/shared-function-info.h"
@@ -1818,7 +1819,7 @@ void Heap::EvacuateYoungGeneration() {
DCHECK(CanExpandOldGeneration(new_space()->Size()));
}
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -1934,14 +1935,14 @@ void Heap::Scavenge() {
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
if (mark_compact_collector()->sweeper().sweeping_in_progress() &&
if (mark_compact_collector()->sweeper()->sweeping_in_progress() &&
memory_allocator_->unmapper()->NumberOfDelayedChunks() >
static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
mark_compact_collector()->EnsureSweepingCompleted();
}
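// Added note with assumed numbers (not part of this CL): with V8's 512 KB
// pages and, say, a 16 MB maximum new-space capacity, this check forces
// sweeping to complete once the unmapper holds more than 32 delayed chunks.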
// TODO(mlippautz): Untangle the dependency of the unmapper from the sweeper.
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
@@ -1969,17 +1970,15 @@ void Heap::Scavenge() {
}
{
- MarkCompactCollector::Sweeper* sweeper =
- &mark_compact_collector()->sweeper();
+ Sweeper* sweeper = mark_compact_collector()->sweeper();
// Pause the concurrent sweeper.
- MarkCompactCollector::Sweeper::PauseOrCompleteScope pause_scope(sweeper);
+ Sweeper::PauseOrCompleteScope pause_scope(sweeper);
// Filter out pages from the sweeper that need to be processed for old to
// new slots by the Scavenger. After processing, the Scavenger adds back
// pages that are still unswept. This way the Scavenger has exclusive
// access to the slots of a page and can completely avoid any locks on
// the page itself.
- MarkCompactCollector::Sweeper::FilterSweepingPagesScope filter_scope(
- sweeper, pause_scope);
+ Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
filter_scope.FilterOldSpaceSweepingPages(
[](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
@@ -6613,7 +6612,7 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
DCHECK_EQ(page->owner(), code_space());
mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(page);
mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(page);
Address addr = page->skip_list()->StartFor(inner_pointer);
Address top = code_space()->top();
@@ -2583,6 +2583,7 @@ class Heap {
friend class PagedSpace;
friend class Scavenger;
friend class StoreBuffer;
+ friend class Sweeper;
friend class heap::TestMemoryAllocatorScope;
// The allocator interface.
@@ -15,6 +15,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
#include "src/visitors.h"
@@ -867,7 +868,7 @@ void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
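// Added note: if concurrent sweeping is disabled or all sweeper tasks have
// already finished, the remaining work cannot complete in the background,
// so it is finished on the main thread below.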
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(!FLAG_concurrent_sweeping ||
!heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
!heap_->mark_compact_collector()->sweeper()->AreSweeperTasksRunning())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
This diff is collapsed.
@@ -11,6 +11,7 @@
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
namespace v8 {
@@ -243,7 +244,6 @@ class LiveObjectVisitor : AllStatic {
};
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
- enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
@@ -617,149 +617,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
class RootMarkingVisitor;
class CustomRootBodyMarkingVisitor;
- class Sweeper {
- public:
- typedef std::deque<Page*> SweepingList;
- typedef std::vector<Page*> SweptList;
- // Pauses the sweeper tasks or completes sweeping.
- class PauseOrCompleteScope final {
- public:
- explicit PauseOrCompleteScope(Sweeper* sweeper);
- ~PauseOrCompleteScope();
- private:
- Sweeper* const sweeper_;
- };
- // Temporary filters old space sweeping lists. Requires the concurrent
- // sweeper to be paused. Allows for pages to be added to the sweeper while
- // in this scope. Note that the original list of sweeping pages is restored
- // after exiting this scope.
- class FilterSweepingPagesScope final {
- public:
- explicit FilterSweepingPagesScope(
- Sweeper* sweeper,
- const PauseOrCompleteScope& pause_or_complete_scope);
- ~FilterSweepingPagesScope();
- template <typename Callback>
- void FilterOldSpaceSweepingPages(Callback callback) {
- if (!sweeping_in_progress_) return;
- SweepingList* sweeper_list = &sweeper_->sweeping_list_[OLD_SPACE];
- // Iteration here is from most free space to least free space.
- for (auto it = old_space_sweeping_list_.begin();
- it != old_space_sweeping_list_.end(); it++) {
- if (callback(*it)) {
- sweeper_list->push_back(*it);
- }
- }
- }
- private:
- Sweeper* const sweeper_;
- SweepingList old_space_sweeping_list_;
- const PauseOrCompleteScope& pause_or_complete_scope_;
- bool sweeping_in_progress_;
- };
- enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
- enum ClearOldToNewSlotsMode {
- DO_NOT_CLEAR,
- CLEAR_REGULAR_SLOTS,
- CLEAR_TYPED_SLOTS
- };
- enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
- int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
- FreeSpaceTreatmentMode free_space_mode);
- Sweeper(Heap* heap,
- MarkCompactCollector::NonAtomicMarkingState* marking_state)
- : heap_(heap),
- marking_state_(marking_state),
- num_tasks_(0),
- pending_sweeper_tasks_semaphore_(0),
- incremental_sweeper_pending_(false),
- sweeping_in_progress_(false),
- num_sweeping_tasks_(0),
- stop_sweeper_tasks_(false) {}
- bool sweeping_in_progress() const { return sweeping_in_progress_; }
- void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
- int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
- int max_pages = 0);
- int ParallelSweepPage(Page* page, AllocationSpace identity);
- void ScheduleIncrementalSweepingTask();
- // After calling this function sweeping is considered to be in progress
- // and the main thread can sweep lazily, but the background sweeper tasks
- // are not running yet.
- void StartSweeping();
- void StartSweeperTasks();
- void EnsureCompleted();
- void EnsureNewSpaceCompleted();
- bool AreSweeperTasksRunning();
- void SweepOrWaitUntilSweepingCompleted(Page* page);
- void AddSweptPageSafe(PagedSpace* space, Page* page);
- Page* GetSweptPageSafe(PagedSpace* space);
- private:
- class IncrementalSweeperTask;
- class SweeperTask;
- static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
- static const int kMaxSweeperTasks = kAllocationSpaces;
- template <typename Callback>
- void ForAllSweepingSpaces(Callback callback) {
- for (int i = 0; i < kAllocationSpaces; i++) {
- callback(static_cast<AllocationSpace>(i));
- }
- }
- // Can only be called on the main thread when no tasks are running.
- bool IsDoneSweeping() const {
- for (int i = 0; i < kAllocationSpaces; i++) {
- if (!sweeping_list_[i].empty()) return false;
- }
- return true;
- }
- void SweepSpaceFromTask(AllocationSpace identity);
- // Sweeps incrementally one page from the given space. Returns true if
- // there are no more pages to sweep in the given space.
- bool SweepSpaceIncrementallyFromTask(AllocationSpace identity);
- void AbortAndWaitForTasks();
- Page* GetSweepingPageSafe(AllocationSpace space);
- void PrepareToBeSweptPage(AllocationSpace space, Page* page);
- Heap* const heap_;
- MarkCompactCollector::NonAtomicMarkingState* marking_state_;
- int num_tasks_;
- CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
- base::Semaphore pending_sweeper_tasks_semaphore_;
- base::Mutex mutex_;
- SweptList swept_list_[kAllocationSpaces];
- SweepingList sweeping_list_[kAllocationSpaces];
- bool incremental_sweeper_pending_;
- bool sweeping_in_progress_;
- // Counter is actively maintained by the concurrent tasks to avoid querying
- // the semaphore for maintaining a task counter on the main thread.
- base::AtomicNumber<intptr_t> num_sweeping_tasks_;
- // Used by PauseOrCompleteScope to signal early bailout to tasks.
- base::AtomicValue<bool> stop_sweeper_tasks_;
- };
enum IterationMode {
kKeepMarking,
kClearMarkbits,
@@ -813,7 +670,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EnsureSweepingCompleted();
// Checks if sweeping is in progress right now on any space.
- bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }
+ bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
@@ -831,7 +688,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
Sweeper& sweeper() { return sweeper_; }
Sweeper* sweeper() { return sweeper_; }
#ifdef DEBUG
// Checks whether performing mark-compact collection.
@@ -850,6 +707,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
private:
explicit MarkCompactCollector(Heap* heap);
+ ~MarkCompactCollector();
bool WillBeDeoptimized(Code* code);
@@ -1015,7 +873,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
std::vector<Page*> new_space_evacuation_pages_;
std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;
- Sweeper sweeper_;
+ Sweeper* sweeper_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
@@ -9,6 +9,7 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects-body-descriptors-inl.h"
namespace v8 {
@@ -89,9 +90,9 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
AllocationSpace space = page->owner()->identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
heap()->mark_compact_collector()->sweeper().AddPage(
heap()->mark_compact_collector()->sweeper()->AddPage(
space, reinterpret_cast<Page*>(page),
MarkCompactCollector::Sweeper::READD_TEMPORARY_REMOVED_PAGE);
Sweeper::READD_TEMPORARY_REMOVED_PAGE);
}
}
@@ -15,6 +15,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
@@ -56,7 +57,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
Page* cur_page = *(current_page_++);
Heap* heap = space_->heap();
heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
heap->mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(
cur_page);
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
@@ -412,7 +413,7 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
// Chunks in old generation are unmapped if they are empty.
DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
return !chunk->InNewSpace() || mc == nullptr ||
!mc->sweeper().sweeping_in_progress();
!mc->sweeper()->sweeping_in_progress();
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
@@ -1437,7 +1438,7 @@ void PagedSpace::RefillFreeList() {
size_t added = 0;
{
Page* p = nullptr;
while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
@@ -3201,7 +3202,7 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
collector->sweeper().ParallelSweepSpace(identity(), 0);
collector->sweeper()->ParallelSweepSpace(identity(), 0);
RefillFreeList();
return free_list_.Allocate(size_in_bytes);
}
@@ -3228,7 +3229,7 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
if (FLAG_concurrent_sweeping && !is_local() &&
!collector->sweeper().AreSweeperTasksRunning()) {
!collector->sweeper()->AreSweeperTasksRunning()) {
collector->EnsureSweepingCompleted();
}
@@ -3240,7 +3241,7 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
// If sweeping is still in progress try to sweep pages.
int max_freed = collector->sweeper().ParallelSweepSpace(
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
This diff is collapsed.
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_SWEEPER_H_
#define V8_HEAP_SWEEPER_H_
#include <deque>
#include <vector>
#include "src/base/platform/semaphore.h"
#include "src/cancelable-task.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
class MajorNonAtomicMarkingState;
class Page;
class PagedSpace;
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
class Sweeper {
public:
typedef std::deque<Page*> SweepingList;
typedef std::vector<Page*> SweptList;
// Pauses the sweeper tasks or completes sweeping.
class PauseOrCompleteScope final {
public:
explicit PauseOrCompleteScope(Sweeper* sweeper);
~PauseOrCompleteScope();
private:
Sweeper* const sweeper_;
};
// Temporarily filters old space sweeping lists. Requires the concurrent
// sweeper to be paused. Allows for pages to be added to the sweeper while
// in this scope. Note that the original list of sweeping pages is restored
// after exiting this scope.
class FilterSweepingPagesScope final {
public:
explicit FilterSweepingPagesScope(
Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope);
~FilterSweepingPagesScope();
template <typename Callback>
void FilterOldSpaceSweepingPages(Callback callback) {
if (!sweeping_in_progress_) return;
SweepingList* sweeper_list = &sweeper_->sweeping_list_[OLD_SPACE];
// Iteration here is from most free space to least free space.
for (auto it = old_space_sweeping_list_.begin();
it != old_space_sweeping_list_.end(); it++) {
if (callback(*it)) {
sweeper_list->push_back(*it);
}
}
}
private:
Sweeper* const sweeper_;
SweepingList old_space_sweeping_list_;
const PauseOrCompleteScope& pause_or_complete_scope_;
bool sweeping_in_progress_;
};
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
CLEAR_REGULAR_SLOTS,
CLEAR_TYPED_SLOTS
};
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
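// Added note: READD_TEMPORARY_REMOVED_PAGE is used by the Scavenger (see
// Scavenger::AddPageToSweeperIfNecessary in this CL) when it returns pages
// it temporarily took from the sweeper via FilterSweepingPagesScope.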
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
: heap_(heap),
marking_state_(marking_state),
num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
incremental_sweeper_pending_(false),
sweeping_in_progress_(false),
num_sweeping_tasks_(0),
stop_sweeper_tasks_(false) {}
bool sweeping_in_progress() const { return sweeping_in_progress_; }
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
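// Added note, inferred from PagedSpace::RawSlowAllocateRaw in this CL: the
// return value is compared against the number of bytes an allocation needs,
// i.e. it reports the largest freed block in bytes.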
int ParallelSweepPage(Page* page, AllocationSpace identity);
void ScheduleIncrementalSweepingTask();
int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
void StartSweeping();
void StartSweeperTasks();
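// Added note: splitting StartSweeping from StartSweeperTasks lets the main
// thread sweep lazily before any background task exists; the Regress658718
// test below relies on this split to delay task startup.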
void EnsureCompleted();
void EnsureNewSpaceCompleted();
bool AreSweeperTasksRunning();
void SweepOrWaitUntilSweepingCompleted(Page* page);
void AddSweptPageSafe(PagedSpace* space, Page* page);
Page* GetSweptPageSafe(PagedSpace* space);
private:
class IncrementalSweeperTask;
class SweeperTask;
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
static const int kMaxSweeperTasks = kAllocationSpaces;
template <typename Callback>
void ForAllSweepingSpaces(Callback callback) {
for (int i = 0; i < kAllocationSpaces; i++) {
callback(static_cast<AllocationSpace>(i));
}
}
// Can only be called on the main thread when no tasks are running.
bool IsDoneSweeping() const {
for (int i = 0; i < kAllocationSpaces; i++) {
if (!sweeping_list_[i].empty()) return false;
}
return true;
}
void SweepSpaceFromTask(AllocationSpace identity);
// Incrementally sweeps one page from the given space. Returns true if
// there are no more pages to sweep in the given space.
bool SweepSpaceIncrementallyFromTask(AllocationSpace identity);
void AbortAndWaitForTasks();
Page* GetSweepingPageSafe(AllocationSpace space);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
Heap* const heap_;
MajorNonAtomicMarkingState* marking_state_;
int num_tasks_;
CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
base::Semaphore pending_sweeper_tasks_semaphore_;
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
bool incremental_sweeper_pending_;
bool sweeping_in_progress_;
// The counter is actively maintained by the concurrent tasks so the main
// thread does not have to query the semaphore to track running tasks.
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
base::AtomicValue<bool> stop_sweeper_tasks_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_SWEEPER_H_
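For orientation, a minimal usage sketch of the relocated class, assembled from the Heap::Scavenge hunk above (no API is assumed beyond what this CL shows):

Sweeper* sweeper = mark_compact_collector()->sweeper();
// Pause the concurrent sweeper tasks, or let them run to completion.
Sweeper::PauseOrCompleteScope pause_scope(sweeper);
// Set the old-space sweeping list aside; the callback returns pages without
// OLD_TO_NEW slots to the sweeper and keeps the rest for the Scavenger.
Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
filter_scope.FilterOldSpaceSweepingPages(
    [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });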
@@ -1043,6 +1043,8 @@
'heap/spaces.h',
'heap/store-buffer.cc',
'heap/store-buffer.h',
+ 'heap/sweeper.cc',
+ 'heap/sweeper.h',
'heap/worklist.h',
'intl.cc',
'intl.h',
@@ -192,7 +192,7 @@ UNINITIALIZED_HEAP_TEST(Regress658718) {
heap->new_space()->Shrink();
heap->memory_allocator()->unmapper()->WaitUntilCompleted();
heap->delay_sweeper_tasks_for_testing_ = false;
heap->mark_compact_collector()->sweeper().StartSweeperTasks();
heap->mark_compact_collector()->sweeper()->StartSweeperTasks();
heap->mark_compact_collector()->EnsureSweepingCompleted();
}
isolate->Dispose();