Commit 9317f8d2 authored by Michael Lippautz, committed by Commit Bot

[heap] Scavenger: Filter pages that are scavenged from sweeper

Avoid locking pages in the scavenger by filtering out of the sweeper those pages
that require both sweeping and scavenging. This preserves the assumption that
slot processing on a page is sequential.

Pages that have been scavenged are re-added to the sweeper to allow them to
be reused for allocation.
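
In outline: the scavenger temporarily takes the pages it needs out of the sweeper's old-space work list, processes their OLD_TO_NEW slots without taking page locks, and hands unswept pages back afterwards. A minimal sketch of that flow with simplified stand-in types (Page, Sweeper and their fields below are placeholders, not the V8 classes; the real implementation is in the diff that follows):

```cpp
// Minimal sketch of the filter-and-re-add idea with stand-in types (not the
// V8 classes). Pages that still carry OLD_TO_NEW slots are pulled out of the
// sweeper's work list, scavenged without page locks, and then handed back so
// they can still be swept and reused for allocation.
#include <deque>
#include <iostream>
#include <utility>

struct Page {
  bool has_old_to_new_slots;
  bool swept;
};

struct Sweeper {
  std::deque<Page*> old_space_list;
};

int main() {
  Page a{true, false}, b{false, false}, c{true, false};
  Sweeper sweeper{{&a, &b, &c}};

  // "Filter scope": take the whole list; keep only pages the scavenger does
  // not care about, and remember the rest for scavenging.
  std::deque<Page*> taken = std::move(sweeper.old_space_list);
  sweeper.old_space_list.clear();
  std::deque<Page*> to_scavenge;
  for (Page* p : taken) {
    if (p->has_old_to_new_slots)
      to_scavenge.push_back(p);             // exclusive to the scavenger now
    else
      sweeper.old_space_list.push_back(p);  // stays with the sweeper
  }

  // Scavenge the filtered pages without taking page locks, then re-add any
  // page that is still unswept so the sweeper can reuse it later.
  for (Page* p : to_scavenge) {
    // ...process OLD_TO_NEW slots of |p|...
    if (!p->swept) sweeper.old_space_list.push_back(p);
  }

  std::cout << "pages back in the sweeper: " << sweeper.old_space_list.size()
            << "\n";  // prints 3
}
```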

CQ_INCLUDE_TRYBOTS=master.tryserver.v8:v8_linux64_tsan_rel;master.tryserver.v8:v8_linux64_tsan_concurrent_marking_rel_ng;master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel;master.tryserver.chromium.android:android_optional_gpu_tests_rel

Bug: v8:6923
Change-Id: I7d159de11792ef16a534f29e31791d06d7fb5221
Reviewed-on: https://chromium-review.googlesource.com/730603
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48859}
parent a9b09801
......@@ -1914,16 +1914,26 @@ void Heap::Scavenge() {
job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
}
{
CodeSpaceMemoryModificationScope code_modification(this);
MarkCompactCollector::Sweeper* sweeper =
&mark_compact_collector()->sweeper();
// Pause the concurrent sweeper.
MarkCompactCollector::Sweeper::PauseOrCompleteScope pause_scope(sweeper);
// Filter out pages from the sweeper that need to be processed for old to
// new slots by the Scavenger. After processing, the Scavenger adds back
// pages that are still unswept. This way the Scavenger has exclusive
// access to the slots of a page and can completely avoid any locks on
// the page itself.
MarkCompactCollector::Sweeper::FilterSweepingPagesScope filter_scope(
sweeper, pause_scope);
filter_scope.FilterOldSpaceSweepingPages(
[](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
this, [&job](MemoryChunk* chunk) {
job.AddItem(new PageScavengingItem(chunk));
});
{
MarkCompactCollector::Sweeper::PauseOrCompleteScope sweeper_scope(
&mark_compact_collector()->sweeper());
RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]);
{
......@@ -1961,12 +1971,12 @@ void Heap::Scavenge() {
&root_scavenge_visitor);
scavengers[kMainThreadId]->Process();
}
}
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i]->Finalize();
delete scavengers[i];
}
}
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
......
......@@ -73,14 +73,12 @@ class LocalAllocator {
}
}
void AnnounceLockedPage(MemoryChunk* chunk) {
void PreferredSweepingPage(MemoryChunk* chunk) {
const AllocationSpace space = chunk->owner()->identity();
// There are no allocations on large object and map space and hence we
// cannot announce that we locked a page there.
if (space == LO_SPACE || space == MAP_SPACE) return;
// Only announce preferred pages for OLD_SPACE.
if (space != OLD_SPACE) return;
DCHECK(space != NEW_SPACE);
compaction_spaces_.Get(space)->AnnounceLockedPage(
compaction_spaces_.Get(space)->PreferredSweepingPage(
reinterpret_cast<Page*>(chunk));
}
......
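
The hunk above also narrows the guard: instead of excluding only LO_SPACE and MAP_SPACE, a preferred sweeping page is now announced for OLD_SPACE alone. A small hedged sketch of that filter, with a placeholder enum and types rather than V8's definitions:

```cpp
// Hedged sketch of the OLD_SPACE-only guard; the enum, Page and
// CompactionSpace below are placeholders, not V8's definitions.
#include <cassert>

enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, LO_SPACE };

struct Page {
  AllocationSpace space;
};

struct CompactionSpace {
  Page* preferred_sweeping_page = nullptr;
  void PreferredSweepingPage(Page* p) { preferred_sweeping_page = p; }
};

// Only old-space pages are remembered as preferred sweeping candidates; all
// other spaces are simply ignored.
void AnnouncePreferred(CompactionSpace* old_space, Page* page) {
  if (page->space != OLD_SPACE) return;
  old_space->PreferredSweepingPage(page);
}

int main() {
  CompactionSpace old_space;
  Page code_page{CODE_SPACE}, old_page{OLD_SPACE};
  AnnouncePreferred(&old_space, &code_page);  // ignored
  AnnouncePreferred(&old_space, &old_page);   // recorded
  assert(old_space.preferred_sweeping_page == &old_page);
}
```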
......@@ -630,6 +630,29 @@ MarkCompactCollector::Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
sweeper_->StartSweeperTasks();
}
MarkCompactCollector::Sweeper::FilterSweepingPagesScope::
FilterSweepingPagesScope(
MarkCompactCollector::Sweeper* sweeper,
const PauseOrCompleteScope& pause_or_complete_scope)
: sweeper_(sweeper),
pause_or_complete_scope_(pause_or_complete_scope),
sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
USE(pause_or_complete_scope_);
if (!sweeping_in_progress_) return;
old_space_sweeping_list_ = std::move(sweeper_->sweeping_list_[OLD_SPACE]);
sweeper_->sweeping_list_[OLD_SPACE].clear();
}
MarkCompactCollector::Sweeper::FilterSweepingPagesScope::
~FilterSweepingPagesScope() {
DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
if (!sweeping_in_progress_) return;
sweeper_->sweeping_list_[OLD_SPACE] = std::move(old_space_sweeping_list_);
// old_space_sweeping_list_ does not need to be cleared as we don't use it.
}
class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
public:
SweeperTask(Isolate* isolate, Sweeper* sweeper,
......@@ -3767,12 +3790,12 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
sweeper().AddPage(p->owner()->identity(), p);
sweeper().AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
sweeper().AddPage(p->owner()->identity(), p);
sweeper().AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
}
}
new_space_evacuation_pages_.clear();
......@@ -3784,7 +3807,7 @@ void MarkCompactCollector::Evacuate() {
SkipList* list = p->skip_list();
if (list != nullptr) list->Clear();
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
sweeper().AddPage(p->owner()->identity(), p);
sweeper().AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
......@@ -4455,9 +4478,20 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
return max_freed;
}
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
void MarkCompactCollector::Sweeper::AddPage(
AllocationSpace space, Page* page,
MarkCompactCollector::Sweeper::AddPageMode mode) {
base::LockGuard<base::Mutex> guard(&mutex_);
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
if (mode == Sweeper::REGULAR) {
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
PrepareToBeSweptPage(space, page);
} else {
// Page has been temporarily removed from the sweeper. Accounting already
// happened when the page was initially added, so it is skipped here.
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
sweeping_list_[space].push_back(page);
}
......@@ -4528,7 +4562,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
unused_page_present = true;
}
sweeper().AddPage(space->identity(), p);
sweeper().AddPage(space->identity(), p, Sweeper::REGULAR);
will_be_swept++;
}
......
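
The new AddPage mode splits the two ways a page can reach the sweeper: REGULAR pages go through the usual preparation and accounting, while READD_TEMPORARY_REMOVED_PAGE skips both because they already happened on the first add. A rough stand-alone illustration of that distinction (the enum values, state names and the counted flag are invented for the sketch):

```cpp
// Stand-alone illustration of the two AddPage modes; the enums, states and
// the |counted| flag are invented for this sketch, not V8's bookkeeping.
#include <cassert>
#include <deque>

enum class SweepingState { kDone, kPending };
enum class AddPageMode { kRegular, kReaddTemporarilyRemovedPage };

struct Page {
  SweepingState state = SweepingState::kDone;
  bool counted = false;  // stands in for the "to be swept" byte accounting
};

struct Sweeper {
  std::deque<Page*> list;

  void PrepareToBeSwept(Page* p) {
    p->counted = true;                   // account the page exactly once
    p->state = SweepingState::kPending;  // now waiting to be swept
  }

  void AddPage(Page* p, AddPageMode mode) {
    if (mode == AddPageMode::kRegular) {
      assert(p->state == SweepingState::kDone);
      PrepareToBeSwept(p);
    } else {
      // Page was only temporarily removed; accounting already happened when
      // it was first added, so it is skipped here.
      assert(p->counted);
    }
    assert(p->state == SweepingState::kPending);
    list.push_back(p);
  }
};

int main() {
  Sweeper sweeper;
  Page p;
  sweeper.AddPage(&p, AddPageMode::kRegular);  // first add: accounted
  sweeper.list.pop_front();                    // temporarily removed
  sweeper.AddPage(&p, AddPageMode::kReaddTemporarilyRemovedPage);  // no re-count
}
```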
......@@ -593,8 +593,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
class Sweeper {
public:
typedef std::deque<Page*> SweepingList;
typedef std::vector<Page*> SweptList;
// Pauses the sweeper tasks or completes sweeping.
class PauseOrCompleteScope {
class PauseOrCompleteScope final {
public:
explicit PauseOrCompleteScope(Sweeper* sweeper);
~PauseOrCompleteScope();
......@@ -603,20 +606,50 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Sweeper* const sweeper_;
};
// Temporarily filters old space sweeping lists. Requires the concurrent
// sweeper to be paused. Allows for pages to be added to the sweeper while
// in this scope. Note that the original list of sweeping pages is restored
// after exiting this scope.
class FilterSweepingPagesScope final {
public:
explicit FilterSweepingPagesScope(
Sweeper* sweeper,
const PauseOrCompleteScope& pause_or_complete_scope);
~FilterSweepingPagesScope();
template <typename Callback>
void FilterOldSpaceSweepingPages(Callback callback) {
if (!sweeping_in_progress_) return;
SweepingList* sweeper_list = &sweeper_->sweeping_list_[OLD_SPACE];
// Iteration here is from most free space to least free space.
for (auto it = old_space_sweeping_list_.begin();
it != old_space_sweeping_list_.end(); it++) {
if (callback(*it)) {
sweeper_list->push_back(*it);
}
}
}
private:
Sweeper* const sweeper_;
SweepingList old_space_sweeping_list_;
const PauseOrCompleteScope& pause_or_complete_scope_;
bool sweeping_in_progress_;
};
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
CLEAR_REGULAR_SLOTS,
CLEAR_TYPED_SLOTS
};
typedef std::deque<Page*> SweepingList;
typedef std::vector<Page*> SweptList;
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
explicit Sweeper(Heap* heap,
Sweeper(Heap* heap,
MarkCompactCollector::NonAtomicMarkingState* marking_state)
: heap_(heap),
marking_state_(marking_state),
......@@ -628,7 +661,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool sweeping_in_progress() const { return sweeping_in_progress_; }
void AddPage(AllocationSpace space, Page* page);
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
......
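
FilterSweepingPagesScope, declared above, is essentially a save/filter/restore RAII helper: the constructor moves the old-space sweeping list out of the sweeper, the callback hands back only the pages the sweeper may keep, and the destructor restores the original list. A hedged sketch of that idiom with simplified stand-ins (FilterScope, Page and Sweeper below are not the V8 types):

```cpp
// Hedged sketch of the save/filter/restore idiom behind
// FilterSweepingPagesScope; FilterScope, Page and Sweeper are simplified
// stand-ins, not the V8 types.
#include <cassert>
#include <deque>
#include <utility>

struct Page {
  bool has_old_to_new_slots;
};

struct Sweeper {
  std::deque<Page*> old_space_sweeping_list;
};

class FilterScope {
 public:
  explicit FilterScope(Sweeper* sweeper) : sweeper_(sweeper) {
    // Take ownership of the whole list; the sweeper starts over from empty.
    saved_ = std::move(sweeper_->old_space_sweeping_list);
    sweeper_->old_space_sweeping_list.clear();
  }

  ~FilterScope() {
    // Restore the original list when the scope ends.
    sweeper_->old_space_sweeping_list = std::move(saved_);
  }

  // Hand back to the sweeper only the pages the callback approves of.
  template <typename Callback>
  void Filter(Callback callback) {
    for (Page* p : saved_) {
      if (callback(p)) sweeper_->old_space_sweeping_list.push_back(p);
    }
  }

 private:
  Sweeper* const sweeper_;
  std::deque<Page*> saved_;
};

int main() {
  Page a{true}, b{false};
  Sweeper sweeper{{&a, &b}};
  {
    FilterScope scope(&sweeper);
    scope.Filter([](Page* p) { return !p->has_old_to_new_slots; });
    assert(sweeper.old_space_sweeping_list.size() == 1);  // only |b| remains
  }
  // The original list is back once the scope has been left.
  assert(sweeper.old_space_sweeping_list.size() == 2);
}
```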
......@@ -86,9 +86,17 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
target->IterateBody(target->map()->instance_type(), size, &visitor);
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
AllocationSpace space = page->owner()->identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
heap()->mark_compact_collector()->sweeper().AddPage(
space, reinterpret_cast<Page*>(page),
MarkCompactCollector::Sweeper::READD_TEMPORARY_REMOVED_PAGE);
}
}
void Scavenger::ScavengePage(MemoryChunk* page) {
base::LockGuard<base::RecursiveMutex> guard(page->mutex());
AnnounceLockedPage(page);
PreferredSweepingPage(page);
RememberedSet<OLD_TO_NEW>::Iterate(
page,
......@@ -102,6 +110,8 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
reinterpret_cast<Address>(addr));
});
});
AddPageToSweeperIfNecessary(page);
}
void Scavenger::Process(OneshotBarrier* barrier) {
......
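
After a page's OLD_TO_NEW slots have been processed, it is only handed back to the sweeper if it is an old-space page whose sweeping is not done yet; already-swept pages and pages of other spaces are left alone, and the re-add uses the mode that avoids double accounting. A rough sketch of that check with invented stand-in types:

```cpp
// Rough sketch of AddPageToSweeperIfNecessary's condition; the types and the
// re-add mode below are stand-ins, not the V8 API.
#include <cassert>
#include <deque>

enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE };
enum class AddPageMode { kRegular, kReaddTemporarilyRemovedPage };

struct Page {
  AllocationSpace space;
  bool sweeping_done;
};

struct Sweeper {
  std::deque<Page*> old_space_list;
  void AddPage(Page* p, AddPageMode) { old_space_list.push_back(p); }
};

// Only unswept old-space pages go back to the sweeper, and they go back in
// the "re-add" mode so that they are not accounted a second time.
void AddPageToSweeperIfNecessary(Sweeper* sweeper, Page* page) {
  if (page->space == OLD_SPACE && !page->sweeping_done) {
    sweeper->AddPage(page, AddPageMode::kReaddTemporarilyRemovedPage);
  }
}

int main() {
  Sweeper sweeper;
  Page unswept{OLD_SPACE, false}, swept{OLD_SPACE, true}, young{NEW_SPACE, false};
  AddPageToSweeperIfNecessary(&sweeper, &unswept);  // re-added
  AddPageToSweeperIfNecessary(&sweeper, &swept);    // skipped: already swept
  AddPageToSweeperIfNecessary(&sweeper, &young);    // skipped: not old space
  assert(sweeper.old_space_list.size() == 1);
}
```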
......@@ -42,8 +42,8 @@ class Scavenger {
size_t bytes_copied() const { return copied_size_; }
size_t bytes_promoted() const { return promoted_size_; }
void AnnounceLockedPage(MemoryChunk* chunk) {
allocator_.AnnounceLockedPage(chunk);
void PreferredSweepingPage(MemoryChunk* chunk) {
allocator_.PreferredSweepingPage(chunk);
}
private:
......@@ -56,6 +56,8 @@ class Scavenger {
inline void PageMemoryFence(Object* object);
void AddPageToSweeperIfNecessary(MemoryChunk* page);
// Potentially scavenges an object referenced from |slot_address| if it is
// indeed a HeapObject and resides in from space.
inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
......
......@@ -1427,7 +1427,7 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
: Space(heap, space, executable),
anchor_(this),
free_list_(this),
locked_page_(nullptr),
preferred_sweeping_page_(nullptr),
top_on_previous_step_(0) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
......@@ -3266,10 +3266,11 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Retry the free list allocation.
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
if (locked_page_ != nullptr) {
DCHECK_EQ(locked_page_->owner()->identity(), identity());
collector->sweeper().ParallelSweepPage(locked_page_, identity());
locked_page_ = nullptr;
if (preferred_sweeping_page_ != nullptr) {
DCHECK_EQ(preferred_sweeping_page_->owner()->identity(), identity());
collector->sweeper().ParallelSweepPage(preferred_sweeping_page_,
identity());
preferred_sweeping_page_ = nullptr;
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
......
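
On the slow allocation path the renamed preferred_sweeping_page_ keeps its old role: if it is set, that page is swept first and the free-list allocation retried before any other fallback. A simplified sketch of that ordering (the types, sizes and free-list model are placeholders, not V8's allocator):

```cpp
// Simplified sketch of the slow-path ordering; FreeList, Page and the byte
// counts are placeholders, not V8's allocator.
#include <cstddef>
#include <iostream>

struct Page {
  std::size_t sweepable_bytes;  // bytes that sweeping would free up
};

struct FreeList {
  std::size_t available = 0;
  bool Allocate(std::size_t size) {
    if (available < size) return false;
    available -= size;
    return true;
  }
};

struct PagedSpace {
  FreeList free_list;
  Page* preferred_sweeping_page = nullptr;

  void SweepPage(Page* page) {
    // Sweeping returns the page's dead bytes to the free list.
    free_list.available += page->sweepable_bytes;
    page->sweepable_bytes = 0;
  }

  bool SlowAllocate(std::size_t size) {
    if (free_list.Allocate(size)) return true;
    // Prefer the page this task already announced before any other fallback.
    if (preferred_sweeping_page != nullptr) {
      SweepPage(preferred_sweeping_page);
      preferred_sweeping_page = nullptr;
      if (free_list.Allocate(size)) return true;
    }
    return false;  // the real code continues with further fallbacks
  }
};

int main() {
  PagedSpace space;
  Page page{4096};
  space.preferred_sweeping_page = &page;
  std::cout << space.SlowAllocate(1024) << "\n";  // 1: satisfied after sweeping
}
```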
......@@ -458,6 +458,12 @@ class MemoryChunk {
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
template <RememberedSetType type>
bool ContainsSlots() {
return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
invalidated_slots() != nullptr;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* slot_set() {
if (access_mode == AccessMode::ATOMIC)
......@@ -2197,10 +2203,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
// Sets the page that is currently locked by the task using the space. This
// page will be preferred for sweeping to avoid a potential deadlock where
// multiple tasks hold locks on pages while trying to sweep each other's pages.
void AnnounceLockedPage(Page* page) { locked_page_ = page; }
// This page will be preferred for sweeping.
void PreferredSweepingPage(Page* page) { preferred_sweeping_page_ = page; }
Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
void SetAllocationInfo(Address top, Address limit);
......@@ -2278,7 +2282,7 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
Page* locked_page_;
Page* preferred_sweeping_page_;
Address top_on_previous_step_;
friend class IncrementalMarking;
......
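
ContainsSlots<OLD_TO_NEW>(), added in the spaces.h hunk above, is the predicate the filter in Heap::Scavenge() keys off: a chunk is interesting for the scavenger if it has a regular slot set, a typed slot set, or invalidated slots. A stand-alone approximation (the slot containers below are empty placeholders for V8's SlotSet, TypedSlotSet and InvalidatedSlots):

```cpp
// Stand-alone approximation of MemoryChunk::ContainsSlots<OLD_TO_NEW>(); the
// slot containers below are empty placeholders for V8's SlotSet, TypedSlotSet
// and InvalidatedSlots.
#include <cassert>

struct SlotSet {};
struct TypedSlotSet {};
struct InvalidatedSlots {};

struct MemoryChunk {
  SlotSet* old_to_new_slots = nullptr;
  TypedSlotSet* old_to_new_typed_slots = nullptr;
  InvalidatedSlots* invalidated_slots = nullptr;

  // A chunk matters to the scavenger if any of the three containers exists.
  bool ContainsOldToNewSlots() const {
    return old_to_new_slots != nullptr ||
           old_to_new_typed_slots != nullptr ||
           invalidated_slots != nullptr;
  }
};

int main() {
  MemoryChunk plain, with_typed;
  TypedSlotSet typed_set;
  with_typed.old_to_new_typed_slots = &typed_set;
  assert(!plain.ContainsOldToNewSlots());      // may stay with the sweeper
  assert(with_typed.ContainsOldToNewSlots());  // must be filtered for scavenging
}
```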