Commit 026a1000 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Update evacuation and implement sweeping in MinorMC

Bug: v8:12612
Change-Id: I28a574435646073d65e6fe1e746267ffb0eaa01d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3864083
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82932}
parent 2f41221f
......@@ -982,18 +982,20 @@ enum AllocationSpace {
OLD_SPACE, // Old generation regular object space.
CODE_SPACE, // Old generation code object space, marked executable.
MAP_SPACE, // Old generation map object space, non-movable.
NEW_SPACE, // Young generation space for regular objects collected
// with Scavenger/MinorMC.
LO_SPACE, // Old generation large object space.
CODE_LO_SPACE, // Old generation large code object space.
NEW_LO_SPACE, // Young generation large object space.
NEW_SPACE, // Young generation semispaces for regular objects collected with
// Scavenger.
FIRST_SPACE = RO_SPACE,
LAST_SPACE = NEW_SPACE,
LAST_SPACE = NEW_LO_SPACE,
FIRST_MUTABLE_SPACE = OLD_SPACE,
LAST_MUTABLE_SPACE = NEW_SPACE,
LAST_MUTABLE_SPACE = NEW_LO_SPACE,
FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
LAST_GROWABLE_PAGED_SPACE = MAP_SPACE
LAST_GROWABLE_PAGED_SPACE = MAP_SPACE,
FIRST_SWEEPABLE_SPACE = OLD_SPACE,
LAST_SWEEPABLE_SPACE = NEW_SPACE
};
constexpr int kSpaceTagSize = 4;
static_assert(FIRST_SPACE == 0);
......
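Reordering NEW_SPACE next to the old-generation paged spaces keeps the new FIRST_SWEEPABLE_SPACE..LAST_SWEEPABLE_SPACE range contiguous, which the sweeper relies on in the sweeper.h hunks further down. A minimal sketch of that idiom, with the enum trimmed to just the sweepable members (the trimming is the only liberty taken; the helper names match the diff):
// Sketch only: a contiguous enum range lets per-space bookkeeping use plain
// array indexing instead of a lookup table.
enum AllocationSpaceSketch {
  OLD_SPACE, CODE_SPACE, MAP_SPACE, NEW_SPACE,  // order matters
  FIRST_SWEEPABLE_SPACE = OLD_SPACE,
  LAST_SWEEPABLE_SPACE = NEW_SPACE,
};
constexpr int kNumberOfSweepingSpaces =
    LAST_SWEEPABLE_SPACE - FIRST_SWEEPABLE_SPACE + 1;  // == 4
constexpr bool IsValidSweepingSpace(AllocationSpaceSketch space) {
  return space >= FIRST_SWEEPABLE_SPACE && space <= LAST_SWEEPABLE_SPACE;
}
constexpr int GetSweepSpaceIndex(AllocationSpaceSketch space) {
  return space - FIRST_SWEEPABLE_SPACE;  // 0-based index into sweeping_list_
}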
......@@ -827,7 +827,6 @@ void GCTracer::PrintNVP() const {
"evacuate=%.2f "
"evacuate.copy=%.2f "
"evacuate.update_pointers=%.2f "
"evacuate.update_pointers.to_new_roots=%.2f "
"evacuate.update_pointers.slots=%.2f "
"background.mark=%.2f "
"background.evacuate.copy=%.2f "
......@@ -850,7 +849,6 @@ void GCTracer::PrintNVP() const {
current_scope(Scope::MINOR_MC_EVACUATE),
current_scope(Scope::MINOR_MC_EVACUATE_COPY),
current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS),
current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS),
current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS),
current_scope(Scope::MINOR_MC_BACKGROUND_MARKING),
current_scope(Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY),
......
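For orientation: PrintNVP() emits name=value pairs by lining up each "%.2f" fragment of the format string with one current_scope(...) argument, so the two hunks above remove one matching pair. A heavily trimmed sketch of that shape, assuming the PrintWithTimestamp helper used elsewhere in gc-tracer.cc:
// Sketch only; each format fragment must pair with exactly one argument.
heap_->isolate()->PrintWithTimestamp(
    "evacuate=%.2f "
    "evacuate.copy=%.2f ",
    current_scope(Scope::MINOR_MC_EVACUATE),
    current_scope(Scope::MINOR_MC_EVACUATE_COPY));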
......@@ -658,6 +658,10 @@ uintptr_t Heap::code_page_collection_memory_modification_scope_depth() {
return local_heap->code_page_collection_memory_modification_scope_depth_;
}
PagedNewSpace* Heap::paged_new_space() const {
return PagedNewSpace::From(new_space());
}
CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
if (heap_->write_protect_code_memory()) {
heap_->decrement_code_space_memory_modification_scope_depth();
......
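The new inline accessor centralizes the PagedNewSpace::From(new_space()) cast; the hunks below (heap-layout-tracer.cc, Heap::VerifyCommittedPhysicalMemory, setup-heap-internal.cc, sweeper.cc) all switch to it. The before/after call shape, taken from those hunks:
// Before: every caller re-derived the paged new space from the NewSpace*.
PagedNewSpace::From(heap->new_space())->paged_space()->VerifyCommittedPhysicalMemory();
// After: the cast lives in one place.
heap->paged_new_space()->paged_space()->VerifyCommittedPhysicalMemory();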
......@@ -51,9 +51,7 @@ void HeapLayoutTracer::PrintBasicMemoryChunk(std::ostream& os,
// static
void HeapLayoutTracer::PrintHeapLayout(std::ostream& os, Heap* heap) {
if (FLAG_minor_mc) {
const PagedNewSpace* paged_new_space =
PagedNewSpace::From(heap->new_space());
for (const Page* page : *paged_new_space) {
for (const Page* page : *heap->paged_new_space()) {
PrintBasicMemoryChunk(os, *page, "new_space");
}
} else {
......
......@@ -4452,7 +4452,7 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
// We need to refine the counters on pages that are already swept and have
// not been moved over to the actual space. Otherwise, the AccountingStats
// are just an over approximation.
space->RefillFreeList();
space->RefillFreeList(mark_compact_collector()->sweeper());
space->VerifyCountersBeforeConcurrentSweeping();
}
}
......@@ -4464,9 +4464,7 @@ void Heap::VerifyCommittedPhysicalMemory() {
space->VerifyCommittedPhysicalMemory();
}
if (FLAG_minor_mc && new_space()) {
PagedNewSpace::From(new_space())
->paged_space()
->VerifyCommittedPhysicalMemory();
paged_new_space()->paged_space()->VerifyCommittedPhysicalMemory();
}
}
#endif // DEBUG
......
......@@ -119,6 +119,7 @@ class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
class PagedNewSpace;
class ReadOnlyHeap;
class RootVisitor;
class RwxMemoryWriteScope;
......@@ -873,6 +874,7 @@ class Heap {
inline Address NewSpaceTop();
NewSpace* new_space() const { return new_space_; }
inline PagedNewSpace* paged_new_space() const;
OldSpace* old_space() const { return old_space_; }
OldSpace* shared_old_space() const { return shared_old_space_; }
CodeSpace* code_space() const { return code_space_; }
......
This diff is collapsed.
......@@ -29,6 +29,7 @@ class ItemParallelJob;
class LargeObjectSpace;
class LargePage;
class MigrationObserver;
class PagedNewSpace;
class ReadOnlySpace;
class RecordMigratedSlotVisitor;
class UpdatingItem;
......@@ -683,6 +684,7 @@ class MarkCompactCollector final : public CollectorBase {
// up other pages for sweeping. Does not start sweeper tasks.
void Sweep();
void StartSweepSpace(PagedSpace* space);
void StartSweepNewSpace();
void SweepLargeSpace(LargeObjectSpace* space);
void EvacuatePrologue();
......@@ -811,10 +813,8 @@ class MinorMarkCompactCollector final : public CollectorBase {
void Finish() final;
bool sweeping_in_progress() const final {
// TODO(v8:13012): Fix this once sweeping is implemented.
return false;
}
Sweeper* sweeper() { return sweeper_.get(); }
bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
void VisitObject(HeapObject obj) final;
......@@ -831,19 +831,15 @@ class MinorMarkCompactCollector final : public CollectorBase {
void TraceFragmentation();
void ClearNonLiveReferences();
void Sweep();
void StartSweepNewSpace();
void EvacuatePrologue();
void EvacuateEpilogue();
void Evacuate();
void EvacuatePagesInParallel();
void UpdatePointersAfterEvacuation();
std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
Address start,
Address end);
int CollectToSpaceUpdatingItems(
std::vector<std::unique_ptr<UpdatingItem>>* items);
void SweepArrayBufferExtensions();
std::unique_ptr<YoungGenerationMainMarkingVisitor> main_marking_visitor_;
......@@ -853,6 +849,8 @@ class MinorMarkCompactCollector final : public CollectorBase {
std::vector<Page*> promoted_pages_;
std::vector<LargePage*> promoted_large_pages_;
std::unique_ptr<Sweeper> sweeper_;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingJob;
friend class YoungGenerationMainMarkingVisitor;
......
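StartSweepNewSpace is declared here for both collectors but implemented in mark-compact.cc, presumably the collapsed diff above. A hedged sketch of what queuing the paged new space for sweeping plausibly looks like, mirroring the existing StartSweepSpace path; the free function name is hypothetical, and Sweeper::AddPage with its REGULAR mode is assumed from the pre-existing sweeper API rather than shown in this diff:
// Sketch only, not the collapsed implementation: hand each new-space page
// with live objects to the sweeper under the NEW_SPACE identity.
void StartSweepNewSpaceSketch(Heap* heap, Sweeper* sweeper,
                              NonAtomicMarkingState* marking_state) {
  PagedSpaceBase* space = heap->paged_new_space()->paged_space();
  for (Page* page : *space) {
    if (marking_state->live_bytes(page) > 0) {
      sweeper->AddPage(NEW_SPACE, page, Sweeper::REGULAR);  // assumed API
    }
  }
}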
......@@ -966,8 +966,6 @@ void PagedSpaceForNewSpace::Shrink() {
target_capacity_ = current_capacity_;
}
void PagedSpaceForNewSpace::EvacuatePrologue() { FreeLinearAllocationArea(); }
void PagedSpaceForNewSpace::UpdateInlineAllocationLimit(size_t size_in_bytes) {
PagedSpaceBase::UpdateInlineAllocationLimit(size_in_bytes);
}
......
......@@ -584,7 +584,7 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
}
// Reset the allocation pointer.
void EvacuatePrologue();
void EvacuatePrologue() {}
void EvacuateEpilogue() { allocated_linear_areas_ = 0; }
// When inline allocation stepping is active, either because of incremental
......
......@@ -23,6 +23,7 @@
#include "src/heap/read-only-heap.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/string.h"
#include "src/utils/utils.h"
......@@ -138,17 +139,17 @@ void PagedSpaceBase::TearDown() {
accounting_stats_.Clear();
}
void PagedSpaceBase::RefillFreeList() {
void PagedSpaceBase::RefillFreeList(Sweeper* sweeper) {
// Any PagedSpace might invoke RefillFreeList. We filter out all but the
// sweepable spaces (old-generation paged spaces and, with MinorMC, new space).
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
identity() == MAP_SPACE || identity() == NEW_SPACE);
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
while ((p = sweeper->GetSweptPageSafe(this)) != nullptr) {
// We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
// entries here to make them unavailable for allocations.
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
......@@ -162,6 +163,7 @@ void PagedSpaceBase::RefillFreeList() {
// during compaction.
if (is_compaction_space()) {
DCHECK_NE(this, p->owner());
DCHECK_NE(NEW_SPACE, identity());
PagedSpaceBase* owner = reinterpret_cast<PagedSpaceBase*>(p->owner());
base::MutexGuard guard(owner->mutex());
owner->RefineAllocatedBytesAfterSweeping(p);
......@@ -282,7 +284,7 @@ bool PagedSpaceBase::ContainsSlow(Address addr) const {
void PagedSpaceBase::RefineAllocatedBytesAfterSweeping(Page* page) {
CHECK(page->SweepingDone());
auto marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
heap()->mark_compact_collector()->non_atomic_marking_state();
// The live_byte on the page was accounted in the space allocated
// bytes counter. After sweeping allocated_bytes() contains the
// accurate live byte count on the page.
......@@ -329,7 +331,13 @@ void PagedSpaceBase::RemovePage(Page* page) {
if (identity() == NEW_SPACE) {
page->ReleaseFreeListCategories();
}
DecreaseAllocatedBytes(page->allocated_bytes(), page);
// Pages are only removed from new space when they are promoted to old space
// during a GC. This happens after sweeping has started and the allocation
// counters have been reset.
DCHECK_IMPLIES(identity() == NEW_SPACE, Size() == 0);
if (identity() != NEW_SPACE) {
DecreaseAllocatedBytes(page->allocated_bytes(), page);
}
DecreaseCapacity(page->area_size());
AccountUncommitted(page->size());
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
......@@ -662,7 +670,7 @@ PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
if (collector->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
RefillFreeList(collector->sweeper());
// Retry the free list allocation.
result = TryAllocationFromFreeListBackground(min_size_in_bytes,
......@@ -677,7 +685,8 @@ PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);
RefillFreeList();
// Keep new space sweeping atomic.
RefillFreeList(collector->sweeper());
if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
result = TryAllocationFromFreeListBackground(min_size_in_bytes,
......@@ -699,7 +708,7 @@ PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
collector->DrainSweepingWorklistForSpace(identity());
}
RefillFreeList();
RefillFreeList(collector->sweeper());
// Last try to acquire memory from free list.
return TryAllocationFromFreeListBackground(min_size_in_bytes,
......@@ -985,7 +994,7 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
if (collector->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
RefillFreeList(collector->sweeper());
// Retry the free list allocation.
if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
......@@ -1049,7 +1058,7 @@ bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
if (collector->sweeping_in_progress()) {
collector->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
required_freed_bytes, max_pages);
RefillFreeList();
RefillFreeList(collector->sweeper());
return TryAllocationFromFreeListMain(size_in_bytes, origin);
}
return false;
......
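Threading the Sweeper* through RefillFreeList decouples free-list refilling from heap()->mark_compact_collector(): the caller now decides which collector's sweeper feeds the space, which is what allows the paged new space to be refilled from the MinorMC sweeper. Hedged examples of the resulting call shapes; only sweeper() appears in this diff, and the minor_mark_compact_collector() accessor name is an assumption:
// Full GC path, as in the hunks above:
space->RefillFreeList(heap->mark_compact_collector()->sweeper());
// Young GC path with --minor-mc (accessor name assumed):
space->RefillFreeList(heap->minor_mark_compact_collector()->sweeper());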
......@@ -31,6 +31,7 @@ class HeapObject;
class Isolate;
class ObjectVisitor;
class PagedSpaceBase;
class Sweeper;
// -----------------------------------------------------------------------------
// Heap object iterator in paged spaces.
......@@ -211,8 +212,6 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
accounting_stats_.IncreaseCapacity(bytes);
}
void RefineAllocatedBytesAfterSweeping(Page* page);
Page* InitializePage(MemoryChunk* chunk) override;
virtual void ReleasePage(Page* page);
......@@ -281,7 +280,7 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
void RefillFreeList(Sweeper* sweeper);
base::Mutex* mutex() { return &space_mutex_; }
......@@ -343,6 +342,8 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
return !is_compaction_space();
}
void RefineAllocatedBytesAfterSweeping(Page* page);
protected:
void UpdateInlineAllocationLimit(size_t min_size) override;
......
......@@ -76,10 +76,7 @@ bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
if (FLAG_minor_mc && new_space()) {
PagedNewSpace::From(new_space())
->paged_space()
->free_list()
->RepairLists(this);
paged_new_space()->paged_space()->free_list()->RepairLists(this);
}
CreateApiObjects();
......
......@@ -7,6 +7,7 @@
#include <memory>
#include <vector>
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/base/active-system-pages.h"
......@@ -16,28 +17,41 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
namespace {
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
} // namespace
class Sweeper::ConcurrentSweeper final {
public:
explicit ConcurrentSweeper(Sweeper* sweeper) : sweeper_(sweeper) {}
explicit ConcurrentSweeper(Sweeper* sweeper)
: sweeper_(sweeper),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}
bool ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate) {
while (!delegate->ShouldYield()) {
Page* page = sweeper_->GetSweepingPageSafe(identity);
if (page == nullptr) return true;
sweeper_->ParallelSweepPage(page, identity,
sweeper_->ParallelSweepPage(page, identity, &local_pretenuring_feedback_,
SweepingMode::kLazyOrConcurrent);
}
return false;
}
Heap::PretenuringFeedbackMap* local_pretenuring_feedback() {
return &local_pretenuring_feedback_;
}
private:
Sweeper* const sweeper_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
};
class Sweeper::SweeperJob final : public JobTask {
......@@ -80,8 +94,7 @@ class Sweeper::SweeperJob final : public JobTask {
ConcurrentSweeper& sweeper = (*concurrent_sweepers_)[offset];
for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
const AllocationSpace space_id = static_cast<AllocationSpace>(
FIRST_GROWABLE_PAGED_SPACE +
((i + offset) % kNumberOfSweepingSpaces));
FIRST_SWEEPABLE_SPACE + ((i + offset) % kNumberOfSweepingSpaces));
DCHECK(IsValidSweepingSpace(space_id));
if (!sweeper.ConcurrentSweepSpace(space_id, delegate)) return;
}
......@@ -96,9 +109,13 @@ Sweeper::Sweeper(Heap* heap, NonAtomicMarkingState* marking_state)
: heap_(heap),
marking_state_(marking_state),
sweeping_in_progress_(false),
should_reduce_memory_(false) {}
should_reduce_memory_(false),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}
Sweeper::~Sweeper() { DCHECK(concurrent_sweepers_.empty()); }
Sweeper::~Sweeper() {
DCHECK(concurrent_sweepers_.empty());
DCHECK(local_pretenuring_feedback_.empty());
}
Sweeper::PauseScope::PauseScope(Sweeper* sweeper) : sweeper_(sweeper) {
if (!sweeper_->sweeping_in_progress()) return;
......@@ -143,11 +160,10 @@ void Sweeper::TearDown() {
}
void Sweeper::StartSweeping() {
DCHECK(local_pretenuring_feedback_.empty());
sweeping_in_progress_ = true;
should_reduce_memory_ = heap_->ShouldReduceMemory();
NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
ForAllSweepingSpaces([this](AllocationSpace space) {
// Sorting is done in order to make compaction more efficient: by sweeping
// pages with the most free bytes first, we make it more likely that when
// evacuating a page, already swept pages will have enough free bytes to
......@@ -158,7 +174,7 @@ void Sweeper::StartSweeping() {
int space_index = GetSweepSpaceIndex(space);
std::sort(
sweeping_list_[space_index].begin(), sweeping_list_[space_index].end(),
[marking_state](Page* a, Page* b) {
[marking_state = marking_state_](Page* a, Page* b) {
return marking_state->live_bytes(a) > marking_state->live_bytes(b);
});
});
......@@ -198,13 +214,13 @@ Page* Sweeper::GetSweptPageSafe(PagedSpaceBase* space) {
return nullptr;
}
void Sweeper::EnsureCompleted() {
void Sweeper::EnsureCompleted(SweepingMode sweeping_mode) {
if (!sweeping_in_progress_) return;
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces([this](AllocationSpace space) {
ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
ForAllSweepingSpaces([this, sweeping_mode](AllocationSpace space) {
ParallelSweepSpace(space, sweeping_mode, 0);
});
if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
......@@ -213,7 +229,14 @@ void Sweeper::EnsureCompleted() {
CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
});
heap_->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
heap_->MergeAllocationSitePretenuringFeedback(
*concurrent_sweeper.local_pretenuring_feedback());
}
local_pretenuring_feedback_.clear();
concurrent_sweepers_.clear();
sweeping_in_progress_ = false;
}
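Each ConcurrentSweeper carries its own PretenuringFeedbackMap so RawSweep can record allocation-site feedback without synchronization; EnsureCompleted then folds the main-thread map and every per-sweeper map back into the heap. A self-contained sketch of that accumulate-then-merge idiom using plain std types as stand-ins for the V8 classes:
#include <cstddef>
#include <unordered_map>
#include <vector>
// Stand-in for Heap::PretenuringFeedbackMap: allocation site -> objects found.
using FeedbackMap = std::unordered_map<int, size_t>;
void MergeAllFeedback(std::vector<FeedbackMap>& per_sweeper,
                      FeedbackMap& heap_map) {
  for (FeedbackMap& local : per_sweeper) {
    for (const auto& entry : local) heap_map[entry.first] += entry.second;
    local.clear();  // mirrors local_pretenuring_feedback_.clear() above
  }
}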
......@@ -322,13 +345,17 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(Page* page,
DCHECK_EQ(live_bytes, page->allocated_bytes());
}
int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
SweepingMode sweeping_mode,
const base::MutexGuard& page_guard) {
int Sweeper::RawSweep(
Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
space->identity() == MAP_SPACE);
space->identity() == MAP_SPACE ||
(space->identity() == NEW_SPACE && FLAG_minor_mc));
DCHECK_IMPLIES(space->identity() == NEW_SPACE,
sweeping_mode == SweepingMode::kEagerDuringGC);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
// Phase 1: Prepare the page for sweeping.
......@@ -410,6 +437,10 @@ int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
live_bytes += size;
free_start = free_end + size;
if (p->InYoungGeneration()) {
heap_->UpdateAllocationSite(map, object, local_pretenuring_feedback);
}
if (active_system_pages_after_sweeping) {
active_system_pages_after_sweeping->Add(
free_end - p->address(), free_start - p->address(),
......@@ -456,7 +487,9 @@ int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
size_t Sweeper::ConcurrentSweepingPageCount() {
base::MutexGuard guard(&mutex_);
return sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)].size() +
sweeping_list_[GetSweepSpaceIndex(MAP_SPACE)].size();
sweeping_list_[GetSweepSpaceIndex(MAP_SPACE)].size() +
(FLAG_minor_mc ? sweeping_list_[GetSweepSpaceIndex(NEW_SPACE)].size()
: 0);
}
int Sweeper::ParallelSweepSpace(AllocationSpace identity,
......@@ -466,7 +499,8 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
int freed = ParallelSweepPage(page, identity, sweeping_mode);
int freed = ParallelSweepPage(page, identity, &local_pretenuring_feedback_,
sweeping_mode);
++pages_freed;
if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// Free list of a never-allocate page will be dropped later on.
......@@ -481,8 +515,10 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
return max_freed;
}
int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
SweepingMode sweeping_mode) {
int Sweeper::ParallelSweepPage(
Page* page, AllocationSpace identity,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
SweepingMode sweeping_mode) {
DCHECK(IsValidSweepingSpace(identity));
// The Scavenger may add already swept pages back.
......@@ -503,7 +539,8 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
const FreeSpaceTreatmentMode free_space_treatment_mode =
Heap::ShouldZapGarbage() ? FreeSpaceTreatmentMode::kZapFreeSpace
: FreeSpaceTreatmentMode::kIgnoreFreeSpace;
max_freed = RawSweep(page, free_space_treatment_mode, sweeping_mode, guard);
max_freed = RawSweep(page, free_space_treatment_mode, sweeping_mode, guard,
local_pretenuring_feedback);
DCHECK(page->SweepingDone());
}
......@@ -522,7 +559,8 @@ void Sweeper::EnsurePageIsSwept(Page* page) {
if (IsValidSweepingSpace(space)) {
if (TryRemoveSweepingPageSafe(space, page)) {
// Page was successfully removed and can now be swept.
ParallelSweepPage(page, space, SweepingMode::kLazyOrConcurrent);
ParallelSweepPage(page, space, &local_pretenuring_feedback_,
SweepingMode::kLazyOrConcurrent);
} else {
// Some sweeper task already took ownership of that page, wait until
// sweeping is finished.
......@@ -578,8 +616,14 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
});
#endif // DEBUG
page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
PagedSpaceBase* paged_space;
if (space == NEW_SPACE) {
DCHECK(FLAG_minor_mc);
paged_space = heap_->paged_new_space()->paged_space();
} else {
paged_space = heap_->paged_space(space);
}
paged_space->IncreaseAllocatedBytes(marking_state_->live_bytes(page), page);
}
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
......
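The branch added to PrepareToBeSweptPage is the one place the sweeper special-cases NEW_SPACE, presumably because Heap::paged_space() covers only the old-generation paged spaces. The same dispatch, factored into a hypothetical helper for clarity:
// Sketch: resolve a sweepable space identity to the PagedSpaceBase that owns
// its accounting, matching the branch in PrepareToBeSweptPage above.
PagedSpaceBase* ResolveSweepableSpace(Heap* heap, AllocationSpace space) {
  if (space == NEW_SPACE) {
    DCHECK(FLAG_minor_mc);  // the paged new space only exists under --minor-mc
    return heap->paged_new_space()->paged_space();
  }
  return heap->paged_space(space);
}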
......@@ -11,6 +11,8 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/tasks/cancelable-task.h"
......@@ -85,20 +87,24 @@ class Sweeper {
int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
int required_freed_bytes, int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity,
SweepingMode sweeping_mode);
int ParallelSweepPage(
Page* page, AllocationSpace identity,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
SweepingMode sweeping_mode);
void EnsurePageIsSwept(Page* page);
int RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
SweepingMode sweeping_mode, const base::MutexGuard& page_guard);
SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
void StartSweeping();
V8_EXPORT_PRIVATE void StartSweeperTasks();
void EnsureCompleted();
void EnsureCompleted(
SweepingMode sweeping_mode = SweepingMode::kLazyOrConcurrent);
void DrainSweepingWorklistForSpace(AllocationSpace space);
bool AreSweeperTasksRunning();
......@@ -107,16 +113,21 @@ class Sweeper {
Page* GetSweptPageSafe(PagedSpaceBase* space);
NonAtomicMarkingState* marking_state() const { return marking_state_; }
private:
class ConcurrentSweeper;
class SweeperJob;
static const int kNumberOfSweepingSpaces =
LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
LAST_SWEEPABLE_SPACE - FIRST_SWEEPABLE_SPACE + 1;
static constexpr int kMaxSweeperTasks = 3;
template <typename Callback>
void ForAllSweepingSpaces(Callback callback) const {
if (FLAG_minor_mc) {
callback(NEW_SPACE);
}
callback(OLD_SPACE);
callback(CODE_SPACE);
callback(MAP_SPACE);
......@@ -165,13 +176,12 @@ class Sweeper {
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
static bool IsValidSweepingSpace(AllocationSpace space) {
return space >= FIRST_GROWABLE_PAGED_SPACE &&
space <= LAST_GROWABLE_PAGED_SPACE;
return space >= FIRST_SWEEPABLE_SPACE && space <= LAST_SWEEPABLE_SPACE;
}
static int GetSweepSpaceIndex(AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
return space - FIRST_GROWABLE_PAGED_SPACE;
return space - FIRST_SWEEPABLE_SPACE;
}
int NumberOfConcurrentSweepers() const;
......@@ -188,6 +198,7 @@ class Sweeper {
// path checks this flag to see whether it could support concurrent sweeping.
std::atomic<bool> sweeping_in_progress_;
bool should_reduce_memory_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
};
} // namespace internal
......
......@@ -577,6 +577,7 @@
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH_SWEEP_NEW_LO) \
F(MC_FINISH_SWEEP_NEW) \
F(MC_FINISH_SWEEP_ARRAY_BUFFERS) \
F(MC_MARK_CLIENT_HEAPS) \
F(MC_MARK_EMBEDDER_PROLOGUE) \
......@@ -593,6 +594,7 @@
F(MC_SWEEP_CODE_LO) \
F(MC_SWEEP_LO) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_NEW) \
F(MC_SWEEP_OLD) \
F(MINOR_MARK_COMPACTOR) \
F(MINOR_MC) \
......@@ -610,10 +612,10 @@
F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MINOR_MC_FINISH) \
F(MINOR_MC_FINISH_SWEEP_ARRAY_BUFFERS) \
F(MINOR_MC_FINISH_SWEEP_NEW) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
F(MINOR_MC_MARK_PARALLEL) \
......@@ -622,6 +624,8 @@
F(MINOR_MC_MARK_WEAK) \
F(MINOR_MC_MARKING_DEQUE) \
F(MINOR_MC_RESET_LIVENESS) \
F(MINOR_MC_SWEEP) \
F(MINOR_MC_SWEEP_NEW) \
F(SAFEPOINT) \
F(SCAVENGER) \
F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \
......
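Hedged usage note for the new scope ids: inside the collectors they are typically opened with the existing TRACE_GC macro around the new sweeping phases; the exact call sites are in the collapsed mark-compact.cc diff, so the sequence below is an assumption:
// Sketch of a typical call site (ordering assumed):
{
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP);
  StartSweepNewSpace();
  sweeper()->StartSweeping();
}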