Commit e79d34ee authored by Dominik Inführ, committed by Commit Bot

[heap] Slots are either in sweeping or old-to-new RS after Full GC

This CL ensures that recorded slots are either in the sweeping or the
old-to-new remembered set after mark-compact, depending on whether the
page has already been swept.

All pages that are swept during the evacuation phase also have their
remembered sets merged. is_local() is renamed to is_compaction_space()
and made non-virtual. PagedSpace now knows not only whether it is a
compaction space but also for which kind of collection, through the new
compaction_space_kind_ field. This allows RefillFreeList to merge the
remembered sets immediately for the mark-compact collection as well.

Change-Id: I7457f8393d73f3e8d6b6ebedc46ebc36af509729
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1868613
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64458}
parent e8a514a0
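To make the gating described in the commit message concrete, here is a minimal, standalone C++ sketch. It is not part of the CL: Page, PagedSpace, and RefillFreeList below are simplified, hypothetical stand-ins for the real V8 classes, showing only how a space that carries a CompactionSpaceKind can decide whether RefillFreeList may merge a page's old-to-new remembered set.

#include <cstdio>
#include <vector>

// Hypothetical, simplified stand-ins for illustration only.
enum class CompactionSpaceKind { kNone, kScavenge, kMarkCompact, kMinorMarkCompact };

struct Page {
  bool old_to_new_merged = false;
  void MergeOldToNewRememberedSets() { old_to_new_merged = true; }
};

class PagedSpace {
 public:
  explicit PagedSpace(CompactionSpaceKind kind) : compaction_space_kind_(kind) {}

  bool is_compaction_space() const {
    return compaction_space_kind_ != CompactionSpaceKind::kNone;
  }
  CompactionSpaceKind compaction_space_kind() const { return compaction_space_kind_; }

  // Simplified RefillFreeList: old-to-new remembered sets are merged unless the
  // space is serving a scavenge, where another thread may still iterate them.
  void RefillFreeList(std::vector<Page>& swept_pages) {
    for (Page& p : swept_pages) {
      if (compaction_space_kind() != CompactionSpaceKind::kScavenge) {
        p.MergeOldToNewRememberedSets();
      }
    }
  }

 private:
  CompactionSpaceKind compaction_space_kind_;
};

int main() {
  std::vector<Page> pages(2);
  PagedSpace mark_compact_space(CompactionSpaceKind::kMarkCompact);
  mark_compact_space.RefillFreeList(pages);  // merges immediately (not a scavenge)
  PagedSpace scavenge_space(CompactionSpaceKind::kScavenge);
  scavenge_space.RefillFreeList(pages);  // skips merging to avoid the data race
  std::printf("merged: %d %d\n", pages[0].old_to_new_merged, pages[1].old_to_new_merged);
  return 0;
}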
@@ -758,6 +758,13 @@ enum MinimumCapacity {
enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
enum class CompactionSpaceKind {
kNone,
kScavenge,
kMarkCompact,
kMinorMarkCompact,
};
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum VisitMode {
@@ -19,10 +19,10 @@ class LocalAllocator {
static const int kLabSize = 32 * KB;
static const int kMaxLabObjectSize = 8 * KB;
explicit LocalAllocator(Heap* heap)
explicit LocalAllocator(Heap* heap, CompactionSpaceKind compaction_space_kind)
: heap_(heap),
new_space_(heap->new_space()),
compaction_spaces_(heap),
compaction_spaces_(heap, compaction_space_kind),
new_space_lab_(LocalAllocationBuffer::InvalidBuffer()),
lab_allocation_will_fail_(false) {}
This diff is collapsed.
@@ -402,7 +402,7 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
copied_size_(0),
promoted_size_(0),
allocator_(heap),
allocator_(heap, CompactionSpaceKind::kScavenge),
is_logging_(is_logging),
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
is_compacting_(heap->incremental_marking()->IsCompacting()) {}
@@ -480,7 +480,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
if (!result.IsRetry() && result.To(&heap_obj) && !is_compaction_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
@@ -1639,8 +1639,11 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable, FreeList* free_list)
: SpaceWithLinearArea(heap, space, free_list), executable_(executable) {
Executability executable, FreeList* free_list,
CompactionSpaceKind compaction_space_kind)
: SpaceWithLinearArea(heap, space, free_list),
executable_(executable),
compaction_space_kind_(compaction_space_kind) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
@@ -1676,18 +1679,17 @@ void PagedSpace::RefillFreeList() {
});
}
// Also merge old-to-new remembered sets outside of collections.
// Do not do this during GC, because of races during scavenges.
// One thread might iterate remembered set, while another thread merges
// them.
if (!is_local()) {
// Also merge old-to-new remembered sets if not scavenging because of
// data races: One thread might iterate remembered set, while another
// thread merges them.
if (compaction_space_kind() != CompactionSpaceKind::kScavenge) {
p->MergeOldToNewRememberedSets();
}
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
if (is_local()) {
if (is_compaction_space()) {
DCHECK_NE(this, p->owner());
PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
base::MutexGuard guard(owner->mutex());
@@ -1701,7 +1703,7 @@ void PagedSpace::RefillFreeList() {
added += RelinkFreeListCategories(p);
}
added += p->wasted_memory();
if (is_local() && (added > kCompactionMemoryWanted)) break;
if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
}
}
}
@@ -2047,7 +2049,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
// if it is big enough.
FreeLinearAllocationArea();
if (!is_local()) {
if (!is_compaction_space()) {
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
@@ -3741,7 +3743,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
if (FLAG_concurrent_sweeping && !is_local() &&
if (FLAG_concurrent_sweeping && !is_compaction_space() &&
!collector->sweeper()->AreSweeperTasksRunning()) {
collector->EnsureSweepingCompleted();
}
@@ -3759,8 +3761,9 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
// final atomic pause.
Sweeper::FreeSpaceMayContainInvalidatedSlots
invalidated_slots_in_free_space =
is_local() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
: Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
is_compaction_space()
? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
: Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
// If sweeping is still in progress try to sweep pages.
int max_freed = collector->sweeper()->ParallelSweepSpace(
@@ -3774,7 +3777,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
}
}
if (is_local()) {
if (is_compaction_space()) {
// The main thread may have acquired all swept pages. Try to steal from
// it. This can only happen during young generation evacuation.
PagedSpace* main_space = heap()->paged_space(identity());
@@ -2285,8 +2285,10 @@ class V8_EXPORT_PRIVATE PagedSpace
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list);
PagedSpace(
Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list,
CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
@@ -2462,7 +2464,11 @@ class V8_EXPORT_PRIVATE PagedSpace
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return static_cast<int>(area_size_); }
virtual bool is_local() { return false; }
bool is_compaction_space() {
return compaction_space_kind_ != CompactionSpaceKind::kNone;
}
CompactionSpaceKind compaction_space_kind() { return compaction_space_kind_; }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
@@ -2503,7 +2509,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsInlineAllocation() override {
return identity() == OLD_SPACE && !is_local();
return identity() == OLD_SPACE && !is_compaction_space();
}
protected:
@@ -2560,6 +2566,8 @@ class V8_EXPORT_PRIVATE PagedSpace
Executability executable_;
CompactionSpaceKind compaction_space_kind_;
size_t area_size_;
// Accounting information for this space.
@@ -3035,10 +3043,12 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
: PagedSpace(heap, id, executable, FreeList::CreateFreeList()) {}
bool is_local() override { return true; }
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
CompactionSpaceKind compaction_space_kind)
: PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
compaction_space_kind) {
DCHECK_NE(compaction_space_kind, CompactionSpaceKind::kNone);
}
protected:
// The space is temporary and not included in any snapshots.
@@ -3054,9 +3064,12 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
explicit CompactionSpaceCollection(Heap* heap,
CompactionSpaceKind compaction_space_kind)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
compaction_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
compaction_space_kind) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
@@ -214,6 +214,15 @@ Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
return nullptr;
}
void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
base::MutexGuard guard(&mutex_);
ForAllSweepingSpaces([this](AllocationSpace space) {
SweptList& swept_list = swept_list_[GetSweepSpaceIndex(space)];
for (Page* p : swept_list) p->MergeOldToNewRememberedSets();
});
}
void Sweeper::AbortAndWaitForTasks() {
if (!FLAG_concurrent_sweeping) return;
@@ -110,6 +110,7 @@ class Sweeper {
void AddPageForIterability(Page* page);
void StartIterabilityTasks();
void EnsureIterabilityCompleted();
void MergeOldToNewRememberedSetsForSweptPages();
private:
class IncrementalSweeperTask;
@@ -18,8 +18,8 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
OldSpace* old_space = heap->old_space();
EXPECT_TRUE(old_space != nullptr);
CompactionSpace* compaction_space =
new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
CompactionSpace* compaction_space = new CompactionSpace(
heap, OLD_SPACE, NOT_EXECUTABLE, CompactionSpaceKind::kMarkCompact);
EXPECT_TRUE(compaction_space != nullptr);
for (Page* p : *old_space) {