Commit 167a8946 authored by Dominik Inführ, committed by Commit Bot

[heap] Split old-to-new remembered set

Split OLD_TO_NEW remembered set and add OLD_TO_NEW_SWEEPING. The
OLD_TO_NEW remembered set is moved to OLD_TO_NEW_SWEEPING during
mark-compact. OLD_TO_NEW_SWEEPING is then modified by the sweeper.
Before the page is used again, OLD_TO_NEW and OLD_TO_NEW_SWEEPING are
merged back into a single set.

This means that only the main thread modifies OLD_TO_NEW, while the
sweeper only removes entries from OLD_TO_NEW_SWEEPING. A subsequent CL
can use this property to make those accesses non-atomic.
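
To make the scheme concrete, here is a minimal stand-alone sketch of that
lifecycle. std::set and std::unique_ptr stand in for V8's bucketed SlotSet
and its raw-pointer fields; only the method names mirror this CL, the types
are simplified assumptions.

#include <cstdint>
#include <memory>
#include <set>

using Address = uintptr_t;
using SlotSet = std::set<Address>;  // stand-in for V8's bucketed SlotSet

struct PageSketch {
  std::unique_ptr<SlotSet> old_to_new;           // mutated by the main thread
  std::unique_ptr<SlotSet> old_to_new_sweeping;  // only shrunk by the sweeper

  // Mark-compact hands the current OLD_TO_NEW set over to the sweeper.
  void MoveOldToNewRememberedSetForSweeping() {
    old_to_new_sweeping = std::move(old_to_new);
  }

  // Before the mutator reuses the page: fold entries recorded in the
  // meantime into the swept set, then make it the OLD_TO_NEW set again.
  void MergeOldToNewRememberedSets() {
    if (!old_to_new_sweeping) return;
    if (old_to_new) {
      old_to_new_sweeping->insert(old_to_new->begin(), old_to_new->end());
    }
    old_to_new = std::move(old_to_new_sweeping);
  }
};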

Bug: v8:9454
Change-Id: I9057cf85818d647775ae4c7beec4c8ccf73e18f7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1771783
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63807}
parent 97c89ebb
@@ -4115,6 +4115,17 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
return KEEP_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
if (direction == OLD_TO_NEW) {
RememberedSetSweeping::Iterate(
chunk,
[start, end, untyped](MaybeObjectSlot slot) {
if (start <= slot.address() && slot.address() < end) {
untyped->insert(slot.address());
}
return KEEP_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
RememberedSet<direction>::IterateTyped(
chunk, [=](SlotType type, Address slot) {
if (start <= slot && slot < end) {
@@ -5547,8 +5558,10 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
RememberedSetSweeping::Remove(page, slot.address());
}
#endif
}
@@ -5561,8 +5574,9 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
Page* page = Page::FromAddress(slot.address());
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
// Slots are filtered with invalidated slots.
CHECK_IMPLIES(RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
#endif
@@ -5575,9 +5589,12 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSetSweeping::RemoveRange(page, start, end,
SlotSet::KEEP_EMPTY_BUCKETS);
}
#endif
}
...
@@ -855,7 +855,6 @@ class Heap {
static Address store_buffer_overflow_function_address();
void MoveStoreBufferEntriesToRememberedSet();
void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
...
@@ -3421,6 +3421,17 @@ class RememberedSetUpdatingItem : public UpdatingItem {
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSetSweeping::Iterate(
chunk_,
[this, &filter](MaybeObjectSlot slot) {
CHECK(filter.IsValid(slot.address()));
return CheckAndUpdateOldToNewSlot(slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
// The invalidated slots are not needed after old-to-new slots were
// processed.
@@ -3437,6 +3448,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
chunk_->ReleaseSlotSet<OLD_TO_OLD>();
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
@@ -3556,15 +3568,18 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
const bool contains_old_to_new_sweeping_slots =
chunk->sweeping_slot_set() != nullptr;
const bool contains_old_to_old_invalidated_slots =
chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
const bool contains_old_to_new_invalidated_slots =
chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
!contains_old_to_old_invalidated_slots &&
if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots &&
!contains_old_to_old_slots && !contains_old_to_old_invalidated_slots &&
!contains_old_to_new_invalidated_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
contains_old_to_new_sweeping_slots ||
contains_old_to_old_invalidated_slots ||
contains_old_to_new_invalidated_slots) {
job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
@@ -4651,6 +4666,14 @@ class PageMarkingItem : public MarkingItem {
return CheckAndMarkObject(task, slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSetSweeping::Iterate(
chunk_,
[this, task, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndMarkObject(task, slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
...
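
The updating and marking items above run one and the same slot callback over
both sets. A hedged sketch of that iteration pattern, again with simplified
stand-in types rather than the real RememberedSet API:

#include <cstdint>
#include <iterator>
#include <set>

using Address = uintptr_t;
using SlotSet = std::set<Address>;
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Applies |callback| to the regular OLD_TO_NEW set and the sweeping set,
// dropping every slot for which the callback returns REMOVE_SLOT.
template <typename Callback>
void IterateBothOldToNewSets(SlotSet* regular, SlotSet* sweeping,
                             Callback callback) {
  for (SlotSet* set : {regular, sweeping}) {
    if (set == nullptr) continue;  // a page may own either set, both, or none
    for (auto it = set->begin(); it != set->end();) {
      it = (callback(*it) == REMOVE_SLOT) ? set->erase(it) : std::next(it);
    }
  }
}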
@@ -18,54 +18,38 @@ namespace internal {
enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
// TODO(ulan): Investigate performance of de-templatizing this class.
template <RememberedSetType type>
class RememberedSet : public AllStatic {
class RememberedSetOperations {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
template <AccessMode access_mode = AccessMode::ATOMIC>
static void Insert(MemoryChunk* chunk, Address slot_addr) {
template <AccessMode access_mode>
static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->slot_set<type, access_mode>();
if (slot_set == nullptr) {
slot_set = chunk->AllocateSlotSet<type>();
}
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Insert<access_mode>(offset %
Page::kPageSize);
}
// Given a page and a slot in that page, this function returns true if
// the remembered set contains the slot.
static bool Contains(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->slot_set<type>();
if (slot_set == nullptr) {
return false;
template <typename Callback>
static void Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) {
slots[page].Iterate(callback, mode);
}
}
uintptr_t offset = slot_addr - chunk->address();
return slot_set[offset / Page::kPageSize].Contains(offset %
Page::kPageSize);
}
// Given a page and a slot in that page, this function removes the slot from
// the remembered set.
// If the slot was never added, then the function does nothing.
static void Remove(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->slot_set<type>();
static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
if (slot_set != nullptr) {
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
}
}
// Given a page and a range of slots in that page, this function removes the
// slots from the remembered set.
static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
SlotSet::EmptyBucketMode mode) {
SlotSet* slot_set = chunk->slot_set<type>();
static void RemoveRange(SlotSet* slot_set, MemoryChunk* chunk, Address start,
Address end, SlotSet::EmptyBucketMode mode) {
if (slot_set != nullptr) {
uintptr_t start_offset = start - chunk->address();
uintptr_t end_offset = end - chunk->address();
@@ -101,6 +85,53 @@ class RememberedSet : public AllStatic {
}
}
}
};
// TODO(ulan): Investigate performance of de-templatizing this class.
template <RememberedSetType type>
class RememberedSet : public AllStatic {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
template <AccessMode access_mode = AccessMode::ATOMIC>
static void Insert(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->slot_set<type, access_mode>();
if (slot_set == nullptr) {
slot_set = chunk->AllocateSlotSet<type>();
}
RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
}
// Given a page and a slot in that page, this function returns true if
// the remembered set contains the slot.
static bool Contains(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->slot_set<type>();
if (slot_set == nullptr) {
return false;
}
uintptr_t offset = slot_addr - chunk->address();
return slot_set[offset / Page::kPageSize].Contains(offset %
Page::kPageSize);
}
// Given a page and a slot in that page, this function removes the slot from
// the remembered set.
// If the slot was never added, then the function does nothing.
static void Remove(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->slot_set<type>();
RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
}
// Given a page and a range of slots in that page, this function removes the
// slots from the remembered set.
static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
SlotSet::EmptyBucketMode mode) {
SlotSet* slot_set = chunk->slot_set<type>();
RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
}
// Iterates and filters the remembered set with the given callback.
// The callback should take (Address slot) and return SlotCallbackResult.
@@ -122,8 +153,11 @@ class RememberedSet : public AllStatic {
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
SlotSet* sweeping_slots =
type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
if (slots != nullptr || typed_slots != nullptr ||
if (slots != nullptr || sweeping_slots != nullptr ||
typed_slots != nullptr ||
chunk->invalidated_slots<type>() != nullptr) {
callback(chunk);
}
@@ -140,18 +174,7 @@ class RememberedSet : public AllStatic {
static void Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->slot_set<type>();
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
int new_count = 0;
for (size_t page = 0; page < pages; page++) {
new_count += slots[page].Iterate(callback, mode);
}
// Only old-to-old slot sets are released eagerly. Old-new-slot sets are
// released by the sweeper threads.
if (type == OLD_TO_OLD && new_count == 0) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
}
}
RememberedSetOperations::Iterate(slots, chunk, callback, mode);
}
static int NumberOfPreFreedEmptyBuckets(MemoryChunk* chunk) {
@@ -349,6 +372,46 @@ class UpdateTypedSlotHelper {
}
};
class RememberedSetSweeping {
public:
template <AccessMode access_mode = AccessMode::ATOMIC>
static void Insert(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
if (slot_set == nullptr) {
slot_set = chunk->AllocateSweepingSlotSet();
}
RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
}
static void Remove(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
}
// Given a page and a range of slots in that page, this function removes the
// slots from the remembered set.
static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
SlotSet::EmptyBucketMode mode) {
SlotSet* slot_set = chunk->sweeping_slot_set();
RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
}
// Iterates and filters the remembered set in the given memory chunk with
// the given callback. The callback should take (Address slot) and return
// SlotCallbackResult.
//
// Notice that |mode| can only be of FREE* or PREFREE* if there are no other
// threads concurrently inserting slots.
template <typename Callback>
static void Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->sweeping_slot_set();
RememberedSetOperations::Iterate(slots, chunk, callback, mode);
}
};
inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
if (RelocInfo::IsCodeTargetMode(rmode)) {
return CODE_TARGET_SLOT;
...
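
Both RememberedSet<type> and the new RememberedSetSweeping funnel into
RememberedSetOperations, whose core is mapping a slot address to an entry in
a per-region SlotSet array. A small sketch of that address arithmetic; the
256 KB page size is an assumption for illustration only, the real code uses
Page::kPageSize:

#include <cstddef>
#include <cstdint>
#include <utility>

using Address = uintptr_t;
constexpr size_t kPageSize = size_t{256} * 1024;  // assumed; see Page::kPageSize

// Mirrors `slot_set[offset / kPageSize].Insert(offset % kPageSize)`: large
// chunks span several page-sized regions, hence the array indexing.
std::pair<size_t, size_t> SlotSetIndex(Address chunk_base, Address slot_addr) {
  size_t offset = static_cast<size_t>(slot_addr - chunk_base);
  return {offset / kPageSize, offset % kPageSize};
}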
@@ -153,8 +153,15 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
if (result == KEEP_SLOT) {
SLOW_DCHECK(target.IsHeapObject());
RememberedSet<OLD_TO_NEW>::Insert(MemoryChunk::FromHeapObject(host),
slot.address());
MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
// Sweeper is stopped during scavenge, so we can directly
// insert into its remembered set here.
if (chunk->sweeping_slot_set()) {
RememberedSetSweeping::Insert(chunk, slot.address());
} else {
RememberedSet<OLD_TO_NEW>::Insert(chunk, slot.address());
}
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target)));
@@ -239,8 +246,10 @@ void ScavengerCollector::CollectGarbage() {
// access to the slots of a page and can completely avoid any locks on
// the page itself.
Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
filter_scope.FilterOldSpaceSweepingPages(
[](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
});
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap_, [&job](MemoryChunk* chunk) {
job.AddItem(new PageScavengingItem(chunk));
@@ -440,6 +449,14 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
return CheckAndScavengeObject(heap_, slot);
},
SlotSet::KEEP_EMPTY_BUCKETS);
filter = InvalidatedSlotsFilter::OldToNew(page);
RememberedSetSweeping::Iterate(
page,
[this, &filter](MaybeObjectSlot slot) {
CHECK(filter.IsValid(slot.address()));
return CheckAndScavengeObject(heap_, slot);
},
SlotSet::KEEP_EMPTY_BUCKETS);
if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
// The invalidated slots are not needed after old-to-new slots were
...
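
The scavenger hunks above record a promoted object's slot into whichever set
is currently live for the page. A simplified sketch of that branch, with
stand-in types (the real code allocates slot sets through MemoryChunk):

#include <cstdint>
#include <memory>
#include <set>

using Address = uintptr_t;
using SlotSet = std::set<Address>;

struct ChunkSketch {
  std::unique_ptr<SlotSet> sweeping_slot_set;  // non-null while sweeping is pending
  std::unique_ptr<SlotSet> old_to_new;
};

// The sweeper is stopped during scavenge, so writing into its set directly
// is safe; otherwise the slot goes into the regular OLD_TO_NEW set.
void RecordOldToNewSlot(ChunkSketch& chunk, Address slot) {
  if (chunk.sweeping_slot_set) {
    chunk.sweeping_slot_set->insert(slot);
  } else {
    if (!chunk.old_to_new) chunk.old_to_new = std::make_unique<SlotSet>();
    chunk.old_to_new->insert(slot);
  }
}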
@@ -18,6 +18,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
@@ -698,6 +699,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
@@ -855,6 +857,33 @@ Page* Page::ConvertNewToOld(Page* old_page) {
return new_page;
}
void Page::MoveOldToNewRememberedSetForSweeping() {
CHECK_NULL(sweeping_slot_set_);
sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
slot_set_[OLD_TO_NEW] = nullptr;
}
void Page::MergeOldToNewRememberedSets() {
if (sweeping_slot_set_ == nullptr) return;
RememberedSet<OLD_TO_NEW>::Iterate(
this,
[this](MaybeObjectSlot slot) {
Address address = slot.address();
RememberedSetSweeping::Insert(this, address);
return KEEP_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
if (slot_set_[OLD_TO_NEW]) {
ReleaseSlotSet<OLD_TO_NEW>();
}
CHECK_NULL(slot_set_[OLD_TO_NEW]);
slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
sweeping_slot_set_ = nullptr;
}
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
@@ -1375,6 +1404,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
}
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet(&sweeping_slot_set_);
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
@@ -1410,15 +1440,23 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address());
return AllocateSlotSet(&slot_set_[type]);
}
SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
return AllocateSlotSet(&sweeping_slot_set_);
}
SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
SlotSet* new_slot_set = AllocateAndInitializeSlotSet(size(), address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
&slot_set_[type], nullptr, slot_set);
slot_set, nullptr, new_slot_set);
if (old_slot_set != nullptr) {
delete[] slot_set;
slot_set = old_slot_set;
delete[] new_slot_set;
new_slot_set = old_slot_set;
}
DCHECK(slot_set);
return slot_set;
DCHECK(new_slot_set);
return new_slot_set;
}
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
@@ -1426,10 +1464,13 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
SlotSet* slot_set = slot_set_[type];
if (slot_set) {
slot_set_[type] = nullptr;
delete[] slot_set;
ReleaseSlotSet(&slot_set_[type]);
}
void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
if (*slot_set) {
delete[] * slot_set;
*slot_set = nullptr;
}
}
@@ -1626,6 +1667,13 @@ void PagedSpace::RefillFreeList() {
DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
// Avoid races with concurrent store buffer processing when merging
// old-to-new remembered sets later.
if (!is_local()) {
heap()->MoveStoreBufferEntriesToRememberedSet();
}
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
@@ -1636,6 +1684,15 @@ void PagedSpace::RefillFreeList() {
category->Reset(free_list());
});
}
// Also merge old-to-new remembered sets outside of collections.
// Do not do this during GC, because of races during scavenges.
// One thread might be iterating a remembered set while another thread
// merges the two sets.
if (!is_local()) {
p->MergeOldToNewRememberedSets();
}
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
@@ -1678,6 +1735,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
Page* p = *(it++);
p->MergeOldToNewRememberedSets();
// Relinking requires the category to be unlinked.
other->RemovePage(p);
AddPage(p);
......
@@ -133,7 +133,7 @@ enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
NUMBER_OF_REMEMBERED_SET_TYPES
};
// A free list category maintains a linked list of free memory blocks.
@@ -607,6 +607,7 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
@@ -706,6 +707,13 @@ class MemoryChunk : public BasicMemoryChunk {
return slot_set_[type];
}
template <AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* sweeping_slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
return sweeping_slot_set_;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
if (access_mode == AccessMode::ATOMIC)
@@ -715,9 +723,13 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
SlotSet* AllocateSweepingSlotSet();
SlotSet* AllocateSlotSet(SlotSet** slot_set);
// Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseSlotSet();
void ReleaseSlotSet(SlotSet** slot_set);
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
// Not safe to be called concurrently.
@@ -911,6 +923,7 @@ class MemoryChunk : public BasicMemoryChunk {
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
SlotSet* sweeping_slot_set_;
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
@@ -1093,6 +1106,9 @@ class Page : public MemoryChunk {
void AllocateFreeListCategories();
void ReleaseFreeListCategories();
void MoveOldToNewRememberedSetForSweeping();
void MergeOldToNewRememberedSets();
#ifdef DEBUG
void Print();
#endif // DEBUG
...
@@ -320,8 +320,8 @@ int Sweeper::RawSweep(
ClearFreedMemoryMode::kClearFreedMemory);
}
if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSetSweeping::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
@@ -354,8 +354,8 @@ int Sweeper::RawSweep(
ClearFreedMemoryMode::kClearFreedMemory);
}
if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSetSweeping::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
@@ -516,6 +516,7 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK(!category->is_linked(page->owner()->free_list()));
});
#endif // DEBUG
page->MoveOldToNewRememberedSetForSweeping();
page->set_concurrent_sweeping_state(Page::kSweepingPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
...
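
RawSweep above now strips freed ranges from the sweeping set rather than from
OLD_TO_NEW. What RemoveRange accomplishes, sketched over a plain ordered set
standing in for SlotSet:

#include <cstdint>
#include <set>

using Address = uintptr_t;
using SlotSet = std::set<Address>;

// Drops every recorded slot inside freed memory [free_start, free_end);
// freed ranges hold no live objects, so no such slot can still be valid.
void RemoveRangeSketch(SlotSet& slots, Address free_start, Address free_end) {
  slots.erase(slots.lower_bound(free_start), slots.lower_bound(free_end));
}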
@@ -2795,7 +2795,6 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
index, HeapNumber::cast(value).value_as_bits());
if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
// Transition from tagged to untagged slot.
heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*object);
chunk->InvalidateRecordedSlots(*object);
} else {
...
@@ -153,6 +153,9 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Slot clearing is the reason why this entire function cannot currently
// be implemented in the DeleteProperty stub.
if (index.is_inobject() && !receiver_map->IsUnboxedDoubleField(index)) {
// We need to clear the recorded slot in this case because in-object
// slack tracking might not be finished. This ensures that we don't
// have recorded slots in free space.
isolate->heap()->ClearRecordedSlot(*receiver,
receiver->RawField(index.offset()));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
...