Commit 572f536a authored by Dominik Inführ, committed by Commit Bot

[heap] Change SlotSet representation to store dynamic number of buckets

Change the SlotSet representation to a variable-sized array of pointers
to buckets. The length of the array, i.e. the number of buckets, depends
on the size of the page.

Before this change the SlotSet always stored a fixed number of buckets,
so large pages needed an array of SlotSets to cover the whole object.

Now regular and large pages both use a single SlotSet object, which
contains all bucket pointers.
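To make the sizing concrete, here is a minimal sketch of the bucket arithmetic, assuming illustrative constants (8-byte tagged words; one bucket tracks 32 cells of 32 bits, i.e. one bit per slot). The real constants and BucketsForSize live in src/heap/slot-set.h; the static_asserts mirror the unit test added at the bottom of this page, under the assumption of a 256 KiB regular page.

```cpp
#include <cstddef>

constexpr size_t kTaggedSize = 8;       // assumed: 64-bit tagged word
constexpr size_t kCellsPerBucket = 32;  // assumed bucket geometry
constexpr size_t kBitsPerCell = 32;
constexpr size_t kBitsPerBucket = kCellsPerBucket * kBitsPerCell;  // 1024

// One bucket covers kBitsPerBucket slots of kTaggedSize bytes each; round
// up so the tail of a large object still gets a bucket.
constexpr size_t BucketsForSize(size_t size) {
  return (size + kTaggedSize * kBitsPerBucket - 1) /
         (kTaggedSize * kBitsPerBucket);
}

static_assert(BucketsForSize(256 * 1024) == 32, "regular page");
static_assert(BucketsForSize(512 * 1024) == 64, "scales with page size");
```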

Change-Id: I2d8d62fad54b58409cd39ae7a52c64497ee7c261
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1876811
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64635}
parent b875f466
......@@ -104,6 +104,16 @@ class AsAtomicImpl {
cast_helper<T>::to_storage_type(new_value)));
}
template <typename T>
static T AcquireRelease_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
return cast_helper<T>::to_return_type(base::AcquireRelease_CompareAndSwap(
to_storage_addr(addr), cast_helper<T>::to_storage_type(old_value),
cast_helper<T>::to_storage_type(new_value)));
}
// Atomically sets bits selected by the mask to the given value.
// Returns false if the bits are already set as needed.
template <typename T>
......
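The hunk above and the backend hunks that follow all add the same primitive. As a rough model of its contract, a sketch using std::atomic (not V8's actual implementation): the return value is the previous contents, a successful exchange orders as acquire-release, and a failed one degrades to an acquire load — exactly the __ATOMIC_ACQ_REL / __ATOMIC_ACQUIRE pair used below.

```cpp
#include <atomic>
#include <cstdint>

int32_t AcquireRelease_CAS(std::atomic<int32_t>* ptr, int32_t old_value,
                           int32_t new_value) {
  // On failure, compare_exchange_strong writes the observed value back into
  // old_value, so the function returns the previous contents either way.
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_acq_rel,
                               std::memory_order_acquire);
  return old_value;
}
```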
......@@ -97,6 +97,8 @@ Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value);
void SeqCst_MemoryFence();
void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
......@@ -120,9 +122,10 @@ Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
-Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-Atomic64 old_value,
-Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
+Atomic64 new_value);
Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value);
void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
......
......@@ -62,6 +62,13 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline AtomicWord AcquireRelease_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::base::AcquireRelease_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline void Relaxed_Store(volatile AtomicWord* ptr, AtomicWord value) {
Relaxed_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
}
......
......@@ -101,6 +101,14 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return old_value;
}
inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
return old_value;
}
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
......@@ -171,6 +179,14 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
return old_value;
}
inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
return old_value;
}
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
......
......@@ -89,6 +89,15 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return old_value;
}
inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_acq_rel, std::memory_order_acquire);
return old_value;
}
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed);
......@@ -175,6 +184,15 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
return old_value;
}
inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
std::atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_acq_rel, std::memory_order_acquire);
return old_value;
}
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed);
......
......@@ -337,11 +337,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
TNode<IntPtrT> page = PageFromAddress(object);
// Load address of SlotSet
-TNode<IntPtrT> slot_set_array = LoadSlotSetArray(page, &slow_path);
+TNode<IntPtrT> slot_set = LoadSlotSet(page, &slow_path);
TNode<IntPtrT> slot_offset = IntPtrSub(slot, page);
// Load bucket
-TNode<IntPtrT> bucket = LoadBucket(slot_set_array, slot_offset, &slow_path);
+TNode<IntPtrT> bucket = LoadBucket(slot_set, slot_offset, &slow_path);
// Update cell
SetBitInCell(bucket, slot_offset);
......@@ -352,23 +352,21 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
InsertIntoRememberedSetAndGotoSlow(object, slot, mode, next);
}
-TNode<IntPtrT> LoadSlotSetArray(TNode<IntPtrT> page, Label* slow_path) {
-TNode<IntPtrT> slot_set_array = UncheckedCast<IntPtrT>(
+TNode<IntPtrT> LoadSlotSet(TNode<IntPtrT> page, Label* slow_path) {
+TNode<IntPtrT> slot_set = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset)));
-GotoIf(WordEqual(slot_set_array, IntPtrConstant(0)), slow_path);
+GotoIf(WordEqual(slot_set, IntPtrConstant(0)), slow_path);
-return slot_set_array;
+return slot_set;
}
-TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set_array,
-TNode<WordT> slot_offset, Label* slow_path) {
-// Assume here that SlotSet only contains of buckets
-DCHECK_EQ(SlotSet::kSize, SlotSet::kBuckets * sizeof(SlotSet::Bucket));
+TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set, TNode<WordT> slot_offset,
+Label* slow_path) {
TNode<WordT> bucket_index =
WordShr(slot_offset, SlotSet::kBitsPerBucketLog2 + kTaggedSizeLog2);
TNode<IntPtrT> bucket = UncheckedCast<IntPtrT>(
-Load(MachineType::Pointer(), slot_set_array,
+Load(MachineType::Pointer(), slot_set,
WordShl(bucket_index, kSystemPointerSizeLog2)));
GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path);
return bucket;
......
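For reference, the stub's fast-path address arithmetic written as plain C++, under the same assumed constants as above (1024 slot bits per bucket, 8-byte tagged words); names are illustrative, not V8 API.

```cpp
#include <cstdint>

constexpr uintptr_t kTaggedSizeLog2 = 3;      // assumed: 8-byte tagged words
constexpr uintptr_t kBitsPerBucketLog2 = 10;  // assumed: 1024 slots/bucket

using Bucket = uint32_t*;

// slot_set is the SlotSet's array of bucket pointers; slot_offset is the
// byte offset of the written slot within its page. Indexing the array is
// the C++ equivalent of WordShl(bucket_index, kSystemPointerSizeLog2).
Bucket LoadBucketFor(Bucket* slot_set, uintptr_t slot_offset) {
  uintptr_t bucket_index =
      slot_offset >> (kBitsPerBucketLog2 + kTaggedSizeLog2);
  return slot_set[bucket_index];  // nullptr sends the stub to the slow path
}
```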
......@@ -112,6 +112,8 @@ class BasicMemoryChunk {
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
size_t buckets() const { return SlotSet::BucketsForSize(size()); }
Address area_start() const { return area_start_; }
Address area_end() const { return area_end_; }
......
......@@ -26,28 +26,24 @@ class RememberedSetOperations {
static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
uintptr_t offset = slot_addr - chunk->address();
-slot_set[offset / Page::kPageSize].Insert<access_mode>(offset %
-Page::kPageSize);
+slot_set->Insert<access_mode>(offset);
}
template <typename Callback>
-static int Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
+static int Iterate(SlotSet* slot_set, MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
-int number_slots = 0;
-if (slots != nullptr) {
-size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
-for (size_t page = 0; page < pages; page++) {
-number_slots += slots[page].Iterate(
-chunk->address() + page * Page::kPageSize, callback, mode);
-}
-}
+int slots = 0;
+if (slot_set != nullptr) {
+slots +=
+slot_set->Iterate(chunk->address(), chunk->buckets(), callback, mode);
+}
-return number_slots;
+return slots;
}
static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
if (slot_set != nullptr) {
uintptr_t offset = slot_addr - chunk->address();
-slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
+slot_set->Remove(offset);
}
}
......@@ -57,35 +53,9 @@ class RememberedSetOperations {
uintptr_t start_offset = start - chunk->address();
uintptr_t end_offset = end - chunk->address();
DCHECK_LT(start_offset, end_offset);
-if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) {
-slot_set->RemoveRange(static_cast<int>(start_offset),
-static_cast<int>(end_offset), mode);
-} else {
-// The large page has multiple slot sets.
-// Compute slot set indicies for the range [start_offset, end_offset).
-int start_chunk = static_cast<int>(start_offset / Page::kPageSize);
-int end_chunk = static_cast<int>((end_offset - 1) / Page::kPageSize);
-int offset_in_start_chunk =
-static_cast<int>(start_offset % Page::kPageSize);
-// Note that using end_offset % Page::kPageSize would be incorrect
-// because end_offset is one beyond the last slot to clear.
-int offset_in_end_chunk = static_cast<int>(
-end_offset - static_cast<uintptr_t>(end_chunk) * Page::kPageSize);
-if (start_chunk == end_chunk) {
-slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
-offset_in_end_chunk, mode);
-} else {
-// Clear all slots from start_offset to the end of first chunk.
-slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
-Page::kPageSize, mode);
-// Clear all slots in intermediate chunks.
-for (int i = start_chunk + 1; i < end_chunk; i++) {
-slot_set[i].RemoveRange(0, Page::kPageSize, mode);
-}
-// Clear slots from the beginning of the last page to end_offset.
-slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk, mode);
-}
-}
+slot_set->RemoveRange(static_cast<int>(start_offset),
+static_cast<int>(end_offset), chunk->buckets(),
+mode);
}
}
};
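A usage sketch of the simplified iteration path: one call now covers the whole chunk, with chunk->buckets() bounding the walk instead of an outer per-SlotSet loop. The callback contract (KEEP_SLOT / REMOVE_SLOT) is from the diff; the young-generation predicate is an assumed stand-in for V8's real check.

```cpp
// Keep slots that still point into the young generation, prune the rest,
// and free buckets that become empty along the way.
int PruneOldToNew(MemoryChunk* chunk) {
  return RememberedSet<OLD_TO_NEW>::Iterate(
      chunk,
      [](MaybeObjectSlot slot) {
        return Heap::InYoungGeneration(*slot) ? KEEP_SLOT : REMOVE_SLOT;
      },
      SlotSet::FREE_EMPTY_BUCKETS);
}
```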
......@@ -115,8 +85,7 @@ class RememberedSet : public AllStatic {
return false;
}
uintptr_t offset = slot_addr - chunk->address();
-return slot_set[offset / Page::kPageSize].Contains(offset %
-Page::kPageSize);
+return slot_set->Contains(offset);
}
// Given a page and a slot in that page, this function removes the slot from
......@@ -155,12 +124,12 @@ class RememberedSet : public AllStatic {
OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
-SlotSet* slots = chunk->slot_set<type>();
-SlotSet* sweeping_slots =
+SlotSet* slot_set = chunk->slot_set<type>();
+SlotSet* sweeping_slot_set =
type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
-TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
-if (slots != nullptr || sweeping_slots != nullptr ||
-typed_slots != nullptr ||
+TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
+if (slot_set != nullptr || sweeping_slot_set != nullptr ||
+typed_slot_set != nullptr ||
chunk->invalidated_slots<type>() != nullptr) {
callback(chunk);
}
......@@ -176,18 +145,15 @@ class RememberedSet : public AllStatic {
template <typename Callback>
static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
-SlotSet* slots = chunk->slot_set<type>();
-return RememberedSetOperations::Iterate(slots, chunk, callback, mode);
+SlotSet* slot_set = chunk->slot_set<type>();
+return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
}
static void FreeEmptyBuckets(MemoryChunk* chunk) {
DCHECK(type == OLD_TO_NEW);
-SlotSet* slots = chunk->slot_set<type>();
-if (slots != nullptr) {
-size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
-for (size_t page = 0; page < pages; page++) {
-slots[page].FreeEmptyBuckets();
-}
-}
+SlotSet* slot_set = chunk->slot_set<type>();
+if (slot_set != nullptr) {
+slot_set->FreeEmptyBuckets(chunk->buckets());
}
}
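FreeEmptyBuckets now takes the bucket count, since a SlotSet no longer knows its own size. A self-contained sketch of the operation over the simplified model (a bare array of bucket pointers, each bucket a block of 32-bit cells); this is an assumed reconstruction, not the V8 code.

```cpp
#include <cstddef>
#include <cstdint>

void FreeEmptyBuckets(uint32_t** bucket_array, size_t buckets,
                      size_t cells_per_bucket) {
  for (size_t i = 0; i < buckets; i++) {
    uint32_t* bucket = bucket_array[i];
    if (bucket == nullptr) continue;  // never allocated: nothing to free
    bool empty = true;
    for (size_t c = 0; c < cells_per_bucket; c++) {
      if (bucket[c] != 0) {
        empty = false;
        break;
      }
    }
    if (empty) {
      delete[] bucket;            // assumes buckets come from new uint32_t[]
      bucket_array[i] = nullptr;  // later inserts re-allocate on demand
    }
  }
}
```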
......@@ -202,20 +168,20 @@ class RememberedSet : public AllStatic {
slot_set->Insert(slot_type, offset);
}
-static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> slots) {
+static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> other) {
TypedSlotSet* slot_set = page->typed_slot_set<type>();
if (slot_set == nullptr) {
slot_set = page->AllocateTypedSlotSet<type>();
}
-slot_set->Merge(slots.get());
+slot_set->Merge(other.get());
}
// Given a page and a range of typed slots in that page, this function removes
// the slots from the remembered set.
static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
-TypedSlotSet* slots = page->typed_slot_set<type>();
-if (slots != nullptr) {
-slots->Iterate(
+TypedSlotSet* slot_set = page->typed_slot_set<type>();
+if (slot_set != nullptr) {
+slot_set->Iterate(
[=](SlotType slot_type, Address slot_addr) {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
: KEEP_SLOT;
......@@ -242,9 +208,10 @@ class RememberedSet : public AllStatic {
// and return SlotCallbackResult.
template <typename Callback>
static void IterateTyped(MemoryChunk* chunk, Callback callback) {
-TypedSlotSet* slots = chunk->typed_slot_set<type>();
-if (slots != nullptr) {
-int new_count = slots->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
+TypedSlotSet* slot_set = chunk->typed_slot_set<type>();
+if (slot_set != nullptr) {
+int new_count =
+slot_set->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
if (new_count == 0) {
chunk->ReleaseTypedSlotSet<type>();
}
......@@ -382,8 +349,8 @@ class RememberedSetSweeping {
template <typename Callback>
static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
-SlotSet* slots = chunk->sweeping_slot_set();
-return RememberedSetOperations::Iterate(slots, chunk, callback, mode);
+SlotSet* slot_set = chunk->sweeping_slot_set();
+return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
}
};
......
This diff is collapsed.
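The collapsed diff is presumably src/heap/slot-set.h, where the new representation is defined. A minimal sketch of the idea, assuming the SlotSet is allocated as a bare variable-length array of bucket pointers via the AlignedAlloc/AlignedFree helpers touched below; the real class additionally publishes buckets atomically and packs the slot bitmaps.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Declarations matching src/utils/allocation.h (see the hunk below).
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void* ptr);

class SlotSet {
 public:
  using Bucket = uint32_t*;  // e.g. 32 cells x 32 bits = 1024 slot bits

  // The object *is* the pointer array: regular and large pages differ only
  // in the bucket count they pass in.
  static SlotSet* Allocate(size_t buckets) {
    void* memory = AlignedAlloc(buckets * sizeof(Bucket), sizeof(void*));
    std::memset(memory, 0, buckets * sizeof(Bucket));  // all buckets empty
    return reinterpret_cast<SlotSet*>(memory);
  }

  static void Delete(SlotSet* slot_set, size_t buckets) {
    for (size_t i = 0; i < buckets; i++) delete[] slot_set->buckets_[i];
    AlignedFree(slot_set);
  }

 private:
  Bucket buckets_[1];  // really `buckets` entries, sized at Allocate() time
};
```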
......@@ -1090,6 +1090,12 @@ size_t Page::ShrinkToHighWaterMark() {
// Ensure that no objects will be allocated on this page.
DCHECK_EQ(0u, AvailableInFreeList());
// Ensure that slot sets are empty. Otherwise the buckets for the shrunk
// area would not be freed when deallocating this page.
DCHECK_NULL(slot_set<OLD_TO_NEW>());
DCHECK_NULL(slot_set<OLD_TO_OLD>());
DCHECK_NULL(sweeping_slot_set());
size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
......@@ -1416,12 +1422,6 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
}
-static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
-size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
-DCHECK_LT(0, pages);
-return new SlotSet[pages];
-}
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
......@@ -1435,11 +1435,11 @@ SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
}
SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
-SlotSet* new_slot_set = AllocateAndInitializeSlotSet(size(), address());
-SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
+SlotSet* new_slot_set = SlotSet::Allocate(buckets());
+SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
slot_set, nullptr, new_slot_set);
if (old_slot_set != nullptr) {
-delete[] new_slot_set;
+SlotSet::Delete(new_slot_set, buckets());
new_slot_set = old_slot_set;
}
DCHECK(new_slot_set);
......@@ -1460,7 +1460,7 @@ void MemoryChunk::ReleaseSweepingSlotSet() {
void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
if (*slot_set) {
-delete[] * slot_set;
+SlotSet::Delete(*slot_set, buckets());
*slot_set = nullptr;
}
}
......
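Why the fence got stronger in AllocateSlotSet: two threads can race to install a slot set, and the loser immediately dereferences the winner's pointer, so the failed CAS must be an acquire of the winner's release-published initialization. A sketch of the protocol with std::atomic; the helper names are assumptions tied to the sketch above, not V8 API.

```cpp
#include <atomic>
#include <cstddef>

class SlotSet;
SlotSet* AllocateSlotSet(size_t buckets);  // hypothetical helpers
void DeleteSlotSet(SlotSet* set, size_t buckets);

SlotSet* EnsureSlotSet(std::atomic<SlotSet*>* slot, size_t buckets) {
  SlotSet* fresh = AllocateSlotSet(buckets);
  SlotSet* expected = nullptr;
  if (!slot->compare_exchange_strong(expected, fresh,
                                     std::memory_order_acq_rel,
                                     std::memory_order_acquire)) {
    // Lost the race: expected now holds the winner's set, and the acquire
    // failure ordering makes its initialization visible to this thread.
    DeleteSlotSet(fresh, buckets);
    return expected;
  }
  return fresh;
}
```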
......@@ -76,7 +76,7 @@ class FreeStoreAllocationPolicy {
void* AllocWithRetry(size_t size);
V8_EXPORT_PRIVATE void* AlignedAlloc(size_t size, size_t alignment);
-void AlignedFree(void* ptr);
+V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
......
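A plausible reason for the AlignedFree change above: SlotSet::Allocate and SlotSet::Delete now live in a header and call AlignedAlloc/AlignedFree from outside the utils component, so both symbols need V8_EXPORT_PRIVATE. This is an inference from the diff, not stated in the commit message.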
......@@ -14,45 +14,56 @@
namespace v8 {
namespace internal {
TEST(SlotSet, BucketsForSize) {
EXPECT_EQ(static_cast<size_t>(SlotSet::kBucketsRegularPage),
SlotSet::BucketsForSize(Page::kPageSize));
EXPECT_EQ(static_cast<size_t>(SlotSet::kBucketsRegularPage) * 2,
SlotSet::BucketsForSize(Page::kPageSize * 2));
}
TEST(SlotSet, InsertAndLookup1) {
-SlotSet set;
+SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-EXPECT_FALSE(set.Lookup(i));
+EXPECT_FALSE(set->Lookup(i));
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-set.Insert<AccessMode::ATOMIC>(i);
+set->Insert<AccessMode::ATOMIC>(i);
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
-EXPECT_TRUE(set.Lookup(i));
+EXPECT_TRUE(set->Lookup(i));
}
+SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(SlotSet, InsertAndLookup2) {
-SlotSet set;
+SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
-set.Insert<AccessMode::ATOMIC>(i);
+set->Insert<AccessMode::ATOMIC>(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
-EXPECT_TRUE(set.Lookup(i));
+EXPECT_TRUE(set->Lookup(i));
} else {
-EXPECT_FALSE(set.Lookup(i));
+EXPECT_FALSE(set->Lookup(i));
}
}
+SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(SlotSet, Iterate) {
-SlotSet set;
+SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
-set.Insert<AccessMode::ATOMIC>(i);
+set->Insert<AccessMode::ATOMIC>(i);
}
}
-set.Iterate(
-kNullAddress,
+set->Iterate(
+kNullAddress, SlotSet::kBucketsRegularPage,
[](MaybeObjectSlot slot) {
if (slot.address() % 3 == 0) {
return KEEP_SLOT;
......@@ -64,56 +75,62 @@ TEST(SlotSet, Iterate) {
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 21 == 0) {
-EXPECT_TRUE(set.Lookup(i));
+EXPECT_TRUE(set->Lookup(i));
} else {
-EXPECT_FALSE(set.Lookup(i));
+EXPECT_FALSE(set->Lookup(i));
}
}
+SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(SlotSet, Remove) {
-SlotSet set;
+SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
-set.Insert<AccessMode::ATOMIC>(i);
+set->Insert<AccessMode::ATOMIC>(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 3 != 0) {
-set.Remove(i);
+set->Remove(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 21 == 0) {
-EXPECT_TRUE(set.Lookup(i));
+EXPECT_TRUE(set->Lookup(i));
} else {
-EXPECT_FALSE(set.Lookup(i));
+EXPECT_FALSE(set->Lookup(i));
}
}
+SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
-SlotSet set;
+SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
uint32_t first = start == 0 ? 0 : start - kTaggedSize;
uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end;
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
for (uint32_t i = first; i <= last; i += kTaggedSize) {
-set.Insert<AccessMode::ATOMIC>(i);
+set->Insert<AccessMode::ATOMIC>(i);
}
-set.RemoveRange(start, end, mode);
+set->RemoveRange(start, end, SlotSet::kBucketsRegularPage, mode);
if (first != start) {
-EXPECT_TRUE(set.Lookup(first));
+EXPECT_TRUE(set->Lookup(first));
}
if (last == end) {
-EXPECT_TRUE(set.Lookup(last));
+EXPECT_TRUE(set->Lookup(last));
}
for (uint32_t i = start; i < end; i += kTaggedSize) {
-EXPECT_FALSE(set.Lookup(i));
+EXPECT_FALSE(set->Lookup(i));
}
}
+SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(SlotSet, RemoveRange) {
......@@ -132,15 +149,16 @@ TEST(SlotSet, RemoveRange) {
}
}
}
-SlotSet set;
+SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
-set.Insert<AccessMode::ATOMIC>(Page::kPageSize / 2);
-set.RemoveRange(0, Page::kPageSize, mode);
+set->Insert<AccessMode::ATOMIC>(Page::kPageSize / 2);
+set->RemoveRange(0, Page::kPageSize, SlotSet::kBucketsRegularPage, mode);
for (uint32_t i = 0; i < Page::kPageSize; i += kTaggedSize) {
-EXPECT_FALSE(set.Lookup(i));
+EXPECT_FALSE(set->Lookup(i));
}
}
+SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(TypedSlotSet, Iterate) {
......