Commit 572f536a authored by Dominik Inführ's avatar Dominik Inführ Committed by Commit Bot

[heap] Change SlotSet representation to store dynamic number of buckets

Change SlotSet representation to a variable-sized array of pointers to
buckets. The length of the array/number of buckets depends on the size
of the page.
Before this change, the SlotSet always stored a fixed number of
buckets, so large pages needed an array of SlotSets to cover the whole object.

Now regular and large pages both use a single SlotSet object,
which contains all bucket pointers.

Change-Id: I2d8d62fad54b58409cd39ae7a52c64497ee7c261
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1876811
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64635}
parent b875f466
...@@ -104,6 +104,16 @@ class AsAtomicImpl { ...@@ -104,6 +104,16 @@ class AsAtomicImpl {
cast_helper<T>::to_storage_type(new_value))); cast_helper<T>::to_storage_type(new_value)));
} }
template <typename T>
static T AcquireRelease_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
return cast_helper<T>::to_return_type(base::AcquireRelease_CompareAndSwap(
to_storage_addr(addr), cast_helper<T>::to_storage_type(old_value),
cast_helper<T>::to_storage_type(new_value)));
}
// Atomically sets bits selected by the mask to the given value. // Atomically sets bits selected by the mask to the given value.
// Returns false if the bits are already set as needed. // Returns false if the bits are already set as needed.
template <typename T> template <typename T>
......
...@@ -97,6 +97,8 @@ Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, ...@@ -97,6 +97,8 @@ Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 old_value,
Atomic32 new_value); Atomic32 new_value);
Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value);
void SeqCst_MemoryFence(); void SeqCst_MemoryFence();
void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value); void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
...@@ -120,9 +122,10 @@ Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment); ...@@ -120,9 +122,10 @@ Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 old_value,
Atomic64 new_value); Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
Atomic64 old_value,
Atomic64 new_value); Atomic64 new_value);
Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value);
void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value); void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value); void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 Relaxed_Load(volatile const Atomic64* ptr); Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
......
...@@ -62,6 +62,13 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, ...@@ -62,6 +62,13 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value); reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
} }
// AtomicWord variant: forwards to the Atomic32 implementation.
inline AtomicWord AcquireRelease_CompareAndSwap(volatile AtomicWord* ptr,
                                                AtomicWord old_value,
                                                AtomicWord new_value) {
  volatile Atomic32* ptr32 = reinterpret_cast<volatile Atomic32*>(ptr);
  return v8::base::AcquireRelease_CompareAndSwap(ptr32, old_value, new_value);
}
inline void Relaxed_Store(volatile AtomicWord* ptr, AtomicWord value) { inline void Relaxed_Store(volatile AtomicWord* ptr, AtomicWord value) {
Relaxed_Store(reinterpret_cast<volatile Atomic32*>(ptr), value); Relaxed_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
} }
......
...@@ -101,6 +101,14 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, ...@@ -101,6 +101,14 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return old_value; return old_value;
} }
// Strong CAS on a 32-bit cell: acquire-release ordering on success,
// acquire on failure. Returns the value observed at |ptr|; the swap took
// place iff that value equals |old_value|.
inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  Atomic32 expected = old_value;
  __atomic_compare_exchange_n(ptr, &expected, new_value, false,
                              __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
  return expected;
}
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) { inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED); __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
} }
...@@ -171,6 +179,14 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, ...@@ -171,6 +179,14 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
return old_value; return old_value;
} }
// Strong CAS on a 64-bit cell: acquire-release ordering on success,
// acquire on failure. Returns the value observed at |ptr|; the swap took
// place iff that value equals |old_value|.
inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
                                              Atomic64 old_value,
                                              Atomic64 new_value) {
  Atomic64 expected = old_value;
  __atomic_compare_exchange_n(ptr, &expected, new_value, false,
                              __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
  return expected;
}
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) { inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED); __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
} }
......
...@@ -89,6 +89,15 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, ...@@ -89,6 +89,15 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return old_value; return old_value;
} }
// Strong CAS via std::atomic: memory_order_acq_rel on success,
// memory_order_acquire on failure. Returns the previous value at |ptr|;
// the swap took place iff it equals |old_value|.
inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  Atomic32 expected = old_value;
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &expected, new_value,
      std::memory_order_acq_rel, std::memory_order_acquire);
  return expected;
}
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) { inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value, std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed); std::memory_order_relaxed);
...@@ -175,6 +184,15 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, ...@@ -175,6 +184,15 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
return old_value; return old_value;
} }
// Strong CAS via std::atomic: memory_order_acq_rel on success,
// memory_order_acquire on failure. Returns the previous value at |ptr|;
// the swap took place iff it equals |old_value|.
inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
                                              Atomic64 old_value,
                                              Atomic64 new_value) {
  Atomic64 expected = old_value;
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &expected, new_value,
      std::memory_order_acq_rel, std::memory_order_acquire);
  return expected;
}
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) { inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value, std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed); std::memory_order_relaxed);
......
...@@ -337,11 +337,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { ...@@ -337,11 +337,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
TNode<IntPtrT> page = PageFromAddress(object); TNode<IntPtrT> page = PageFromAddress(object);
// Load address of SlotSet // Load address of SlotSet
TNode<IntPtrT> slot_set_array = LoadSlotSetArray(page, &slow_path); TNode<IntPtrT> slot_set = LoadSlotSet(page, &slow_path);
TNode<IntPtrT> slot_offset = IntPtrSub(slot, page); TNode<IntPtrT> slot_offset = IntPtrSub(slot, page);
// Load bucket // Load bucket
TNode<IntPtrT> bucket = LoadBucket(slot_set_array, slot_offset, &slow_path); TNode<IntPtrT> bucket = LoadBucket(slot_set, slot_offset, &slow_path);
// Update cell // Update cell
SetBitInCell(bucket, slot_offset); SetBitInCell(bucket, slot_offset);
...@@ -352,23 +352,21 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { ...@@ -352,23 +352,21 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
InsertIntoRememberedSetAndGotoSlow(object, slot, mode, next); InsertIntoRememberedSetAndGotoSlow(object, slot, mode, next);
} }
TNode<IntPtrT> LoadSlotSetArray(TNode<IntPtrT> page, Label* slow_path) { TNode<IntPtrT> LoadSlotSet(TNode<IntPtrT> page, Label* slow_path) {
TNode<IntPtrT> slot_set_array = UncheckedCast<IntPtrT>( TNode<IntPtrT> slot_set = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), page, Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset))); IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset)));
GotoIf(WordEqual(slot_set_array, IntPtrConstant(0)), slow_path); GotoIf(WordEqual(slot_set, IntPtrConstant(0)), slow_path);
return slot_set_array; return slot_set;
} }
TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set_array, TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set, TNode<WordT> slot_offset,
TNode<WordT> slot_offset, Label* slow_path) { Label* slow_path) {
// Assume here that SlotSet only contains of buckets
DCHECK_EQ(SlotSet::kSize, SlotSet::kBuckets * sizeof(SlotSet::Bucket));
TNode<WordT> bucket_index = TNode<WordT> bucket_index =
WordShr(slot_offset, SlotSet::kBitsPerBucketLog2 + kTaggedSizeLog2); WordShr(slot_offset, SlotSet::kBitsPerBucketLog2 + kTaggedSizeLog2);
TNode<IntPtrT> bucket = UncheckedCast<IntPtrT>( TNode<IntPtrT> bucket = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), slot_set_array, Load(MachineType::Pointer(), slot_set,
WordShl(bucket_index, kSystemPointerSizeLog2))); WordShl(bucket_index, kSystemPointerSizeLog2)));
GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path); GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path);
return bucket; return bucket;
......
...@@ -112,6 +112,8 @@ class BasicMemoryChunk { ...@@ -112,6 +112,8 @@ class BasicMemoryChunk {
size_t size() const { return size_; } size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; } void set_size(size_t size) { size_ = size; }
// Number of SlotSet buckets needed to cover this chunk, derived from its
// size via SlotSet::BucketsForSize.
size_t buckets() const { return SlotSet::BucketsForSize(size()); }
Address area_start() const { return area_start_; } Address area_start() const { return area_start_; }
Address area_end() const { return area_end_; } Address area_end() const { return area_end_; }
......
...@@ -26,28 +26,24 @@ class RememberedSetOperations { ...@@ -26,28 +26,24 @@ class RememberedSetOperations {
static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) { static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr)); DCHECK(chunk->Contains(slot_addr));
uintptr_t offset = slot_addr - chunk->address(); uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Insert<access_mode>(offset % slot_set->Insert<access_mode>(offset);
Page::kPageSize);
} }
template <typename Callback> template <typename Callback>
static int Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback, static int Iterate(SlotSet* slot_set, MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) { SlotSet::EmptyBucketMode mode) {
int number_slots = 0; int slots = 0;
if (slots != nullptr) { if (slot_set != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize; slots +=
for (size_t page = 0; page < pages; page++) { slot_set->Iterate(chunk->address(), chunk->buckets(), callback, mode);
number_slots += slots[page].Iterate(
chunk->address() + page * Page::kPageSize, callback, mode);
}
} }
return number_slots; return slots;
} }
static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) { static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
if (slot_set != nullptr) { if (slot_set != nullptr) {
uintptr_t offset = slot_addr - chunk->address(); uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize); slot_set->Remove(offset);
} }
} }
...@@ -57,35 +53,9 @@ class RememberedSetOperations { ...@@ -57,35 +53,9 @@ class RememberedSetOperations {
uintptr_t start_offset = start - chunk->address(); uintptr_t start_offset = start - chunk->address();
uintptr_t end_offset = end - chunk->address(); uintptr_t end_offset = end - chunk->address();
DCHECK_LT(start_offset, end_offset); DCHECK_LT(start_offset, end_offset);
if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) { slot_set->RemoveRange(static_cast<int>(start_offset),
slot_set->RemoveRange(static_cast<int>(start_offset), static_cast<int>(end_offset), chunk->buckets(),
static_cast<int>(end_offset), mode); mode);
} else {
// The large page has multiple slot sets.
// Compute slot set indicies for the range [start_offset, end_offset).
int start_chunk = static_cast<int>(start_offset / Page::kPageSize);
int end_chunk = static_cast<int>((end_offset - 1) / Page::kPageSize);
int offset_in_start_chunk =
static_cast<int>(start_offset % Page::kPageSize);
// Note that using end_offset % Page::kPageSize would be incorrect
// because end_offset is one beyond the last slot to clear.
int offset_in_end_chunk = static_cast<int>(
end_offset - static_cast<uintptr_t>(end_chunk) * Page::kPageSize);
if (start_chunk == end_chunk) {
slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
offset_in_end_chunk, mode);
} else {
// Clear all slots from start_offset to the end of first chunk.
slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
Page::kPageSize, mode);
// Clear all slots in intermediate chunks.
for (int i = start_chunk + 1; i < end_chunk; i++) {
slot_set[i].RemoveRange(0, Page::kPageSize, mode);
}
// Clear slots from the beginning of the last page to end_offset.
slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk, mode);
}
}
} }
} }
}; };
...@@ -115,8 +85,7 @@ class RememberedSet : public AllStatic { ...@@ -115,8 +85,7 @@ class RememberedSet : public AllStatic {
return false; return false;
} }
uintptr_t offset = slot_addr - chunk->address(); uintptr_t offset = slot_addr - chunk->address();
return slot_set[offset / Page::kPageSize].Contains(offset % return slot_set->Contains(offset);
Page::kPageSize);
} }
// Given a page and a slot in that page, this function removes the slot from // Given a page and a slot in that page, this function removes the slot from
...@@ -155,12 +124,12 @@ class RememberedSet : public AllStatic { ...@@ -155,12 +124,12 @@ class RememberedSet : public AllStatic {
OldGenerationMemoryChunkIterator it(heap); OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk; MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) { while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>(); SlotSet* slot_set = chunk->slot_set<type>();
SlotSet* sweeping_slots = SlotSet* sweeping_slot_set =
type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr; type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>(); TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
if (slots != nullptr || sweeping_slots != nullptr || if (slot_set != nullptr || sweeping_slot_set != nullptr ||
typed_slots != nullptr || typed_slot_set != nullptr ||
chunk->invalidated_slots<type>() != nullptr) { chunk->invalidated_slots<type>() != nullptr) {
callback(chunk); callback(chunk);
} }
...@@ -176,18 +145,15 @@ class RememberedSet : public AllStatic { ...@@ -176,18 +145,15 @@ class RememberedSet : public AllStatic {
template <typename Callback> template <typename Callback>
static int Iterate(MemoryChunk* chunk, Callback callback, static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) { SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->slot_set<type>(); SlotSet* slot_set = chunk->slot_set<type>();
return RememberedSetOperations::Iterate(slots, chunk, callback, mode); return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
} }
static void FreeEmptyBuckets(MemoryChunk* chunk) { static void FreeEmptyBuckets(MemoryChunk* chunk) {
DCHECK(type == OLD_TO_NEW); DCHECK(type == OLD_TO_NEW);
SlotSet* slots = chunk->slot_set<type>(); SlotSet* slot_set = chunk->slot_set<type>();
if (slots != nullptr) { if (slot_set != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize; slot_set->FreeEmptyBuckets(chunk->buckets());
for (size_t page = 0; page < pages; page++) {
slots[page].FreeEmptyBuckets();
}
} }
} }
...@@ -202,20 +168,20 @@ class RememberedSet : public AllStatic { ...@@ -202,20 +168,20 @@ class RememberedSet : public AllStatic {
slot_set->Insert(slot_type, offset); slot_set->Insert(slot_type, offset);
} }
static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> slots) { static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> other) {
TypedSlotSet* slot_set = page->typed_slot_set<type>(); TypedSlotSet* slot_set = page->typed_slot_set<type>();
if (slot_set == nullptr) { if (slot_set == nullptr) {
slot_set = page->AllocateTypedSlotSet<type>(); slot_set = page->AllocateTypedSlotSet<type>();
} }
slot_set->Merge(slots.get()); slot_set->Merge(other.get());
} }
// Given a page and a range of typed slots in that page, this function removes // Given a page and a range of typed slots in that page, this function removes
// the slots from the remembered set. // the slots from the remembered set.
static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) { static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
TypedSlotSet* slots = page->typed_slot_set<type>(); TypedSlotSet* slot_set = page->typed_slot_set<type>();
if (slots != nullptr) { if (slot_set != nullptr) {
slots->Iterate( slot_set->Iterate(
[=](SlotType slot_type, Address slot_addr) { [=](SlotType slot_type, Address slot_addr) {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
: KEEP_SLOT; : KEEP_SLOT;
...@@ -242,9 +208,10 @@ class RememberedSet : public AllStatic { ...@@ -242,9 +208,10 @@ class RememberedSet : public AllStatic {
// and return SlotCallbackResult. // and return SlotCallbackResult.
template <typename Callback> template <typename Callback>
static void IterateTyped(MemoryChunk* chunk, Callback callback) { static void IterateTyped(MemoryChunk* chunk, Callback callback) {
TypedSlotSet* slots = chunk->typed_slot_set<type>(); TypedSlotSet* slot_set = chunk->typed_slot_set<type>();
if (slots != nullptr) { if (slot_set != nullptr) {
int new_count = slots->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS); int new_count =
slot_set->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
if (new_count == 0) { if (new_count == 0) {
chunk->ReleaseTypedSlotSet<type>(); chunk->ReleaseTypedSlotSet<type>();
} }
...@@ -382,8 +349,8 @@ class RememberedSetSweeping { ...@@ -382,8 +349,8 @@ class RememberedSetSweeping {
template <typename Callback> template <typename Callback>
static int Iterate(MemoryChunk* chunk, Callback callback, static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) { SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->sweeping_slot_set(); SlotSet* slot_set = chunk->sweeping_slot_set();
return RememberedSetOperations::Iterate(slots, chunk, callback, mode); return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
} }
}; };
......
...@@ -24,100 +24,132 @@ enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT }; ...@@ -24,100 +24,132 @@ enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Data structure for maintaining a set of slots in a standard (non-large) // Data structure for maintaining a set of slots in a standard (non-large)
// page. // page.
// The data structure assumes that the slots are pointer size aligned and // The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets. // splits the valid slot offset range into buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset. // Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced { class SlotSet {
public: public:
enum EmptyBucketMode { enum EmptyBucketMode {
FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately. FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
KEEP_EMPTY_BUCKETS // An empty bucket will be kept. KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
}; };
SlotSet() { SlotSet() = delete;
for (int i = 0; i < kBuckets; i++) {
StoreBucket(&buckets_[i], nullptr); static SlotSet* Allocate(size_t buckets) {
size_t buckets_size = buckets * sizeof(Bucket*);
void* allocation =
AlignedAlloc(kInitialBucketsSize + buckets_size, kSystemPointerSize);
SlotSet* slot_set = reinterpret_cast<SlotSet*>(
reinterpret_cast<uint8_t*>(allocation) + kInitialBucketsSize);
#ifdef DEBUG
*slot_set->initial_buckets() = buckets;
#endif
for (size_t i = 0; i < buckets; i++) {
*slot_set->bucket(i) = nullptr;
} }
return slot_set;
} }
~SlotSet() { static void Delete(SlotSet* slot_set, size_t buckets) {
for (int i = 0; i < kBuckets; i++) { if (slot_set == nullptr) return;
ReleaseBucket(i);
for (size_t i = 0; i < buckets; i++) {
slot_set->ReleaseBucket(i);
}
#ifdef DEBUG
for (size_t i = buckets; i < *slot_set->initial_buckets(); i++) {
DCHECK_NULL(*slot_set->bucket(i));
} }
#endif
AlignedFree(reinterpret_cast<uint8_t*>(slot_set) - kInitialBucketsSize);
}
static size_t BucketsForSize(size_t size) {
return (size + (kTaggedSize * kBitsPerBucket) - 1) >>
(kTaggedSizeLog2 + kBitsPerBucketLog2);
} }
// The slot offset specifies a slot at address page_start_ + slot_offset. // The slot offset specifies a slot at address page_start_ + slot_offset.
// AccessMode defines whether there can be concurrent access on the buckets // AccessMode defines whether there can be concurrent access on the buckets
// or not. // or not.
template <AccessMode access_mode> template <AccessMode access_mode>
void Insert(int slot_offset) { void Insert(size_t slot_offset) {
int bucket_index, cell_index, bit_index; size_t bucket_index;
int cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index); SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]); Bucket* bucket = LoadBucket<access_mode>(bucket_index);
if (bucket == nullptr) { if (bucket == nullptr) {
bucket = AllocateBucket(); bucket = new Bucket;
if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) { if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
DeleteArray<uint32_t>(bucket); delete bucket;
bucket = LoadBucket<access_mode>(&buckets_[bucket_index]); bucket = LoadBucket<access_mode>(bucket_index);
} }
} }
// Check that monotonicity is preserved, i.e., once a bucket is set we do // Check that monotonicity is preserved, i.e., once a bucket is set we do
// not free it concurrently. // not free it concurrently.
DCHECK_NOT_NULL(bucket); DCHECK(bucket != nullptr);
DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index])); DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
uint32_t mask = 1u << bit_index; uint32_t mask = 1u << bit_index;
if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) { if ((bucket->LoadCell<access_mode>(cell_index) & mask) == 0) {
SetCellBits<access_mode>(&bucket[cell_index], mask); bucket->SetCellBits<access_mode>(cell_index, mask);
} }
} }
// The slot offset specifies a slot at address page_start_ + slot_offset. // The slot offset specifies a slot at address page_start_ + slot_offset.
// Returns true if the set contains the slot. // Returns true if the set contains the slot.
bool Contains(int slot_offset) { bool Contains(size_t slot_offset) {
int bucket_index, cell_index, bit_index; size_t bucket_index;
int cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index); SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket(&buckets_[bucket_index]); Bucket* bucket = LoadBucket(bucket_index);
if (bucket == nullptr) return false; if (bucket == nullptr) return false;
return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0; return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
} }
// The slot offset specifies a slot at address page_start_ + slot_offset. // The slot offset specifies a slot at address page_start_ + slot_offset.
void Remove(int slot_offset) { void Remove(size_t slot_offset) {
int bucket_index, cell_index, bit_index; size_t bucket_index;
int cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index); SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket(&buckets_[bucket_index]); Bucket* bucket = LoadBucket(bucket_index);
if (bucket != nullptr) { if (bucket != nullptr) {
uint32_t cell = LoadCell(&bucket[cell_index]); uint32_t cell = bucket->LoadCell(cell_index);
uint32_t bit_mask = 1u << bit_index; uint32_t bit_mask = 1u << bit_index;
if (cell & bit_mask) { if (cell & bit_mask) {
ClearCellBits(&bucket[cell_index], bit_mask); bucket->ClearCellBits(cell_index, bit_mask);
} }
} }
} }
// The slot offsets specify a range of slots at addresses: // The slot offsets specify a range of slots at addresses:
// [page_start_ + start_offset ... page_start_ + end_offset). // [page_start_ + start_offset ... page_start_ + end_offset).
void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) { void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
CHECK_LE(end_offset, 1 << kPageSizeBits); EmptyBucketMode mode) {
CHECK_LE(end_offset, buckets * kBitsPerBucket * kTaggedSize);
DCHECK_LE(start_offset, end_offset); DCHECK_LE(start_offset, end_offset);
int start_bucket, start_cell, start_bit; size_t start_bucket;
int start_cell, start_bit;
SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit); SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
int end_bucket, end_cell, end_bit; size_t end_bucket;
int end_cell, end_bit;
SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit); SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
uint32_t start_mask = (1u << start_bit) - 1; uint32_t start_mask = (1u << start_bit) - 1;
uint32_t end_mask = ~((1u << end_bit) - 1); uint32_t end_mask = ~((1u << end_bit) - 1);
Bucket bucket; Bucket* bucket;
if (start_bucket == end_bucket && start_cell == end_cell) { if (start_bucket == end_bucket && start_cell == end_cell) {
bucket = LoadBucket(&buckets_[start_bucket]); bucket = LoadBucket(start_bucket);
if (bucket != nullptr) { if (bucket != nullptr) {
ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask)); bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
} }
return; return;
} }
int current_bucket = start_bucket; size_t current_bucket = start_bucket;
int current_cell = start_cell; int current_cell = start_cell;
bucket = LoadBucket(&buckets_[current_bucket]); bucket = LoadBucket(current_bucket);
if (bucket != nullptr) { if (bucket != nullptr) {
ClearCellBits(&bucket[current_cell], ~start_mask); bucket->ClearCellBits(current_cell, ~start_mask);
} }
current_cell++; current_cell++;
if (current_bucket < end_bucket) { if (current_bucket < end_bucket) {
...@@ -136,7 +168,7 @@ class SlotSet : public Malloced { ...@@ -136,7 +168,7 @@ class SlotSet : public Malloced {
ReleaseBucket(current_bucket); ReleaseBucket(current_bucket);
} else { } else {
DCHECK(mode == KEEP_EMPTY_BUCKETS); DCHECK(mode == KEEP_EMPTY_BUCKETS);
bucket = LoadBucket(&buckets_[current_bucket]); bucket = LoadBucket(current_bucket);
if (bucket != nullptr) { if (bucket != nullptr) {
ClearBucket(bucket, 0, kCellsPerBucket); ClearBucket(bucket, 0, kCellsPerBucket);
} }
...@@ -145,26 +177,27 @@ class SlotSet : public Malloced { ...@@ -145,26 +177,27 @@ class SlotSet : public Malloced {
} }
// All buckets between start_bucket and end_bucket are cleared. // All buckets between start_bucket and end_bucket are cleared.
DCHECK(current_bucket == end_bucket); DCHECK(current_bucket == end_bucket);
if (current_bucket == kBuckets) return; if (current_bucket == buckets) return;
bucket = LoadBucket(&buckets_[current_bucket]); bucket = LoadBucket(current_bucket);
DCHECK(current_cell <= end_cell); DCHECK(current_cell <= end_cell);
if (bucket == nullptr) return; if (bucket == nullptr) return;
while (current_cell < end_cell) { while (current_cell < end_cell) {
StoreCell(&bucket[current_cell], 0); bucket->StoreCell(current_cell, 0);
current_cell++; current_cell++;
} }
// All cells between start_cell and end_cell are cleared. // All cells between start_cell and end_cell are cleared.
DCHECK(current_bucket == end_bucket && current_cell == end_cell); DCHECK(current_bucket == end_bucket && current_cell == end_cell);
ClearCellBits(&bucket[end_cell], ~end_mask); bucket->ClearCellBits(end_cell, ~end_mask);
} }
// The slot offset specifies a slot at address page_start_ + slot_offset. // The slot offset specifies a slot at address page_start_ + slot_offset.
bool Lookup(int slot_offset) { bool Lookup(size_t slot_offset) {
int bucket_index, cell_index, bit_index; size_t bucket_index;
int cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index); SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket(&buckets_[bucket_index]); Bucket* bucket = LoadBucket(bucket_index);
if (bucket == nullptr) return false; if (bucket == nullptr) return false;
return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0; return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
} }
// Iterate over all slots in the set and for each slot invoke the callback. // Iterate over all slots in the set and for each slot invoke the callback.
...@@ -181,22 +214,23 @@ class SlotSet : public Malloced { ...@@ -181,22 +214,23 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT; // else return REMOVE_SLOT;
// }); // });
template <typename Callback> template <typename Callback>
int Iterate(Address page_start, Callback callback, EmptyBucketMode mode) { size_t Iterate(Address page_start, size_t buckets, Callback callback,
int new_count = 0; EmptyBucketMode mode) {
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { size_t new_count = 0;
Bucket bucket = LoadBucket(&buckets_[bucket_index]); for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
Bucket* bucket = LoadBucket(bucket_index);
if (bucket != nullptr) { if (bucket != nullptr) {
int in_bucket_count = 0; size_t in_bucket_count = 0;
int cell_offset = bucket_index * kBitsPerBucket; size_t cell_offset = bucket_index << kBitsPerBucketLog2;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) { for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
uint32_t cell = LoadCell(&bucket[i]); uint32_t cell = bucket->LoadCell(i);
if (cell) { if (cell) {
uint32_t old_cell = cell; uint32_t old_cell = cell;
uint32_t mask = 0; uint32_t mask = 0;
while (cell) { while (cell) {
int bit_offset = base::bits::CountTrailingZeros(cell); int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset; uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2; Address slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
if (callback(MaybeObjectSlot(page_start + slot)) == KEEP_SLOT) { if (callback(MaybeObjectSlot(page_start + slot)) == KEEP_SLOT) {
++in_bucket_count; ++in_bucket_count;
} else { } else {
...@@ -206,7 +240,7 @@ class SlotSet : public Malloced { ...@@ -206,7 +240,7 @@ class SlotSet : public Malloced {
} }
uint32_t new_cell = old_cell & ~mask; uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) { if (old_cell != new_cell) {
ClearCellBits(&bucket[i], mask); bucket->ClearCellBits(i, mask);
} }
} }
} }
...@@ -216,18 +250,17 @@ class SlotSet : public Malloced { ...@@ -216,18 +250,17 @@ class SlotSet : public Malloced {
return new_count; return new_count;
} }
void FreeEmptyBuckets() { void FreeEmptyBuckets(size_t buckets) {
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]); Bucket* bucket = LoadBucket(bucket_index);
if (bucket != nullptr) { if (bucket != nullptr) {
if (IsEmptyBucket(bucket)) { if (bucket->IsEmpty()) {
ReleaseBucket(bucket_index); ReleaseBucket(bucket_index);
} }
} }
} }
} }
static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
static const int kCellsPerBucket = 32; static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5; static const int kCellsPerBucketLog2 = 5;
static const int kCellSizeBytesLog2 = 2; static const int kCellSizeBytesLog2 = 2;
...@@ -236,114 +269,139 @@ class SlotSet : public Malloced { ...@@ -236,114 +269,139 @@ class SlotSet : public Malloced {
static const int kBitsPerCellLog2 = 5; static const int kBitsPerCellLog2 = 5;
static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell; static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2; static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell; static const int kBucketsRegularPage =
(1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;
class Bucket : public Malloced {
uint32_t cells_[kCellsPerBucket];
static const int kSize = kBuckets * kSystemPointerSize; public:
Bucket() {
for (int i = 0; i < kCellsPerBucket; i++) {
cells_[i] = 0;
}
}
using Bucket = uint32_t*; uint32_t* cells() { return cells_; }
uint32_t* cell(int cell_index) { return cells() + cell_index; }
private: template <AccessMode access_mode = AccessMode::ATOMIC>
Bucket AllocateBucket() { uint32_t LoadCell(int cell_index) {
Bucket result = NewArray<uint32_t>(kCellsPerBucket); DCHECK_LT(cell_index, kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) { if (access_mode == AccessMode::ATOMIC)
result[i] = 0; return base::AsAtomic32::Acquire_Load(cells() + cell_index);
return *(cells() + cell_index);
} }
return result;
}
void ClearBucket(Bucket bucket, int start_cell, int end_cell) { template <AccessMode access_mode = AccessMode::ATOMIC>
void SetCellBits(int cell_index, uint32_t mask) {
if (access_mode == AccessMode::ATOMIC) {
base::AsAtomic32::SetBits(cell(cell_index), mask, mask);
} else {
uint32_t* c = cell(cell_index);
*c = (*c & ~mask) | mask;
}
}
void ClearCellBits(int cell_index, uint32_t mask) {
base::AsAtomic32::SetBits(cell(cell_index), 0u, mask);
}
void StoreCell(int cell_index, uint32_t value) {
base::AsAtomic32::Release_Store(cell(cell_index), value);
}
bool IsEmpty() {
for (int i = 0; i < kCellsPerBucket; i++) {
if (cells_[i] != 0) {
return false;
}
}
return true;
}
};
private:
void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
DCHECK_GE(start_cell, 0); DCHECK_GE(start_cell, 0);
DCHECK_LE(end_cell, kCellsPerBucket); DCHECK_LE(end_cell, kCellsPerBucket);
int current_cell = start_cell; int current_cell = start_cell;
while (current_cell < kCellsPerBucket) { while (current_cell < kCellsPerBucket) {
StoreCell(&bucket[current_cell], 0); bucket->StoreCell(current_cell, 0);
current_cell++; current_cell++;
} }
} }
void ReleaseBucket(int bucket_index) { void ReleaseBucket(size_t bucket_index) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]); Bucket* bucket = LoadBucket(bucket_index);
StoreBucket(&buckets_[bucket_index], nullptr); StoreBucket(bucket_index, nullptr);
DeleteArray<uint32_t>(bucket); delete bucket;
} }
template <AccessMode access_mode = AccessMode::ATOMIC> template <AccessMode access_mode = AccessMode::ATOMIC>
Bucket LoadBucket(Bucket* bucket) { Bucket* LoadBucket(Bucket** bucket) {
if (access_mode == AccessMode::ATOMIC) if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(bucket); return base::AsAtomicPointer::Acquire_Load(bucket);
return *bucket; return *bucket;
} }
template <AccessMode access_mode = AccessMode::ATOMIC> template <AccessMode access_mode = AccessMode::ATOMIC>
void StoreBucket(Bucket* bucket, Bucket value) { Bucket* LoadBucket(size_t bucket_index) {
if (access_mode == AccessMode::ATOMIC) { return LoadBucket(bucket(bucket_index));
base::AsAtomicPointer::Release_Store(bucket, value);
} else {
*bucket = value;
}
}
bool IsEmptyBucket(Bucket bucket) {
for (int i = 0; i < kCellsPerBucket; i++) {
if (LoadCell(&bucket[i])) {
return false;
}
}
return true;
} }
template <AccessMode access_mode = AccessMode::ATOMIC> template <AccessMode access_mode = AccessMode::ATOMIC>
bool SwapInNewBucket(Bucket* bucket, Bucket value) { void StoreBucket(Bucket** bucket, Bucket* value) {
if (access_mode == AccessMode::ATOMIC) { if (access_mode == AccessMode::ATOMIC) {
return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr, base::AsAtomicPointer::Release_Store(bucket, value);
value) == nullptr;
} else { } else {
DCHECK_NULL(*bucket);
*bucket = value; *bucket = value;
return true;
} }
} }
template <AccessMode access_mode = AccessMode::ATOMIC> template <AccessMode access_mode = AccessMode::ATOMIC>
uint32_t LoadCell(uint32_t* cell) { void StoreBucket(size_t bucket_index, Bucket* value) {
if (access_mode == AccessMode::ATOMIC) StoreBucket(bucket(bucket_index), value);
return base::AsAtomic32::Acquire_Load(cell);
return *cell;
}
void StoreCell(uint32_t* cell, uint32_t value) {
base::AsAtomic32::Release_Store(cell, value);
}
void ClearCellBits(uint32_t* cell, uint32_t mask) {
base::AsAtomic32::SetBits(cell, 0u, mask);
} }
template <AccessMode access_mode = AccessMode::ATOMIC> template <AccessMode access_mode = AccessMode::ATOMIC>
void SetCellBits(uint32_t* cell, uint32_t mask) { bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
Bucket** b = bucket(bucket_index);
if (access_mode == AccessMode::ATOMIC) { if (access_mode == AccessMode::ATOMIC) {
base::AsAtomic32::SetBits(cell, mask, mask); return base::AsAtomicPointer::Release_CompareAndSwap(b, nullptr, value) ==
nullptr;
} else { } else {
*cell = (*cell & ~mask) | mask; DCHECK_NULL(*b);
*b = value;
return true;
} }
} }
// Converts the slot offset into bucket/cell/bit index. // Converts the slot offset into bucket/cell/bit index.
void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index, static void SlotToIndices(size_t slot_offset, size_t* bucket_index,
int* bit_index) { int* cell_index, int* bit_index) {
DCHECK(IsAligned(slot_offset, kTaggedSize)); DCHECK(IsAligned(slot_offset, kTaggedSize));
int slot = slot_offset >> kTaggedSizeLog2; size_t slot = slot_offset >> kTaggedSizeLog2;
DCHECK(slot >= 0 && slot <= kMaxSlots);
*bucket_index = slot >> kBitsPerBucketLog2; *bucket_index = slot >> kBitsPerBucketLog2;
*cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1); *cell_index =
*bit_index = slot & (kBitsPerCell - 1); static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
*bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
} }
Bucket buckets_[kBuckets]; Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }
#ifdef DEBUG
size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
static const int kInitialBucketsSize = sizeof(size_t);
#else
static const int kInitialBucketsSize = 0;
#endif
}; };
STATIC_ASSERT(std::is_standard_layout<SlotSet>::value); STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
STATIC_ASSERT(sizeof(SlotSet) == SlotSet::kSize); STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);
enum SlotType { enum SlotType {
FULL_EMBEDDED_OBJECT_SLOT, FULL_EMBEDDED_OBJECT_SLOT,
......
...@@ -1090,6 +1090,12 @@ size_t Page::ShrinkToHighWaterMark() { ...@@ -1090,6 +1090,12 @@ size_t Page::ShrinkToHighWaterMark() {
// Ensure that no objects will be allocated on this page. // Ensure that no objects will be allocated on this page.
DCHECK_EQ(0u, AvailableInFreeList()); DCHECK_EQ(0u, AvailableInFreeList());
// Ensure that slot sets are empty. Otherwise the buckets for the shrinked
// area would not be freed when deallocating this page.
DCHECK_NULL(slot_set<OLD_TO_NEW>());
DCHECK_NULL(slot_set<OLD_TO_OLD>());
DCHECK_NULL(sweeping_slot_set());
size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()), size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
MemoryAllocator::GetCommitPageSize()); MemoryAllocator::GetCommitPageSize());
if (unused > 0) { if (unused > 0) {
...@@ -1416,12 +1422,6 @@ void MemoryChunk::ReleaseAllAllocatedMemory() { ...@@ -1416,12 +1422,6 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap(); if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
} }
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
DCHECK_LT(0, pages);
return new SlotSet[pages];
}
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>(); template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>(); template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
...@@ -1435,11 +1435,11 @@ SlotSet* MemoryChunk::AllocateSweepingSlotSet() { ...@@ -1435,11 +1435,11 @@ SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
} }
SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) { SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
SlotSet* new_slot_set = AllocateAndInitializeSlotSet(size(), address()); SlotSet* new_slot_set = SlotSet::Allocate(buckets());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap( SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
slot_set, nullptr, new_slot_set); slot_set, nullptr, new_slot_set);
if (old_slot_set != nullptr) { if (old_slot_set != nullptr) {
delete[] new_slot_set; SlotSet::Delete(new_slot_set, buckets());
new_slot_set = old_slot_set; new_slot_set = old_slot_set;
} }
DCHECK(new_slot_set); DCHECK(new_slot_set);
...@@ -1460,7 +1460,7 @@ void MemoryChunk::ReleaseSweepingSlotSet() { ...@@ -1460,7 +1460,7 @@ void MemoryChunk::ReleaseSweepingSlotSet() {
void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) { void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
if (*slot_set) { if (*slot_set) {
delete[] * slot_set; SlotSet::Delete(*slot_set, buckets());
*slot_set = nullptr; *slot_set = nullptr;
} }
} }
......
...@@ -76,7 +76,7 @@ class FreeStoreAllocationPolicy { ...@@ -76,7 +76,7 @@ class FreeStoreAllocationPolicy {
void* AllocWithRetry(size_t size); void* AllocWithRetry(size_t size);
V8_EXPORT_PRIVATE void* AlignedAlloc(size_t size, size_t alignment); V8_EXPORT_PRIVATE void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void* ptr); V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns platfrom page allocator instance. Guaranteed to be a valid pointer. // Returns platfrom page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator(); V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
......
...@@ -14,45 +14,56 @@ ...@@ -14,45 +14,56 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
TEST(SlotSet, BucketsForSize) {
EXPECT_EQ(static_cast<size_t>(SlotSet::kBucketsRegularPage),
SlotSet::BucketsForSize(Page::kPageSize));
EXPECT_EQ(static_cast<size_t>(SlotSet::kBucketsRegularPage) * 2,
SlotSet::BucketsForSize(Page::kPageSize * 2));
}
TEST(SlotSet, InsertAndLookup1) { TEST(SlotSet, InsertAndLookup1) {
SlotSet set; SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set->Lookup(i));
} }
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
set.Insert<AccessMode::ATOMIC>(i); set->Insert<AccessMode::ATOMIC>(i);
} }
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_TRUE(set.Lookup(i)); EXPECT_TRUE(set->Lookup(i));
} }
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
} }
TEST(SlotSet, InsertAndLookup2) { TEST(SlotSet, InsertAndLookup2) {
SlotSet set; SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) { if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i); set->Insert<AccessMode::ATOMIC>(i);
} }
} }
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) { if (i % 7 == 0) {
EXPECT_TRUE(set.Lookup(i)); EXPECT_TRUE(set->Lookup(i));
} else { } else {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set->Lookup(i));
} }
} }
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
} }
TEST(SlotSet, Iterate) { TEST(SlotSet, Iterate) {
SlotSet set; SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) { if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i); set->Insert<AccessMode::ATOMIC>(i);
} }
} }
set.Iterate( set->Iterate(
kNullAddress, kNullAddress, SlotSet::kBucketsRegularPage,
[](MaybeObjectSlot slot) { [](MaybeObjectSlot slot) {
if (slot.address() % 3 == 0) { if (slot.address() % 3 == 0) {
return KEEP_SLOT; return KEEP_SLOT;
...@@ -64,56 +75,62 @@ TEST(SlotSet, Iterate) { ...@@ -64,56 +75,62 @@ TEST(SlotSet, Iterate) {
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 21 == 0) { if (i % 21 == 0) {
EXPECT_TRUE(set.Lookup(i)); EXPECT_TRUE(set->Lookup(i));
} else { } else {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set->Lookup(i));
} }
} }
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
} }
TEST(SlotSet, Remove) { TEST(SlotSet, Remove) {
SlotSet set; SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) { if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i); set->Insert<AccessMode::ATOMIC>(i);
} }
} }
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 3 != 0) { if (i % 3 != 0) {
set.Remove(i); set->Remove(i);
} }
} }
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 21 == 0) { if (i % 21 == 0) {
EXPECT_TRUE(set.Lookup(i)); EXPECT_TRUE(set->Lookup(i));
} else { } else {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set->Lookup(i));
} }
} }
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
} }
void CheckRemoveRangeOn(uint32_t start, uint32_t end) { void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
SlotSet set; SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
uint32_t first = start == 0 ? 0 : start - kTaggedSize; uint32_t first = start == 0 ? 0 : start - kTaggedSize;
uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end; uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end;
for (const auto mode : for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) { {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
for (uint32_t i = first; i <= last; i += kTaggedSize) { for (uint32_t i = first; i <= last; i += kTaggedSize) {
set.Insert<AccessMode::ATOMIC>(i); set->Insert<AccessMode::ATOMIC>(i);
} }
set.RemoveRange(start, end, mode); set->RemoveRange(start, end, SlotSet::kBucketsRegularPage, mode);
if (first != start) { if (first != start) {
EXPECT_TRUE(set.Lookup(first)); EXPECT_TRUE(set->Lookup(first));
} }
if (last == end) { if (last == end) {
EXPECT_TRUE(set.Lookup(last)); EXPECT_TRUE(set->Lookup(last));
} }
for (uint32_t i = start; i < end; i += kTaggedSize) { for (uint32_t i = start; i < end; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set->Lookup(i));
} }
} }
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
} }
TEST(SlotSet, RemoveRange) { TEST(SlotSet, RemoveRange) {
...@@ -132,15 +149,16 @@ TEST(SlotSet, RemoveRange) { ...@@ -132,15 +149,16 @@ TEST(SlotSet, RemoveRange) {
} }
} }
} }
SlotSet set; SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (const auto mode : for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) { {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
set.Insert<AccessMode::ATOMIC>(Page::kPageSize / 2); set->Insert<AccessMode::ATOMIC>(Page::kPageSize / 2);
set.RemoveRange(0, Page::kPageSize, mode); set->RemoveRange(0, Page::kPageSize, SlotSet::kBucketsRegularPage, mode);
for (uint32_t i = 0; i < Page::kPageSize; i += kTaggedSize) { for (uint32_t i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set->Lookup(i));
} }
} }
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
} }
TEST(TypedSlotSet, Iterate) { TEST(TypedSlotSet, Iterate) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment