Commit 572f536a authored by Dominik Inführ, committed by Commit Bot

[heap] Change SlotSet representation to store dynamic number of buckets

Change the SlotSet representation to a variable-sized array of pointers
to buckets. The length of the array (i.e. the number of buckets) depends
on the size of the page.
Before this change, a SlotSet always stored a fixed number of buckets,
so large pages needed an array of SlotSets to cover the whole object.

Now regular and large pages both use a single SlotSet object, which
contains all bucket pointers.
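
A minimal standalone sketch of the new layout, assuming 64-bit tagged slots
and omitting the debug-only bucket-count prefix and the atomic accessors;
the names mirror the patch, but the code below is illustrative, not V8's
implementation:

  #include <cstdint>
  #include <cstdlib>

  namespace sketch {

  constexpr size_t kTaggedSize = 8;     // assumption: 64-bit tagged slots
  constexpr int kCellsPerBucket = 32;   // 32 cells x 32 bits, as in the patch
  constexpr int kBitsPerCell = 32;
  constexpr size_t kBitsPerBucket = kCellsPerBucket * kBitsPerCell;

  struct Bucket {
    uint32_t cells[kCellsPerBucket] = {};  // one bit per slot offset
  };

  // The SlotSet object *is* the bucket-pointer array: Allocate() returns a
  // zeroed block of `buckets` Bucket* pointers and the methods index into it.
  class SlotSet {
   public:
    static size_t BucketsForSize(size_t page_size) {
      // ceil(page_size / (kTaggedSize * kBitsPerBucket))
      return (page_size + kTaggedSize * kBitsPerBucket - 1) /
             (kTaggedSize * kBitsPerBucket);
    }

    static SlotSet* Allocate(size_t buckets) {
      return static_cast<SlotSet*>(calloc(buckets, sizeof(Bucket*)));
    }

    static void Delete(SlotSet* set, size_t buckets) {
      if (set == nullptr) return;
      for (size_t i = 0; i < buckets; i++) delete set->bucket(i);
      free(set);
    }

    void Insert(size_t slot_offset) {
      size_t slot = slot_offset / kTaggedSize;
      Bucket** b = &buckets()[slot / kBitsPerBucket];
      if (*b == nullptr) *b = new Bucket();
      size_t in_bucket = slot % kBitsPerBucket;
      (*b)->cells[in_bucket / kBitsPerCell] |= 1u << (in_bucket % kBitsPerCell);
    }

    bool Contains(size_t slot_offset) {
      size_t slot = slot_offset / kTaggedSize;
      Bucket* b = bucket(slot / kBitsPerBucket);
      if (b == nullptr) return false;
      size_t in_bucket = slot % kBitsPerBucket;
      return (b->cells[in_bucket / kBitsPerCell] >>
              (in_bucket % kBitsPerCell)) & 1u;
    }

   private:
    Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
    Bucket* bucket(size_t i) { return buckets()[i]; }
  };

  }  // namespace sketch

Because the set no longer knows its own length, callers compute the bucket
count once (SlotSet::BucketsForSize(chunk size)) and pass it to Allocate,
Iterate, RemoveRange, FreeEmptyBuckets and Delete, as the updated
MemoryChunk code and unit tests below do.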

Change-Id: I2d8d62fad54b58409cd39ae7a52c64497ee7c261
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1876811
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64635}
parent b875f466
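
The first hunks below add an AcquireRelease_CompareAndSwap primitive to
base/atomicops; MemoryChunk::AllocateSlotSet (further down) uses it to
lazily install a freshly allocated SlotSet on the chunk. A rough standalone
equivalent of that install pattern, sketched with std::atomic instead of
V8's base helpers (names here are illustrative):

  #include <atomic>

  struct SlotSetStub {};  // stands in for SlotSet in this sketch

  // Install a slot set on a chunk if none exists yet; if another thread won
  // the race, discard ours and adopt the already-installed one.
  SlotSetStub* LazyInstall(std::atomic<SlotSetStub*>* slot) {
    SlotSetStub* fresh = new SlotSetStub();
    SlotSetStub* expected = nullptr;
    if (!slot->compare_exchange_strong(expected, fresh,
                                       std::memory_order_acq_rel,
                                       std::memory_order_acquire)) {
      delete fresh;  // lost the race; `expected` now holds the winner
      return expected;
    }
    return fresh;
  }
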
......@@ -104,6 +104,16 @@ class AsAtomicImpl {
cast_helper<T>::to_storage_type(new_value)));
}
template <typename T>
static T AcquireRelease_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(AtomicStorageType));
return cast_helper<T>::to_return_type(base::AcquireRelease_CompareAndSwap(
to_storage_addr(addr), cast_helper<T>::to_storage_type(old_value),
cast_helper<T>::to_storage_type(new_value)));
}
// Atomically sets bits selected by the mask to the given value.
// Returns false if the bits are already set as needed.
template <typename T>
......
......@@ -97,6 +97,8 @@ Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value);
void SeqCst_MemoryFence();
void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
......@@ -120,9 +122,10 @@ Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
Atomic64 new_value);
Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value);
void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
......
......@@ -62,6 +62,13 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline AtomicWord AcquireRelease_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::base::AcquireRelease_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline void Relaxed_Store(volatile AtomicWord* ptr, AtomicWord value) {
Relaxed_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
}
......
......@@ -101,6 +101,14 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return old_value;
}
inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
return old_value;
}
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
......@@ -171,6 +179,14 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
return old_value;
}
inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
return old_value;
}
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
......
......@@ -89,6 +89,15 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return old_value;
}
inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_acq_rel, std::memory_order_acquire);
return old_value;
}
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed);
......@@ -175,6 +184,15 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
return old_value;
}
inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
std::atomic_compare_exchange_strong_explicit(
helper::to_std_atomic(ptr), &old_value, new_value,
std::memory_order_acq_rel, std::memory_order_acquire);
return old_value;
}
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_relaxed);
......
......@@ -337,11 +337,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
TNode<IntPtrT> page = PageFromAddress(object);
// Load address of SlotSet
TNode<IntPtrT> slot_set_array = LoadSlotSetArray(page, &slow_path);
TNode<IntPtrT> slot_set = LoadSlotSet(page, &slow_path);
TNode<IntPtrT> slot_offset = IntPtrSub(slot, page);
// Load bucket
TNode<IntPtrT> bucket = LoadBucket(slot_set_array, slot_offset, &slow_path);
TNode<IntPtrT> bucket = LoadBucket(slot_set, slot_offset, &slow_path);
// Update cell
SetBitInCell(bucket, slot_offset);
......@@ -352,23 +352,21 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
InsertIntoRememberedSetAndGotoSlow(object, slot, mode, next);
}
TNode<IntPtrT> LoadSlotSetArray(TNode<IntPtrT> page, Label* slow_path) {
TNode<IntPtrT> slot_set_array = UncheckedCast<IntPtrT>(
TNode<IntPtrT> LoadSlotSet(TNode<IntPtrT> page, Label* slow_path) {
TNode<IntPtrT> slot_set = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset)));
GotoIf(WordEqual(slot_set_array, IntPtrConstant(0)), slow_path);
GotoIf(WordEqual(slot_set, IntPtrConstant(0)), slow_path);
return slot_set_array;
return slot_set;
}
TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set_array,
TNode<WordT> slot_offset, Label* slow_path) {
// Assume here that SlotSet only contains of buckets
DCHECK_EQ(SlotSet::kSize, SlotSet::kBuckets * sizeof(SlotSet::Bucket));
TNode<IntPtrT> LoadBucket(TNode<IntPtrT> slot_set, TNode<WordT> slot_offset,
Label* slow_path) {
TNode<WordT> bucket_index =
WordShr(slot_offset, SlotSet::kBitsPerBucketLog2 + kTaggedSizeLog2);
TNode<IntPtrT> bucket = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), slot_set_array,
Load(MachineType::Pointer(), slot_set,
WordShl(bucket_index, kSystemPointerSizeLog2)));
GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path);
return bucket;
......
......@@ -112,6 +112,8 @@ class BasicMemoryChunk {
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
size_t buckets() const { return SlotSet::BucketsForSize(size()); }
Address area_start() const { return area_start_; }
Address area_end() const { return area_end_; }
......
......@@ -26,28 +26,24 @@ class RememberedSetOperations {
static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Insert<access_mode>(offset %
Page::kPageSize);
slot_set->Insert<access_mode>(offset);
}
template <typename Callback>
static int Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
static int Iterate(SlotSet* slot_set, MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
int number_slots = 0;
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) {
number_slots += slots[page].Iterate(
chunk->address() + page * Page::kPageSize, callback, mode);
}
int slots = 0;
if (slot_set != nullptr) {
slots +=
slot_set->Iterate(chunk->address(), chunk->buckets(), callback, mode);
}
return number_slots;
return slots;
}
static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
if (slot_set != nullptr) {
uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
slot_set->Remove(offset);
}
}
......@@ -57,35 +53,9 @@ class RememberedSetOperations {
uintptr_t start_offset = start - chunk->address();
uintptr_t end_offset = end - chunk->address();
DCHECK_LT(start_offset, end_offset);
if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) {
slot_set->RemoveRange(static_cast<int>(start_offset),
static_cast<int>(end_offset), mode);
} else {
// The large page has multiple slot sets.
// Compute slot set indicies for the range [start_offset, end_offset).
int start_chunk = static_cast<int>(start_offset / Page::kPageSize);
int end_chunk = static_cast<int>((end_offset - 1) / Page::kPageSize);
int offset_in_start_chunk =
static_cast<int>(start_offset % Page::kPageSize);
// Note that using end_offset % Page::kPageSize would be incorrect
// because end_offset is one beyond the last slot to clear.
int offset_in_end_chunk = static_cast<int>(
end_offset - static_cast<uintptr_t>(end_chunk) * Page::kPageSize);
if (start_chunk == end_chunk) {
slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
offset_in_end_chunk, mode);
} else {
// Clear all slots from start_offset to the end of first chunk.
slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
Page::kPageSize, mode);
// Clear all slots in intermediate chunks.
for (int i = start_chunk + 1; i < end_chunk; i++) {
slot_set[i].RemoveRange(0, Page::kPageSize, mode);
}
// Clear slots from the beginning of the last page to end_offset.
slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk, mode);
}
}
slot_set->RemoveRange(static_cast<int>(start_offset),
static_cast<int>(end_offset), chunk->buckets(),
mode);
}
}
};
......@@ -115,8 +85,7 @@ class RememberedSet : public AllStatic {
return false;
}
uintptr_t offset = slot_addr - chunk->address();
return slot_set[offset / Page::kPageSize].Contains(offset %
Page::kPageSize);
return slot_set->Contains(offset);
}
// Given a page and a slot in that page, this function removes the slot from
......@@ -155,12 +124,12 @@ class RememberedSet : public AllStatic {
OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
SlotSet* sweeping_slots =
SlotSet* slot_set = chunk->slot_set<type>();
SlotSet* sweeping_slot_set =
type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
if (slots != nullptr || sweeping_slots != nullptr ||
typed_slots != nullptr ||
TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
if (slot_set != nullptr || sweeping_slot_set != nullptr ||
typed_slot_set != nullptr ||
chunk->invalidated_slots<type>() != nullptr) {
callback(chunk);
}
......@@ -176,18 +145,15 @@ class RememberedSet : public AllStatic {
template <typename Callback>
static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->slot_set<type>();
return RememberedSetOperations::Iterate(slots, chunk, callback, mode);
SlotSet* slot_set = chunk->slot_set<type>();
return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
}
static void FreeEmptyBuckets(MemoryChunk* chunk) {
DCHECK(type == OLD_TO_NEW);
SlotSet* slots = chunk->slot_set<type>();
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) {
slots[page].FreeEmptyBuckets();
}
SlotSet* slot_set = chunk->slot_set<type>();
if (slot_set != nullptr) {
slot_set->FreeEmptyBuckets(chunk->buckets());
}
}
......@@ -202,20 +168,20 @@ class RememberedSet : public AllStatic {
slot_set->Insert(slot_type, offset);
}
static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> slots) {
static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> other) {
TypedSlotSet* slot_set = page->typed_slot_set<type>();
if (slot_set == nullptr) {
slot_set = page->AllocateTypedSlotSet<type>();
}
slot_set->Merge(slots.get());
slot_set->Merge(other.get());
}
// Given a page and a range of typed slots in that page, this function removes
// the slots from the remembered set.
static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
TypedSlotSet* slots = page->typed_slot_set<type>();
if (slots != nullptr) {
slots->Iterate(
TypedSlotSet* slot_set = page->typed_slot_set<type>();
if (slot_set != nullptr) {
slot_set->Iterate(
[=](SlotType slot_type, Address slot_addr) {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
: KEEP_SLOT;
......@@ -242,9 +208,10 @@ class RememberedSet : public AllStatic {
// and return SlotCallbackResult.
template <typename Callback>
static void IterateTyped(MemoryChunk* chunk, Callback callback) {
TypedSlotSet* slots = chunk->typed_slot_set<type>();
if (slots != nullptr) {
int new_count = slots->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
TypedSlotSet* slot_set = chunk->typed_slot_set<type>();
if (slot_set != nullptr) {
int new_count =
slot_set->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
if (new_count == 0) {
chunk->ReleaseTypedSlotSet<type>();
}
......@@ -382,8 +349,8 @@ class RememberedSetSweeping {
template <typename Callback>
static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->sweeping_slot_set();
return RememberedSetOperations::Iterate(slots, chunk, callback, mode);
SlotSet* slot_set = chunk->sweeping_slot_set();
return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
}
};
......
......@@ -24,100 +24,132 @@ enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Data structure for maintaining a set of slots in a standard (non-large)
// page.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// splits the valid slot offset range into buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
class SlotSet {
public:
enum EmptyBucketMode {
FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
};
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
StoreBucket(&buckets_[i], nullptr);
SlotSet() = delete;
static SlotSet* Allocate(size_t buckets) {
size_t buckets_size = buckets * sizeof(Bucket*);
void* allocation =
AlignedAlloc(kInitialBucketsSize + buckets_size, kSystemPointerSize);
SlotSet* slot_set = reinterpret_cast<SlotSet*>(
reinterpret_cast<uint8_t*>(allocation) + kInitialBucketsSize);
#ifdef DEBUG
*slot_set->initial_buckets() = buckets;
#endif
for (size_t i = 0; i < buckets; i++) {
*slot_set->bucket(i) = nullptr;
}
return slot_set;
}
~SlotSet() {
for (int i = 0; i < kBuckets; i++) {
ReleaseBucket(i);
static void Delete(SlotSet* slot_set, size_t buckets) {
if (slot_set == nullptr) return;
for (size_t i = 0; i < buckets; i++) {
slot_set->ReleaseBucket(i);
}
#ifdef DEBUG
for (size_t i = buckets; i < *slot_set->initial_buckets(); i++) {
DCHECK_NULL(*slot_set->bucket(i));
}
#endif
AlignedFree(reinterpret_cast<uint8_t*>(slot_set) - kInitialBucketsSize);
}
static size_t BucketsForSize(size_t size) {
return (size + (kTaggedSize * kBitsPerBucket) - 1) >>
(kTaggedSizeLog2 + kBitsPerBucketLog2);
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
// AccessMode defines whether there can be concurrent access on the buckets
// or not.
template <AccessMode access_mode>
void Insert(int slot_offset) {
int bucket_index, cell_index, bit_index;
void Insert(size_t slot_offset) {
size_t bucket_index;
int cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
Bucket* bucket = LoadBucket<access_mode>(bucket_index);
if (bucket == nullptr) {
bucket = AllocateBucket();
if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
DeleteArray<uint32_t>(bucket);
bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
bucket = new Bucket;
if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
delete bucket;
bucket = LoadBucket<access_mode>(bucket_index);
}
}
// Check that monotonicity is preserved, i.e., once a bucket is set we do
// not free it concurrently.
DCHECK_NOT_NULL(bucket);
DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
DCHECK(bucket != nullptr);
DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
uint32_t mask = 1u << bit_index;
if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
SetCellBits<access_mode>(&bucket[cell_index], mask);
if ((bucket->LoadCell<access_mode>(cell_index) & mask) == 0) {
bucket->SetCellBits<access_mode>(cell_index, mask);
}
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
// Returns true if the set contains the slot.
bool Contains(int slot_offset) {
int bucket_index, cell_index, bit_index;
bool Contains(size_t slot_offset) {
size_t bucket_index;
int cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
Bucket* bucket = LoadBucket(bucket_index);
if (bucket == nullptr) return false;
return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
void Remove(int slot_offset) {
int bucket_index, cell_index, bit_index;
void Remove(size_t slot_offset) {
size_t bucket_index;
int cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
Bucket* bucket = LoadBucket(bucket_index);
if (bucket != nullptr) {
uint32_t cell = LoadCell(&bucket[cell_index]);
uint32_t cell = bucket->LoadCell(cell_index);
uint32_t bit_mask = 1u << bit_index;
if (cell & bit_mask) {
ClearCellBits(&bucket[cell_index], bit_mask);
bucket->ClearCellBits(cell_index, bit_mask);
}
}
}
// The slot offsets specify a range of slots at addresses:
// [page_start_ + start_offset ... page_start_ + end_offset).
void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
CHECK_LE(end_offset, 1 << kPageSizeBits);
void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
EmptyBucketMode mode) {
CHECK_LE(end_offset, buckets * kBitsPerBucket * kTaggedSize);
DCHECK_LE(start_offset, end_offset);
int start_bucket, start_cell, start_bit;
size_t start_bucket;
int start_cell, start_bit;
SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
int end_bucket, end_cell, end_bit;
size_t end_bucket;
int end_cell, end_bit;
SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
uint32_t start_mask = (1u << start_bit) - 1;
uint32_t end_mask = ~((1u << end_bit) - 1);
Bucket bucket;
Bucket* bucket;
if (start_bucket == end_bucket && start_cell == end_cell) {
bucket = LoadBucket(&buckets_[start_bucket]);
bucket = LoadBucket(start_bucket);
if (bucket != nullptr) {
ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
}
return;
}
int current_bucket = start_bucket;
size_t current_bucket = start_bucket;
int current_cell = start_cell;
bucket = LoadBucket(&buckets_[current_bucket]);
bucket = LoadBucket(current_bucket);
if (bucket != nullptr) {
ClearCellBits(&bucket[current_cell], ~start_mask);
bucket->ClearCellBits(current_cell, ~start_mask);
}
current_cell++;
if (current_bucket < end_bucket) {
......@@ -136,7 +168,7 @@ class SlotSet : public Malloced {
ReleaseBucket(current_bucket);
} else {
DCHECK(mode == KEEP_EMPTY_BUCKETS);
bucket = LoadBucket(&buckets_[current_bucket]);
bucket = LoadBucket(current_bucket);
if (bucket != nullptr) {
ClearBucket(bucket, 0, kCellsPerBucket);
}
......@@ -145,26 +177,27 @@ class SlotSet : public Malloced {
}
// All buckets between start_bucket and end_bucket are cleared.
DCHECK(current_bucket == end_bucket);
if (current_bucket == kBuckets) return;
bucket = LoadBucket(&buckets_[current_bucket]);
if (current_bucket == buckets) return;
bucket = LoadBucket(current_bucket);
DCHECK(current_cell <= end_cell);
if (bucket == nullptr) return;
while (current_cell < end_cell) {
StoreCell(&bucket[current_cell], 0);
bucket->StoreCell(current_cell, 0);
current_cell++;
}
// All cells between start_cell and end_cell are cleared.
DCHECK(current_bucket == end_bucket && current_cell == end_cell);
ClearCellBits(&bucket[end_cell], ~end_mask);
bucket->ClearCellBits(end_cell, ~end_mask);
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
bool Lookup(int slot_offset) {
int bucket_index, cell_index, bit_index;
bool Lookup(size_t slot_offset) {
size_t bucket_index;
int cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
Bucket* bucket = LoadBucket(bucket_index);
if (bucket == nullptr) return false;
return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
}
// Iterate over all slots in the set and for each slot invoke the callback.
......@@ -181,22 +214,23 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT;
// });
template <typename Callback>
int Iterate(Address page_start, Callback callback, EmptyBucketMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
size_t Iterate(Address page_start, size_t buckets, Callback callback,
EmptyBucketMode mode) {
size_t new_count = 0;
for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
Bucket* bucket = LoadBucket(bucket_index);
if (bucket != nullptr) {
int in_bucket_count = 0;
int cell_offset = bucket_index * kBitsPerBucket;
size_t in_bucket_count = 0;
size_t cell_offset = bucket_index << kBitsPerBucketLog2;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
uint32_t cell = LoadCell(&bucket[i]);
uint32_t cell = bucket->LoadCell(i);
if (cell) {
uint32_t old_cell = cell;
uint32_t mask = 0;
while (cell) {
int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
Address slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
if (callback(MaybeObjectSlot(page_start + slot)) == KEEP_SLOT) {
++in_bucket_count;
} else {
......@@ -206,7 +240,7 @@ class SlotSet : public Malloced {
}
uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) {
ClearCellBits(&bucket[i], mask);
bucket->ClearCellBits(i, mask);
}
}
}
......@@ -216,18 +250,17 @@ class SlotSet : public Malloced {
return new_count;
}
void FreeEmptyBuckets() {
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
void FreeEmptyBuckets(size_t buckets) {
for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
Bucket* bucket = LoadBucket(bucket_index);
if (bucket != nullptr) {
if (IsEmptyBucket(bucket)) {
if (bucket->IsEmpty()) {
ReleaseBucket(bucket_index);
}
}
}
}
static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
static const int kCellSizeBytesLog2 = 2;
......@@ -236,114 +269,139 @@ class SlotSet : public Malloced {
static const int kBitsPerCellLog2 = 5;
static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
static const int kBucketsRegularPage =
(1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;
class Bucket : public Malloced {
uint32_t cells_[kCellsPerBucket];
static const int kSize = kBuckets * kSystemPointerSize;
public:
Bucket() {
for (int i = 0; i < kCellsPerBucket; i++) {
cells_[i] = 0;
}
}
using Bucket = uint32_t*;
uint32_t* cells() { return cells_; }
uint32_t* cell(int cell_index) { return cells() + cell_index; }
private:
Bucket AllocateBucket() {
Bucket result = NewArray<uint32_t>(kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) {
result[i] = 0;
template <AccessMode access_mode = AccessMode::ATOMIC>
uint32_t LoadCell(int cell_index) {
DCHECK_LT(cell_index, kCellsPerBucket);
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomic32::Acquire_Load(cells() + cell_index);
return *(cells() + cell_index);
}
return result;
}
void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
template <AccessMode access_mode = AccessMode::ATOMIC>
void SetCellBits(int cell_index, uint32_t mask) {
if (access_mode == AccessMode::ATOMIC) {
base::AsAtomic32::SetBits(cell(cell_index), mask, mask);
} else {
uint32_t* c = cell(cell_index);
*c = (*c & ~mask) | mask;
}
}
void ClearCellBits(int cell_index, uint32_t mask) {
base::AsAtomic32::SetBits(cell(cell_index), 0u, mask);
}
void StoreCell(int cell_index, uint32_t value) {
base::AsAtomic32::Release_Store(cell(cell_index), value);
}
bool IsEmpty() {
for (int i = 0; i < kCellsPerBucket; i++) {
if (cells_[i] != 0) {
return false;
}
}
return true;
}
};
private:
void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
DCHECK_GE(start_cell, 0);
DCHECK_LE(end_cell, kCellsPerBucket);
int current_cell = start_cell;
while (current_cell < kCellsPerBucket) {
StoreCell(&bucket[current_cell], 0);
bucket->StoreCell(current_cell, 0);
current_cell++;
}
}
void ReleaseBucket(int bucket_index) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
StoreBucket(&buckets_[bucket_index], nullptr);
DeleteArray<uint32_t>(bucket);
void ReleaseBucket(size_t bucket_index) {
Bucket* bucket = LoadBucket(bucket_index);
StoreBucket(bucket_index, nullptr);
delete bucket;
}
template <AccessMode access_mode = AccessMode::ATOMIC>
Bucket LoadBucket(Bucket* bucket) {
Bucket* LoadBucket(Bucket** bucket) {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(bucket);
return *bucket;
}
template <AccessMode access_mode = AccessMode::ATOMIC>
void StoreBucket(Bucket* bucket, Bucket value) {
if (access_mode == AccessMode::ATOMIC) {
base::AsAtomicPointer::Release_Store(bucket, value);
} else {
*bucket = value;
}
}
bool IsEmptyBucket(Bucket bucket) {
for (int i = 0; i < kCellsPerBucket; i++) {
if (LoadCell(&bucket[i])) {
return false;
}
}
return true;
Bucket* LoadBucket(size_t bucket_index) {
return LoadBucket(bucket(bucket_index));
}
template <AccessMode access_mode = AccessMode::ATOMIC>
bool SwapInNewBucket(Bucket* bucket, Bucket value) {
void StoreBucket(Bucket** bucket, Bucket* value) {
if (access_mode == AccessMode::ATOMIC) {
return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
value) == nullptr;
base::AsAtomicPointer::Release_Store(bucket, value);
} else {
DCHECK_NULL(*bucket);
*bucket = value;
return true;
}
}
template <AccessMode access_mode = AccessMode::ATOMIC>
uint32_t LoadCell(uint32_t* cell) {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomic32::Acquire_Load(cell);
return *cell;
}
void StoreCell(uint32_t* cell, uint32_t value) {
base::AsAtomic32::Release_Store(cell, value);
}
void ClearCellBits(uint32_t* cell, uint32_t mask) {
base::AsAtomic32::SetBits(cell, 0u, mask);
void StoreBucket(size_t bucket_index, Bucket* value) {
StoreBucket(bucket(bucket_index), value);
}
template <AccessMode access_mode = AccessMode::ATOMIC>
void SetCellBits(uint32_t* cell, uint32_t mask) {
bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
Bucket** b = bucket(bucket_index);
if (access_mode == AccessMode::ATOMIC) {
base::AsAtomic32::SetBits(cell, mask, mask);
return base::AsAtomicPointer::Release_CompareAndSwap(b, nullptr, value) ==
nullptr;
} else {
*cell = (*cell & ~mask) | mask;
DCHECK_NULL(*b);
*b = value;
return true;
}
}
// Converts the slot offset into bucket/cell/bit index.
void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
int* bit_index) {
static void SlotToIndices(size_t slot_offset, size_t* bucket_index,
int* cell_index, int* bit_index) {
DCHECK(IsAligned(slot_offset, kTaggedSize));
int slot = slot_offset >> kTaggedSizeLog2;
DCHECK(slot >= 0 && slot <= kMaxSlots);
size_t slot = slot_offset >> kTaggedSizeLog2;
*bucket_index = slot >> kBitsPerBucketLog2;
*cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
*bit_index = slot & (kBitsPerCell - 1);
*cell_index =
static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
*bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
}
Bucket buckets_[kBuckets];
Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }
#ifdef DEBUG
size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
static const int kInitialBucketsSize = sizeof(size_t);
#else
static const int kInitialBucketsSize = 0;
#endif
};
STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
STATIC_ASSERT(sizeof(SlotSet) == SlotSet::kSize);
STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);
enum SlotType {
FULL_EMBEDDED_OBJECT_SLOT,
......
......@@ -1090,6 +1090,12 @@ size_t Page::ShrinkToHighWaterMark() {
// Ensure that no objects will be allocated on this page.
DCHECK_EQ(0u, AvailableInFreeList());
// Ensure that slot sets are empty. Otherwise the buckets for the shrunk
// area would not be freed when deallocating this page.
DCHECK_NULL(slot_set<OLD_TO_NEW>());
DCHECK_NULL(slot_set<OLD_TO_OLD>());
DCHECK_NULL(sweeping_slot_set());
size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
......@@ -1416,12 +1422,6 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
}
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
DCHECK_LT(0, pages);
return new SlotSet[pages];
}
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
......@@ -1435,11 +1435,11 @@ SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
}
SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
SlotSet* new_slot_set = AllocateAndInitializeSlotSet(size(), address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
SlotSet* new_slot_set = SlotSet::Allocate(buckets());
SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
slot_set, nullptr, new_slot_set);
if (old_slot_set != nullptr) {
delete[] new_slot_set;
SlotSet::Delete(new_slot_set, buckets());
new_slot_set = old_slot_set;
}
DCHECK(new_slot_set);
......@@ -1460,7 +1460,7 @@ void MemoryChunk::ReleaseSweepingSlotSet() {
void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
if (*slot_set) {
delete[] * slot_set;
SlotSet::Delete(*slot_set, buckets());
*slot_set = nullptr;
}
}
......
......@@ -76,7 +76,7 @@ class FreeStoreAllocationPolicy {
void* AllocWithRetry(size_t size);
V8_EXPORT_PRIVATE void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void* ptr);
V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
......
......@@ -14,45 +14,56 @@
namespace v8 {
namespace internal {
TEST(SlotSet, BucketsForSize) {
EXPECT_EQ(static_cast<size_t>(SlotSet::kBucketsRegularPage),
SlotSet::BucketsForSize(Page::kPageSize));
EXPECT_EQ(static_cast<size_t>(SlotSet::kBucketsRegularPage) * 2,
SlotSet::BucketsForSize(Page::kPageSize * 2));
}
TEST(SlotSet, InsertAndLookup1) {
SlotSet set;
SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i));
EXPECT_FALSE(set->Lookup(i));
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
set.Insert<AccessMode::ATOMIC>(i);
set->Insert<AccessMode::ATOMIC>(i);
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_TRUE(set.Lookup(i));
EXPECT_TRUE(set->Lookup(i));
}
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(SlotSet, InsertAndLookup2) {
SlotSet set;
SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i);
set->Insert<AccessMode::ATOMIC>(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
EXPECT_TRUE(set.Lookup(i));
EXPECT_TRUE(set->Lookup(i));
} else {
EXPECT_FALSE(set.Lookup(i));
EXPECT_FALSE(set->Lookup(i));
}
}
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(SlotSet, Iterate) {
SlotSet set;
SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i);
set->Insert<AccessMode::ATOMIC>(i);
}
}
set.Iterate(
kNullAddress,
set->Iterate(
kNullAddress, SlotSet::kBucketsRegularPage,
[](MaybeObjectSlot slot) {
if (slot.address() % 3 == 0) {
return KEEP_SLOT;
......@@ -64,56 +75,62 @@ TEST(SlotSet, Iterate) {
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 21 == 0) {
EXPECT_TRUE(set.Lookup(i));
EXPECT_TRUE(set->Lookup(i));
} else {
EXPECT_FALSE(set.Lookup(i));
EXPECT_FALSE(set->Lookup(i));
}
}
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(SlotSet, Remove) {
SlotSet set;
SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i);
set->Insert<AccessMode::ATOMIC>(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 3 != 0) {
set.Remove(i);
set->Remove(i);
}
}
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 21 == 0) {
EXPECT_TRUE(set.Lookup(i));
EXPECT_TRUE(set->Lookup(i));
} else {
EXPECT_FALSE(set.Lookup(i));
EXPECT_FALSE(set->Lookup(i));
}
}
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
SlotSet set;
SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
uint32_t first = start == 0 ? 0 : start - kTaggedSize;
uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end;
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
for (uint32_t i = first; i <= last; i += kTaggedSize) {
set.Insert<AccessMode::ATOMIC>(i);
set->Insert<AccessMode::ATOMIC>(i);
}
set.RemoveRange(start, end, mode);
set->RemoveRange(start, end, SlotSet::kBucketsRegularPage, mode);
if (first != start) {
EXPECT_TRUE(set.Lookup(first));
EXPECT_TRUE(set->Lookup(first));
}
if (last == end) {
EXPECT_TRUE(set.Lookup(last));
EXPECT_TRUE(set->Lookup(last));
}
for (uint32_t i = start; i < end; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i));
EXPECT_FALSE(set->Lookup(i));
}
}
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(SlotSet, RemoveRange) {
......@@ -132,15 +149,16 @@ TEST(SlotSet, RemoveRange) {
}
}
}
SlotSet set;
SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
set.Insert<AccessMode::ATOMIC>(Page::kPageSize / 2);
set.RemoveRange(0, Page::kPageSize, mode);
set->Insert<AccessMode::ATOMIC>(Page::kPageSize / 2);
set->RemoveRange(0, Page::kPageSize, SlotSet::kBucketsRegularPage, mode);
for (uint32_t i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i));
EXPECT_FALSE(set->Lookup(i));
}
}
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
TEST(TypedSlotSet, Iterate) {
......