Commit 1cb133e3 authored by hpayer, committed by Commit bot

[heap] Concurrently free empty slot set buckets.

BUG=chromium:648568

Review-Url: https://codereview.chromium.org/2390743005
Cr-Commit-Position: refs/heads/master@{#39982}
parent d5151564
......@@ -3836,6 +3836,9 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
if (page->typed_old_to_new_slots()) {
page->typed_old_to_new_slots()->FreeToBeFreedChunks();
}
if (page->old_to_new_slots()) {
page->old_to_new_slots()->FreeToBeFreedBuckets();
}
{
base::LockGuard<base::Mutex> guard(&mutex_);
......
......@@ -20,10 +20,12 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
for (MemoryChunk* chunk : *heap->old_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate([heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
});
slots->Iterate(
[heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
}
for (MemoryChunk* chunk : *heap->code_space()) {
......@@ -43,14 +45,17 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
for (MemoryChunk* chunk : *heap->map_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate([heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
// TODO(mlippautz): In map space all allocations would ideally be map
// aligned. After establishing this invariant IsValidSlot could just
// refer to the containing object using alignment and check the mark
// bits.
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
});
slots->Iterate(
[heap, chunk](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
// TODO(mlippautz): In map space all allocations would ideally be
// map
// aligned. After establishing this invariant IsValidSlot could just
// refer to the containing object using alignment and check the mark
// bits.
return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
}
}
......
......@@ -116,10 +116,13 @@ class RememberedSet {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
int new_count = 0;
for (size_t page = 0; page < pages; page++) {
new_count += slots[page].Iterate(callback);
new_count +=
slots[page].Iterate(callback, SlotSet::PREFREE_EMPTY_BUCKETS);
}
if (new_count == 0) {
ReleaseSlotSet(chunk);
// Only old-to-old slot sets are released eagerly. Old-new-slot sets are
// released by the sweeper threads.
if (direction == OLD_TO_OLD && new_count == 0) {
chunk->ReleaseOldToOldSlots();
}
}
}
......@@ -219,14 +222,6 @@ class RememberedSet {
}
}
// Frees the slot set of the given chunk that matches this remembered set's
// direction (old-to-old or old-to-new).
static void ReleaseSlotSet(MemoryChunk* chunk) {
  if (direction != OLD_TO_OLD) {
    // OLD_TO_NEW is the only other direction.
    chunk->ReleaseOldToNewSlots();
  } else {
    chunk->ReleaseOldToOldSlots();
  }
}
static void ReleaseTypedSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
chunk->ReleaseTypedOldToOldSlots();
......
......@@ -25,6 +25,8 @@ enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
enum IterationMode { PREFREE_EMPTY_BUCKETS, KEEP_EMPTY_BUCKETS };
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
bucket[i].SetValue(nullptr);
......@@ -35,6 +37,7 @@ class SlotSet : public Malloced {
for (int i = 0; i < kBuckets; i++) {
ReleaseBucket(i);
}
FreeToBeFreedBuckets();
}
void SetPageStart(Address page_start) { page_start_ = page_start; }
......@@ -145,7 +148,7 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT;
// });
template <typename Callback>
int Iterate(Callback callback) {
int Iterate(Callback callback, IterationMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
if (bucket[bucket_index].Value() != nullptr) {
......@@ -182,8 +185,12 @@ class SlotSet : public Malloced {
}
}
}
if (in_bucket_count == 0) {
ReleaseBucket(bucket_index);
if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
base::AtomicValue<uint32_t>* bucket_ptr =
bucket[bucket_index].Value();
to_be_freed_buckets_.push(bucket_ptr);
bucket[bucket_index].SetValue(nullptr);
}
new_count += in_bucket_count;
}
......@@ -191,6 +198,15 @@ class SlotSet : public Malloced {
return new_count;
}
void FreeToBeFreedBuckets() {
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
while (!to_be_freed_buckets_.empty()) {
base::AtomicValue<uint32_t>* top = to_be_freed_buckets_.top();
to_be_freed_buckets_.pop();
DeleteArray<base::AtomicValue<uint32_t>>(top);
}
}
private:
static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
static const int kCellsPerBucket = 32;
......@@ -242,6 +258,8 @@ class SlotSet : public Malloced {
base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
Address page_start_;
base::Mutex to_be_freed_buckets_mutex_;
std::stack<base::AtomicValue<uint32_t>*> to_be_freed_buckets_;
};
enum SlotType {
......
......@@ -508,7 +508,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
chunk->old_to_new_slots_ = nullptr;
chunk->old_to_new_slots_.SetValue(nullptr);
chunk->old_to_old_slots_ = nullptr;
chunk->typed_old_to_new_slots_.SetValue(nullptr);
chunk->typed_old_to_old_slots_ = nullptr;
......@@ -1075,7 +1075,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete mutex_;
mutex_ = nullptr;
}
if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
if (old_to_new_slots_.Value() != nullptr) ReleaseOldToNewSlots();
if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
if (typed_old_to_new_slots_.Value() != nullptr) ReleaseTypedOldToNewSlots();
if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
......@@ -1093,13 +1093,14 @@ static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
}
void MemoryChunk::AllocateOldToNewSlots() {
DCHECK(nullptr == old_to_new_slots_);
old_to_new_slots_ = AllocateSlotSet(size_, address());
DCHECK(nullptr == old_to_new_slots_.Value());
old_to_new_slots_.SetValue(AllocateSlotSet(size_, address()));
}
void MemoryChunk::ReleaseOldToNewSlots() {
delete[] old_to_new_slots_;
old_to_new_slots_ = nullptr;
SlotSet* old_to_new_slots = old_to_new_slots_.Value();
delete[] old_to_new_slots;
old_to_new_slots_.SetValue(nullptr);
}
void MemoryChunk::AllocateOldToOldSlots() {
......
......@@ -453,7 +453,7 @@ class MemoryChunk {
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); }
inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
inline TypedSlotSet* typed_old_to_new_slots() {
return typed_old_to_new_slots_.Value();
......@@ -653,7 +653,7 @@ class MemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* old_to_new_slots_;
base::AtomicValue<SlotSet*> old_to_new_slots_;
SlotSet* old_to_old_slots_;
base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_;
TypedSlotSet* typed_old_to_old_slots_;
......
......@@ -52,14 +52,16 @@ TEST(SlotSet, Iterate) {
}
}
set.Iterate([](Address slot_address) {
uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
if (intaddr % 3 == 0) {
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
}
});
set.Iterate(
[](Address slot_address) {
uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
if (intaddr % 3 == 0) {
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
}
},
SlotSet::KEEP_EMPTY_BUCKETS);
for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
if (i % 21 == 0) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment