Commit add50f20 authored by Dominik Inführ, committed by Commit Bot

[heap] Avoid iterating all pages when freeing empty buckets

At the end of scavenge, the GC iterated all pages to search for empty
buckets and free them. With this CL the scavenger instead marks buckets
(and their corresponding pages) that became empty during scavenging, so
that afterwards the GC only needs to revisit those marked buckets.

Unlike (minor) mark-compact, the scavenger can't directly free those
buckets, since the evacuation and pointer-updating phases are
not separated.

Right now the pages are processed sequentially but this could be
parallelized in a subsequent CL.
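
The pattern, reduced to a minimal sketch with hypothetical stand-in types
(Page, IterateAndTrack, CheckPossiblyEmpty are placeholders; the real
SlotSet/MemoryChunk/Worklist machinery is in the diff below):

#include <memory>
#include <set>
#include <vector>

// Hypothetical stand-ins for MemoryChunk/SlotSet; illustration only.
struct Page {
  std::vector<std::unique_ptr<std::set<int>>> buckets;  // slot buckets
  std::vector<bool> possibly_empty;  // one mark per bucket
};

// Scavenge phase: only *mark* buckets that drained empty. They cannot be
// freed yet, because evacuation and pointer updating overlap and slots
// may still be re-added to a marked bucket.
void IterateAndTrack(Page* page, std::vector<Page*>* marked_pages) {
  bool found = false;
  for (size_t i = 0; i < page->buckets.size(); i++) {
    if (page->buckets[i] != nullptr && page->buckets[i]->empty()) {
      page->possibly_empty[i] = true;
      found = true;
    }
  }
  if (found) marked_pages->push_back(page);
}

// After scavenge: revisit only the marked pages, free buckets that are
// still empty, and clear the marks. Every unmarked page is skipped.
void CheckPossiblyEmpty(Page* page) {
  for (size_t i = 0; i < page->buckets.size(); i++) {
    if (page->possibly_empty[i] && page->buckets[i] != nullptr &&
        page->buckets[i]->empty()) {
      page->buckets[i].reset();  // actually release the bucket's memory
    }
    page->possibly_empty[i] = false;
  }
}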

Change-Id: I47ed8c0e952b06c5d960e39a6f38e745d5618656
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1889884
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64781}
parent 42e8c231
@@ -4437,11 +4437,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
}
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [](MemoryChunk* chunk) {
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
});
heap()->account_external_memory_concurrently_freed();
}
......
@@ -12,6 +12,7 @@
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
@@ -40,6 +41,20 @@ class RememberedSetOperations {
return slots;
}
template <typename Callback>
static int IterateAndTrackEmptyBuckets(
SlotSet* slot_set, MemoryChunk* chunk, Callback callback,
Worklist<MemoryChunk*, 64>::View empty_chunks) {
int slots = 0;
if (slot_set != nullptr) {
bool found_empty_bucket = false;
slots += slot_set->IterateAndTrackEmptyBuckets(
chunk->address(), chunk->buckets(), callback, &found_empty_bucket);
if (found_empty_bucket) empty_chunks.Push(chunk);
}
return slots;
}
static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
if (slot_set != nullptr) {
uintptr_t offset = slot_addr - chunk->address();
@@ -149,12 +164,36 @@ class RememberedSet : public AllStatic {
return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
}
template <typename Callback>
static int IterateAndTrackEmptyBuckets(
    MemoryChunk* chunk, Callback callback,
    Worklist<MemoryChunk*, 64>::View empty_chunks) {
  SlotSet* slots = chunk->slot_set<type>();
  int slot_count = RememberedSetOperations::IterateAndTrackEmptyBuckets(
      slots, chunk, callback, empty_chunks);
  return slot_count;
}
static void FreeEmptyBuckets(MemoryChunk* chunk) {
DCHECK(type == OLD_TO_NEW);
SlotSet* slot_set = chunk->slot_set<type>();
if (slot_set != nullptr && slot_set->FreeEmptyBuckets(chunk->buckets())) {
chunk->ReleaseSlotSet<type>();
}
}
static bool CheckPossiblyEmptyBuckets(MemoryChunk* chunk) {
DCHECK(type == OLD_TO_NEW);
SlotSet* slot_set = chunk->slot_set<type, AccessMode::NON_ATOMIC>();
if (slot_set != nullptr &&
slot_set->CheckPossiblyEmptyBuckets(chunk->buckets())) {
chunk->ReleaseSlotSet<type>();
return true;
}
return false;
}
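
Taken together, the intended call pattern for the two new methods is a
mark phase followed by a check phase (a condensed sketch; page, callback
and empty_chunks_view are placeholder names, the real wiring is in the
scavenger changes below):

// During scavenge (possibly from several tasks): mark only, never free.
RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(page, callback,
    empty_chunks_view);

// After scavenge, on the main thread: drain the worklist and release
// buckets that are still empty.
MemoryChunk* chunk;
while (empty_chunks.Pop(kMainThreadId, &chunk)) {
  RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
}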
// Given a page and a typed slot in that page, this function adds the slot
......
@@ -244,12 +244,14 @@ void ScavengerCollector::CollectGarbage() {
const bool is_logging = isolate_->LogObjectRelocation();
const int num_scavenge_tasks = NumberOfScavengeTasks();
OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
Worklist<MemoryChunk*, 64> empty_chunks;
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i] =
new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
&promotion_list, &ephemeron_table_list, i);
job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
}
@@ -362,11 +364,20 @@ void ScavengerCollector::CollectGarbage() {
{
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
MemoryChunk* chunk;
while (empty_chunks.Pop(kMainThreadId, &chunk)) {
RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
}
#ifdef DEBUG
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap_, [](MemoryChunk* chunk) {
SlotSet* slot_set = chunk->slot_set<OLD_TO_NEW>();
DCHECK_IMPLIES(slot_set != nullptr,
slot_set->IsPossiblyEmptyCleared());
});
#endif
}
// Update how much has survived scavenge.
@@ -412,10 +423,12 @@ int ScavengerCollector::NumberOfScavengeTasks() {
}
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
Worklist<MemoryChunk*, 64>* empty_chunks,
CopiedList* copied_list, PromotionList* promotion_list,
EphemeronTableList* ephemeron_table_list, int task_id)
: collector_(collector),
heap_(heap),
empty_chunks_(empty_chunks, task_id),
promotion_list_(promotion_list, task_id),
copied_list_(copied_list, task_id),
ephemeron_table_list_(ephemeron_table_list, task_id),
@@ -459,22 +472,28 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
void Scavenger::ScavengePage(MemoryChunk* page) {
CodePageMemoryModificationScope memory_modification_scope(page);
if (page->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(
page,
[this, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndScavengeObject(heap_, slot);
},
empty_chunks_);
}
if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
RememberedSetSweeping::Iterate(
page,
[this, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndScavengeObject(heap_, slot);
},
SlotSet::KEEP_EMPTY_BUCKETS);
}
if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
// The invalidated slots are not needed after old-to-new slots were
@@ -596,6 +615,7 @@ void Scavenger::Finalize() {
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
empty_chunks_.FlushToGlobal();
ephemeron_table_list_.FlushToGlobal();
for (auto it = ephemeron_remembered_set_.begin();
it != ephemeron_remembered_set_.end(); ++it) {
......
@@ -117,7 +117,8 @@ class Scavenger {
using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
Worklist<MemoryChunk*, 64>* empty_chunks, CopiedList* copied_list,
PromotionList* promotion_list,
EphemeronTableList* ephemeron_table_list, int task_id);
// Entry point for scavenging an old generation page. For scavenging single
@@ -206,6 +207,7 @@ class Scavenger {
ScavengerCollector* const collector_;
Heap* const heap_;
Worklist<MemoryChunk*, 64>::View empty_chunks_;
PromotionList::View promotion_list_;
CopiedList::View copied_list_;
EphemeronTableList::View ephemeron_table_list_;
......
@@ -11,6 +11,7 @@
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/heap/worklist.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils/allocation.h"
@@ -33,19 +34,42 @@ class SlotSet {
KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
};
enum class PossiblyEmpty : uint8_t {
kYes, // Bucket is non-null but might be empty.
kNoOrNull, // Bucket is null or cannot be empty.
};
SlotSet() = delete;
static SlotSet* Allocate(size_t buckets) {
//  SlotSet* slot_set ------------------------------+
//                                                  |
//                                                  v
//  +----------------------+-----------------+-----------------------+
//  | possibly empty array | initial buckets |     buckets array     |
//  +----------------------+-----------------+-----------------------+
//     1 byte * buckets       pointer-sized    pointer-sized * buckets
//
//
// The SlotSet pointer points to the beginning of the buckets array for
// faster access in the write barrier. The number of buckets is needed for
// calculating the size of this data structure.
// Since pages can shrink we also store the initial_buckets size.
//
size_t possibly_empty_array_size = PossiblyEmptyArraySize(buckets);
size_t buckets_size = buckets * sizeof(Bucket*);
size_t size =
    possibly_empty_array_size + kInitialBucketsSize + buckets_size;
void* allocation = AlignedAlloc(size, kSystemPointerSize);
SlotSet* slot_set = reinterpret_cast<SlotSet*>(
    reinterpret_cast<uint8_t*>(allocation) + possibly_empty_array_size +
    kInitialBucketsSize);
DCHECK(
    IsAligned(reinterpret_cast<uintptr_t>(slot_set), kSystemPointerSize));
#ifdef DEBUG
*slot_set->initial_buckets() = buckets;
#endif
for (size_t i = 0; i < buckets; i++) {
*slot_set->bucket(i) = nullptr;
*slot_set->possibly_empty(i) = PossiblyEmpty::kNoOrNull;
}
return slot_set;
}
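
A worked example of the layout arithmetic above, assuming 64-bit pointers
(kSystemPointerSize == 8) and a hypothetical count of 10 buckets:

// possibly_empty_array_size = (1 * 10 + 7) / 8 * 8 = 16 bytes
//                             (PossiblyEmptyArraySize rounds up to
//                             pointer alignment, see below)
// kInitialBucketsSize       = sizeof(size_t)       =  8 bytes
// buckets_size              = 10 * sizeof(Bucket*) = 80 bytes
// total allocation size     = 16 + 8 + 80          = 104 bytes
// slot_set = allocation + 16 + 8, i.e. it points directly at the buckets
// array; possibly_empty() and initial_buckets() reach the metadata via
// negative offsets.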
@@ -57,13 +81,17 @@ class SlotSet {
slot_set->ReleaseBucket(i);
}
size_t initial_buckets = *slot_set->initial_buckets();
#ifdef DEBUG
for (size_t i = buckets; i < initial_buckets; i++) {
DCHECK_NULL(*slot_set->bucket(i));
}
#endif
size_t possibly_empty_array_size = PossiblyEmptyArraySize(initial_buckets);
AlignedFree(reinterpret_cast<uint8_t*>(slot_set) - kInitialBucketsSize -
possibly_empty_array_size);
}
static size_t BucketsForSize(size_t size) {
@@ -213,52 +241,81 @@ class SlotSet {
// if (good(slot)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
//
// Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
template <typename Callback>
size_t Iterate(Address chunk_start, size_t buckets, Callback callback,
               EmptyBucketMode mode) {
return Iterate(chunk_start, buckets, callback,
[this, mode](size_t bucket_index) {
if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
ReleaseBucket(bucket_index);
}
});
}
// Similar to Iterate, but marks potentially empty buckets internally.
// Stores true in *empty_bucket_found in case a potentially empty bucket
// was found. Assumes that the possibly-empty array was already cleared by
// CheckPossiblyEmptyBuckets.
template <typename Callback>
size_t IterateAndTrackEmptyBuckets(Address chunk_start, size_t buckets,
Callback callback,
bool* empty_bucket_found) {
return Iterate(chunk_start, buckets, callback,
[this, empty_bucket_found](size_t bucket_index) {
*possibly_empty(bucket_index) = PossiblyEmpty::kYes;
*empty_bucket_found = true;
});
}
bool FreeEmptyBuckets(size_t buckets) {
  bool empty = true;
  for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
    if (!FreeBucketIfEmpty(bucket_index)) {
      empty = false;
    }
  }
  return empty;
}
// Check whether possibly empty buckets are really empty. Empty buckets
// are freed and the possibly empty state is cleared for all buckets.
bool CheckPossiblyEmptyBuckets(size_t buckets) {
  bool empty = true;
  for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
    Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
    if (bucket) {
      if (*possibly_empty(bucket_index) == PossiblyEmpty::kYes) {
        if (bucket->IsEmpty()) {
          ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
        } else {
          empty = false;
        }
        *possibly_empty(bucket_index) = PossiblyEmpty::kNoOrNull;
      } else {
        empty = false;
      }
    } else {
      DCHECK_EQ(*possibly_empty(bucket_index), PossiblyEmpty::kNoOrNull);
    }
  }
  return empty;
}
// Check whether all possibly empty entries are cleared. Only used
// for testing in debug builds.
bool IsPossiblyEmptyCleared() {
  size_t buckets = *initial_buckets();
  for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
    if (*possibly_empty(bucket_index) != PossiblyEmpty::kNoOrNull) {
      return false;
    }
  }
  return true;
}
static const int kCellsPerBucket = 32;
@@ -322,6 +379,59 @@ class SlotSet {
};
private:
template <typename Callback, typename EmptyBucketCallback>
size_t Iterate(Address chunk_start, size_t buckets, Callback callback,
EmptyBucketCallback empty_bucket_callback) {
size_t new_count = 0;
for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
Bucket* bucket = LoadBucket(bucket_index);
if (bucket != nullptr) {
size_t in_bucket_count = 0;
size_t cell_offset = bucket_index << kBitsPerBucketLog2;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
uint32_t cell = bucket->LoadCell(i);
if (cell) {
uint32_t old_cell = cell;
uint32_t mask = 0;
while (cell) {
int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset;
Address slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
if (callback(MaybeObjectSlot(chunk_start + slot)) == KEEP_SLOT) {
++in_bucket_count;
} else {
mask |= bit_mask;
}
cell ^= bit_mask;
}
uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) {
bucket->ClearCellBits(i, mask);
}
}
}
if (in_bucket_count == 0) {
empty_bucket_callback(bucket_index);
}
new_count += in_bucket_count;
}
}
return new_count;
}
bool FreeBucketIfEmpty(size_t bucket_index) {
Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
if (bucket != nullptr) {
if (bucket->IsEmpty()) {
ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
} else {
return false;
}
}
return true;
}
void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
DCHECK_GE(start_cell, 0);
DCHECK_LE(end_cell, kCellsPerBucket);
@@ -332,9 +442,10 @@ class SlotSet {
}
}
template <AccessMode access_mode = AccessMode::ATOMIC>
void ReleaseBucket(size_t bucket_index) {
Bucket* bucket = LoadBucket<access_mode>(bucket_index);
StoreBucket<access_mode>(bucket_index, nullptr);
delete bucket;
}
@@ -388,16 +499,20 @@ class SlotSet {
*bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
}
static size_t PossiblyEmptyArraySize(size_t buckets) {
return (sizeof(PossiblyEmpty) * buckets + (kSystemPointerSize - 1)) /
kSystemPointerSize * kSystemPointerSize;
}
Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }
PossiblyEmpty* possibly_empty(size_t bucket_index) {
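// The possibly-empty array sits in front of the initial_buckets word and
// is indexed backwards, starting at the byte directly preceding it.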
return reinterpret_cast<PossiblyEmpty*>(buckets()) - kInitialBucketsSize -
1 - bucket_index;
}
size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
static const int kInitialBucketsSize = sizeof(size_t);
};
STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
......