Commit 2d847f8d authored by Dominik Inführ, committed by Commit Bot

[heap] Remove page_start_ from SlotSet

Do not store page_start_ in SlotSet anymore, when needed this address
can be calculated cheaply and be passed in from the caller.

Bug: v8:9454
Change-Id: I4cdb010e4126680d8df500e40ae3d0bc884cf501
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1838731
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64103}
parent cfa6c898
...@@ -36,7 +36,8 @@ class RememberedSetOperations { ...@@ -36,7 +36,8 @@ class RememberedSetOperations {
if (slots != nullptr) { if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize; size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) { for (size_t page = 0; page < pages; page++) {
slots[page].Iterate(callback, mode); slots[page].Iterate(chunk->address() + page * Page::kPageSize, callback,
mode);
} }
} }
} }
......
...@@ -22,8 +22,7 @@ namespace internal { ...@@ -22,8 +22,7 @@ namespace internal {
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT }; enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Data structure for maintaining a set of slots in a standard (non-large) // Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any // page.
// operation.
// The data structure assumes that the slots are pointer size aligned and // The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets. // splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset. // Each bucket is a bitmap with a bit corresponding to a single slot offset.
...@@ -50,8 +49,6 @@ class SlotSet : public Malloced { ...@@ -50,8 +49,6 @@ class SlotSet : public Malloced {
FreeToBeFreedBuckets(); FreeToBeFreedBuckets();
} }
void SetPageStart(Address page_start) { page_start_ = page_start; }
// The slot offset specifies a slot at address page_start_ + slot_offset. // The slot offset specifies a slot at address page_start_ + slot_offset.
// AccessMode defines whether there can be concurrent access on the buckets // AccessMode defines whether there can be concurrent access on the buckets
// or not. // or not.
...@@ -190,7 +187,7 @@ class SlotSet : public Malloced { ...@@ -190,7 +187,7 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT; // else return REMOVE_SLOT;
// }); // });
template <typename Callback> template <typename Callback>
int Iterate(Callback callback, EmptyBucketMode mode) { int Iterate(Address page_start, Callback callback, EmptyBucketMode mode) {
int new_count = 0; int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]); Bucket bucket = LoadBucket(&buckets_[bucket_index]);
...@@ -206,7 +203,7 @@ class SlotSet : public Malloced { ...@@ -206,7 +203,7 @@ class SlotSet : public Malloced {
int bit_offset = base::bits::CountTrailingZeros(cell); int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset; uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2; uint32_t slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) { if (callback(MaybeObjectSlot(page_start + slot)) == KEEP_SLOT) {
++in_bucket_count; ++in_bucket_count;
} else { } else {
mask |= bit_mask; mask |= bit_mask;
...@@ -382,7 +379,6 @@ class SlotSet : public Malloced { ...@@ -382,7 +379,6 @@ class SlotSet : public Malloced {
} }
Bucket buckets_[kBuckets]; Bucket buckets_[kBuckets];
Address page_start_;
base::Mutex to_be_freed_buckets_mutex_; base::Mutex to_be_freed_buckets_mutex_;
std::stack<uint32_t*> to_be_freed_buckets_; std::stack<uint32_t*> to_be_freed_buckets_;
}; };
......
...@@ -1430,11 +1430,7 @@ void MemoryChunk::ReleaseAllAllocatedMemory() { ...@@ -1430,11 +1430,7 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) { static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize; size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
DCHECK_LT(0, pages); DCHECK_LT(0, pages);
SlotSet* slot_set = new SlotSet[pages]; return new SlotSet[pages];
for (size_t i = 0; i < pages; i++) {
slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
}
return slot_set;
} }
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>(); template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
......
...@@ -16,7 +16,6 @@ namespace internal { ...@@ -16,7 +16,6 @@ namespace internal {
TEST(SlotSet, InsertAndLookup1) { TEST(SlotSet, InsertAndLookup1) {
SlotSet set; SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
EXPECT_FALSE(set.Lookup(i)); EXPECT_FALSE(set.Lookup(i));
} }
...@@ -30,7 +29,6 @@ TEST(SlotSet, InsertAndLookup1) { ...@@ -30,7 +29,6 @@ TEST(SlotSet, InsertAndLookup1) {
TEST(SlotSet, InsertAndLookup2) { TEST(SlotSet, InsertAndLookup2) {
SlotSet set; SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) { if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i); set.Insert<AccessMode::ATOMIC>(i);
...@@ -47,7 +45,6 @@ TEST(SlotSet, InsertAndLookup2) { ...@@ -47,7 +45,6 @@ TEST(SlotSet, InsertAndLookup2) {
TEST(SlotSet, Iterate) { TEST(SlotSet, Iterate) {
SlotSet set; SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) { if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i); set.Insert<AccessMode::ATOMIC>(i);
...@@ -55,6 +52,7 @@ TEST(SlotSet, Iterate) { ...@@ -55,6 +52,7 @@ TEST(SlotSet, Iterate) {
} }
set.Iterate( set.Iterate(
kNullAddress,
[](MaybeObjectSlot slot) { [](MaybeObjectSlot slot) {
if (slot.address() % 3 == 0) { if (slot.address() % 3 == 0) {
return KEEP_SLOT; return KEEP_SLOT;
...@@ -75,7 +73,6 @@ TEST(SlotSet, Iterate) { ...@@ -75,7 +73,6 @@ TEST(SlotSet, Iterate) {
TEST(SlotSet, Remove) { TEST(SlotSet, Remove) {
SlotSet set; SlotSet set;
set.SetPageStart(0);
for (int i = 0; i < Page::kPageSize; i += kTaggedSize) { for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
if (i % 7 == 0) { if (i % 7 == 0) {
set.Insert<AccessMode::ATOMIC>(i); set.Insert<AccessMode::ATOMIC>(i);
...@@ -99,7 +96,6 @@ TEST(SlotSet, Remove) { ...@@ -99,7 +96,6 @@ TEST(SlotSet, Remove) {
void CheckRemoveRangeOn(uint32_t start, uint32_t end) { void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
SlotSet set; SlotSet set;
set.SetPageStart(0);
uint32_t first = start == 0 ? 0 : start - kTaggedSize; uint32_t first = start == 0 ? 0 : start - kTaggedSize;
uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end; uint32_t last = end == Page::kPageSize ? end - kTaggedSize : end;
for (const auto mode : for (const auto mode :
...@@ -137,7 +133,6 @@ TEST(SlotSet, RemoveRange) { ...@@ -137,7 +133,6 @@ TEST(SlotSet, RemoveRange) {
} }
} }
SlotSet set; SlotSet set;
set.SetPageStart(0);
for (const auto mode : for (const auto mode :
{SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) { {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
set.Insert<AccessMode::ATOMIC>(Page::kPageSize / 2); set.Insert<AccessMode::ATOMIC>(Page::kPageSize / 2);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment