Commit 841c40b7 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Clean up TypedSlotSet.

This extracts the parts of the TypedSlotSet that are used only
sequentially into a separate class called TypedSlots.

The new class will be used in the concurrent marker to keep track of
typed slots locally and then to merge them into the main remembered set
during finalization of marking.

The patch also cleans up the atomics in the Iterate and ClearInvalidSlots
methods, which can run concurrently with each other.

Bug: v8:8459

Change-Id: Id7a63041f7b99218381e5e9e1999210cab9c4369
Reviewed-on: https://chromium-review.googlesource.com/c/1340247
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57673}
parent 359fdff0
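
For context before the diff: below is a minimal standalone sketch (not V8 code and not part of this commit) of the data structure the patch introduces. The class, method, and constant names mirror the new src/heap/slot-set.cc, but the slot payload is reduced to a single uint32_t and all V8-specific details (SlotType encoding, atomics, chunk freeing) are omitted.

// Illustration only: simplified model of TypedSlots. Chunks form a singly
// linked list; Insert appends to the head chunk, growing the list by
// prepending a larger chunk when the head is full; Merge splices another
// list onto the tail in O(1).
#include <cstdint>
#include <iostream>

class SimpleTypedSlots {
 public:
  ~SimpleTypedSlots() {
    Chunk* chunk = head_;
    while (chunk != nullptr) {
      Chunk* next = chunk->next;
      delete[] chunk->buffer;
      delete chunk;
      chunk = next;
    }
  }

  void Insert(uint32_t offset) {
    Chunk* chunk = EnsureChunk();
    chunk->buffer[chunk->count++] = offset;
  }

  // Moves all chunks of |other| into this list; |other| ends up empty.
  void Merge(SimpleTypedSlots* other) {
    if (other->head_ == nullptr) return;
    if (head_ == nullptr) {
      head_ = other->head_;
      tail_ = other->tail_;
    } else {
      tail_->next = other->head_;
      tail_ = other->tail_;
    }
    other->head_ = nullptr;
    other->tail_ = nullptr;
  }

  int Count() const {
    int result = 0;
    for (Chunk* c = head_; c != nullptr; c = c->next) result += c->count;
    return result;
  }

 private:
  struct Chunk {
    Chunk* next;
    uint32_t* buffer;
    int capacity;
    int count;
  };

  static constexpr int kInitialBufferSize = 8;

  Chunk* EnsureChunk() {
    if (head_ == nullptr) {
      head_ = tail_ = NewChunk(nullptr, kInitialBufferSize);
    }
    if (head_->count == head_->capacity) {
      head_ = NewChunk(head_, 2 * head_->capacity);  // Simplified growth policy.
    }
    return head_;
  }

  Chunk* NewChunk(Chunk* next, int capacity) {
    Chunk* chunk = new Chunk;
    chunk->next = next;
    chunk->buffer = new uint32_t[capacity];
    chunk->capacity = capacity;
    chunk->count = 0;
    return chunk;
  }

  Chunk* head_ = nullptr;
  Chunk* tail_ = nullptr;
};

int main() {
  // Each concurrent marking task would fill its own local list...
  SimpleTypedSlots local_a, local_b, main_set;
  for (uint32_t i = 0; i < 100; i++) local_a.Insert(i);
  for (uint32_t i = 0; i < 50; i++) local_b.Insert(i);
  // ...and hand it over during finalization without copying slots.
  main_set.Merge(&local_a);
  main_set.Merge(&local_b);
  std::cout << main_set.Count() << std::endl;  // Prints 150.
}

Because Merge only relinks chunk pointers, handing a task-local set over to the main remembered set at marking finalization costs O(1) regardless of how many slots were recorded, which is what the commit message above alludes to.
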
@@ -2077,6 +2077,7 @@ v8_source_set("v8_base") {
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/slot-set.cc",
"src/heap/slot-set.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
......
@@ -261,12 +261,6 @@ class RememberedSet : public AllStatic {
}
}
// Eliminates all stale slots from the remembered set, i.e.
// slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and
// must be called before sweeping when mark bits are still intact.
static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
private:
static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, ObjectSlot slot);
};
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
TypedSlots::~TypedSlots() {
Chunk* chunk = head_;
while (chunk != nullptr) {
Chunk* next = chunk->next;
delete[] chunk->buffer;
delete chunk;
chunk = next;
}
head_ = nullptr;
tail_ = nullptr;
}
void TypedSlots::Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
TypedSlot slot = {TypeField::encode(type) | OffsetField::encode(offset),
host_offset};
Chunk* chunk = EnsureChunk();
DCHECK_LT(chunk->count, chunk->capacity);
chunk->buffer[chunk->count] = slot;
++chunk->count;
}
void TypedSlots::Merge(TypedSlots* other) {
if (other->head_ == nullptr) {
return;
}
if (head_ == nullptr) {
head_ = other->head_;
tail_ = other->tail_;
} else {
tail_->next = other->head_;
tail_ = other->tail_;
}
other->head_ = nullptr;
other->tail_ = nullptr;
}
TypedSlots::Chunk* TypedSlots::EnsureChunk() {
if (!head_) {
head_ = tail_ = NewChunk(nullptr, kInitialBufferSize);
}
if (head_->count == head_->capacity) {
head_ = NewChunk(head_, NextCapacity(head_->capacity));
}
return head_;
}
TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, int capacity) {
Chunk* chunk = new Chunk;
chunk->next = next;
chunk->buffer = new TypedSlot[capacity];
chunk->capacity = capacity;
chunk->count = 0;
return chunk;
}
TypedSlotSet::~TypedSlotSet() { FreeToBeFreedChunks(); }
void TypedSlotSet::FreeToBeFreedChunks() {
base::MutexGuard guard(&to_be_freed_chunks_mutex_);
std::stack<std::unique_ptr<Chunk>> empty;
to_be_freed_chunks_.swap(empty);
}
void TypedSlotSet::ClearInvalidSlots(
const std::map<uint32_t, uint32_t>& invalid_ranges) {
Chunk* chunk = LoadHead();
while (chunk != nullptr) {
TypedSlot* buffer = chunk->buffer;
int count = chunk->count;
for (int i = 0; i < count; i++) {
TypedSlot slot = LoadTypedSlot(buffer + i);
SlotType type = TypeField::decode(slot.type_and_offset);
if (type == CLEARED_SLOT) continue;
uint32_t host_offset = slot.host_offset;
std::map<uint32_t, uint32_t>::const_iterator upper_bound =
invalid_ranges.upper_bound(host_offset);
if (upper_bound == invalid_ranges.begin()) continue;
// upper_bound points to the invalid range after the given slot. Hence,
// we have to go to the previous element.
upper_bound--;
DCHECK_LE(upper_bound->first, host_offset);
if (upper_bound->second > host_offset) {
ClearTypedSlot(buffer + i);
}
}
chunk = LoadNext(chunk);
}
}
} // namespace internal
} // namespace v8
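
The upper_bound logic in ClearInvalidSlots above is easy to misread, so here is a small standalone sketch (not part of the commit) of the same lookup. It interprets the map as start offset to end offset, with the end exclusive per the > comparison in the code above; the unit test further below builds invalid_ranges the same way.

// Illustration only: the range lookup used by ClearInvalidSlots.
// upper_bound(host_offset) returns the first range that starts strictly
// after host_offset, so the only range that can contain host_offset is
// the one immediately before it.
#include <cstdint>
#include <iostream>
#include <map>

bool InInvalidRange(const std::map<uint32_t, uint32_t>& invalid_ranges,
                    uint32_t host_offset) {
  auto upper_bound = invalid_ranges.upper_bound(host_offset);
  if (upper_bound == invalid_ranges.begin()) return false;
  --upper_bound;  // Now upper_bound->first <= host_offset.
  return upper_bound->second > host_offset;
}

int main() {
  // Two invalid ranges: [100, 120) and [300, 350).
  std::map<uint32_t, uint32_t> invalid_ranges = {{100, 120}, {300, 350}};
  std::cout << InInvalidRange(invalid_ranges, 110) << "\n";  // 1 (inside)
  std::cout << InInvalidRange(invalid_ranges, 120) << "\n";  // 0 (end is exclusive)
  std::cout << InInvalidRange(invalid_ranges, 299) << "\n";  // 0 (between ranges)
  std::cout << InInvalidRange(invalid_ranges, 349) << "\n";  // 1 (inside)
}
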
@@ -361,11 +361,11 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
if (!free_ranges.empty()) {
TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
if (old_to_new != nullptr) {
-      old_to_new->RemoveInvaldSlots(free_ranges);
+      old_to_new->ClearInvalidSlots(free_ranges);
}
TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
if (old_to_old != nullptr) {
-      old_to_old->RemoveInvaldSlots(free_ranges);
+      old_to_old->ClearInvalidSlots(free_ranges);
}
}
......
@@ -188,7 +188,7 @@ TEST(TypedSlotSet, Iterate) {
EXPECT_EQ(added / 2, iterated);
}
-TEST(TypedSlotSet, RemoveInvalidSlots) {
+TEST(TypedSlotSet, ClearInvalidSlots) {
TypedSlotSet set(0);
const int kHostDelta = 100;
uint32_t entries = 10;
@@ -203,7 +203,7 @@ TEST(TypedSlotSet, RemoveInvalidSlots) {
std::pair<uint32_t, uint32_t>(i * kHostDelta, i * kHostDelta + 1));
}
-  set.RemoveInvaldSlots(invalid_ranges);
+  set.ClearInvalidSlots(invalid_ranges);
for (std::map<uint32_t, uint32_t>::iterator it = invalid_ranges.begin();
it != invalid_ranges.end(); ++it) {
uint32_t start = it->first;
@@ -217,5 +217,35 @@ TEST(TypedSlotSet, RemoveInvalidSlots) {
}
}
TEST(TypedSlotSet, Merge) {
TypedSlotSet set0(0), set1(0);
static const uint32_t kEntries = 10000;
for (uint32_t i = 0; i < kEntries; i++) {
set0.Insert(EMBEDDED_OBJECT_SLOT, 2 * i, 2 * i);
set1.Insert(EMBEDDED_OBJECT_SLOT, 2 * i + 1, 2 * i + 1);
}
uint32_t count = 0;
set0.Merge(&set1);
set0.Iterate(
[&count](SlotType slot_type, Address host_addr, Address slot_addr) {
CHECK_EQ(host_addr, slot_addr);
if (count < kEntries) {
CHECK_EQ(host_addr % 2, 0);
} else {
CHECK_EQ(host_addr % 2, 1);
}
++count;
return KEEP_SLOT;
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
CHECK_EQ(2 * kEntries, count);
set1.Iterate(
[](SlotType slot_type, Address host_addr, Address slot_addr) {
CHECK(false); // Unreachable.
return KEEP_SLOT;
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
}
} // namespace internal
} // namespace v8