Commit 841c40b7 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Clean up TypedSlotSet.

This extracts the parts of the TypedSlotSet that are used only
sequentially into a separate class called TypedSlots.

The new class will be used in the concurrent marker to keep track of
typed slots locally and then to merge them to the main remembered set
during finalization of marking.

The patch also cleans up the atomics in the Iterate and ClearInvalidSlots
methods, which can run concurrently with each other.

Bug: v8:8459

Change-Id: Id7a63041f7b99218381e5e9e1999210cab9c4369
Reviewed-on: https://chromium-review.googlesource.com/c/1340247
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57673}
parent 359fdff0
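Below is an illustrative sketch (not part of this patch) of how a concurrent marking task could use the new TypedSlots class: each task buffers typed slots locally without synchronization and later splices them into the page's TypedSlotSet via Merge() during finalization of marking. The helper functions RecordTypedSlotLocally and PublishTypedSlots are hypothetical names used only for illustration; only Insert() and Merge() come from this change.

// Illustrative sketch against the API introduced in this patch.
#include "src/heap/slot-set.h"

namespace v8 {
namespace internal {

// Runs on a concurrent marking task: no locking needed because each task
// owns its own TypedSlots instance.
void RecordTypedSlotLocally(TypedSlots* local_slots, SlotType type,
                            uint32_t host_offset, uint32_t offset) {
  local_slots->Insert(type, host_offset, offset);
}

// Runs on the main thread during finalization of marking: Merge() splices the
// local chunk list into the page's remembered set without copying slots.
void PublishTypedSlots(TypedSlotSet* page_slot_set, TypedSlots* local_slots) {
  page_slot_set->Merge(local_slots);
}

}  // namespace internal
}  // namespace v8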
......@@ -2077,6 +2077,7 @@ v8_source_set("v8_base") {
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/slot-set.cc",
"src/heap/slot-set.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
......
......@@ -261,12 +261,6 @@ class RememberedSet : public AllStatic {
}
}
// Eliminates all stale slots from the remembered set, i.e.
// slots that are no longer part of live objects. This method must be
// called after marking, when the whole transitive closure is known, and
// before sweeping, while the mark bits are still intact.
static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
private:
static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, ObjectSlot slot);
};
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
TypedSlots::~TypedSlots() {
Chunk* chunk = head_;
while (chunk != nullptr) {
Chunk* next = chunk->next;
delete[] chunk->buffer;
delete chunk;
chunk = next;
}
head_ = nullptr;
tail_ = nullptr;
}
void TypedSlots::Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
TypedSlot slot = {TypeField::encode(type) | OffsetField::encode(offset),
host_offset};
Chunk* chunk = EnsureChunk();
DCHECK_LT(chunk->count, chunk->capacity);
chunk->buffer[chunk->count] = slot;
++chunk->count;
}
void TypedSlots::Merge(TypedSlots* other) {
if (other->head_ == nullptr) {
return;
}
if (head_ == nullptr) {
head_ = other->head_;
tail_ = other->tail_;
} else {
tail_->next = other->head_;
tail_ = other->tail_;
}
other->head_ = nullptr;
other->tail_ = nullptr;
}
TypedSlots::Chunk* TypedSlots::EnsureChunk() {
if (!head_) {
head_ = tail_ = NewChunk(nullptr, kInitialBufferSize);
}
if (head_->count == head_->capacity) {
head_ = NewChunk(head_, NextCapacity(head_->capacity));
}
return head_;
}
TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, int capacity) {
Chunk* chunk = new Chunk;
chunk->next = next;
chunk->buffer = new TypedSlot[capacity];
chunk->capacity = capacity;
chunk->count = 0;
return chunk;
}
TypedSlotSet::~TypedSlotSet() { FreeToBeFreedChunks(); }
void TypedSlotSet::FreeToBeFreedChunks() {
base::MutexGuard guard(&to_be_freed_chunks_mutex_);
std::stack<std::unique_ptr<Chunk>> empty;
to_be_freed_chunks_.swap(empty);
}
void TypedSlotSet::ClearInvalidSlots(
const std::map<uint32_t, uint32_t>& invalid_ranges) {
Chunk* chunk = LoadHead();
while (chunk != nullptr) {
TypedSlot* buffer = chunk->buffer;
int count = chunk->count;
for (int i = 0; i < count; i++) {
TypedSlot slot = LoadTypedSlot(buffer + i);
SlotType type = TypeField::decode(slot.type_and_offset);
if (type == CLEARED_SLOT) continue;
uint32_t host_offset = slot.host_offset;
std::map<uint32_t, uint32_t>::const_iterator upper_bound =
invalid_ranges.upper_bound(host_offset);
if (upper_bound == invalid_ranges.begin()) continue;
// upper_bound points to the invalid range after the given slot. Hence,
// we have to go to the previous element.
upper_bound--;
DCHECK_LE(upper_bound->first, host_offset);
if (upper_bound->second > host_offset) {
ClearTypedSlot(buffer + i);
}
}
chunk = LoadNext(chunk);
}
}
} // namespace internal
} // namespace v8
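For reference, here is a standalone illustration (not part of the patch) of the range lookup that ClearInvalidSlots performs: invalid_ranges maps the start of each invalid range to its exclusive end, and upper_bound finds the only range that could contain a given host_offset.

// Standalone sketch of the upper_bound-based range lookup.
#include <cstdint>
#include <iostream>
#include <map>

bool InInvalidRange(const std::map<uint32_t, uint32_t>& invalid_ranges,
                    uint32_t host_offset) {
  // upper_bound returns the first range starting strictly after host_offset,
  // so the only candidate range containing host_offset is the previous one.
  auto it = invalid_ranges.upper_bound(host_offset);
  if (it == invalid_ranges.begin()) return false;
  --it;
  return it->first <= host_offset && host_offset < it->second;
}

int main() {
  std::map<uint32_t, uint32_t> invalid_ranges = {{100, 200}, {300, 301}};
  std::cout << InInvalidRange(invalid_ranges, 150) << "\n";  // 1: inside [100, 200)
  std::cout << InInvalidRange(invalid_ranges, 200) << "\n";  // 0: end is exclusive
  std::cout << InInvalidRange(invalid_ranges, 300) << "\n";  // 1: inside [300, 301)
  return 0;
}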
......@@ -392,101 +392,58 @@ enum SlotType {
CLEARED_SLOT
};
// Data structure for maintaining a multiset of typed slots in a page.
// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class TypedSlotSet {
class TypedSlots {
public:
enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
typedef std::pair<SlotType, uint32_t> TypeAndOffset;
static const int kMaxOffset = 1 << 29;
TypedSlots() = default;
virtual ~TypedSlots();
V8_EXPORT_PRIVATE void Insert(SlotType type, uint32_t host_offset,
uint32_t offset);
V8_EXPORT_PRIVATE void Merge(TypedSlots* other);
protected:
class OffsetField : public BitField<int, 0, 29> {};
class TypeField : public BitField<SlotType, 29, 3> {};
struct TypedSlot {
TypedSlot() : type_and_offset_(0), host_offset_(0) {}
TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
: type_and_offset_(TypeField::encode(type) |
OffsetField::encode(offset)),
host_offset_(host_offset) {}
bool operator==(const TypedSlot other) {
return type_and_offset() == other.type_and_offset() &&
host_offset() == other.host_offset();
}
bool operator!=(const TypedSlot other) { return !(*this == other); }
SlotType type() const { return TypeField::decode(type_and_offset()); }
uint32_t offset() const { return OffsetField::decode(type_and_offset()); }
TypeAndOffset GetTypeAndOffset() const {
uint32_t t_and_o = type_and_offset();
return std::make_pair(TypeField::decode(t_and_o),
OffsetField::decode(t_and_o));
}
uint32_t type_and_offset() const {
return base::AsAtomic32::Acquire_Load(&type_and_offset_);
}
uint32_t host_offset() const {
return base::AsAtomic32::Acquire_Load(&host_offset_);
}
void Set(TypedSlot slot) {
base::AsAtomic32::Release_Store(&type_and_offset_,
slot.type_and_offset());
base::AsAtomic32::Release_Store(&host_offset_, slot.host_offset());
}
void Clear() {
base::AsAtomic32::Release_Store(
&type_and_offset_,
TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
base::AsAtomic32::Release_Store(&host_offset_, 0);
}
uint32_t type_and_offset_;
uint32_t host_offset_;
uint32_t type_and_offset;
uint32_t host_offset;
};
static const int kMaxOffset = 1 << 29;
struct Chunk {
Chunk* next;
TypedSlot* buffer;
int32_t capacity;
int32_t count;
};
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
static int NextCapacity(int capacity) {
return Min(kMaxBufferSize, capacity * 2);
}
Chunk* EnsureChunk();
Chunk* NewChunk(Chunk* next, int capacity);
Chunk* head_ = nullptr;
Chunk* tail_ = nullptr;
};
explicit TypedSlotSet(Address page_start)
: page_start_(page_start), top_(new Chunk(nullptr, kInitialBufferSize)) {}
// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
public:
// The PREFREE_EMPTY_CHUNKS mode indicates that chunks detected as empty
// during iteration are queued in to_be_freed_chunks_, which are
// then freed by FreeToBeFreedChunks.
enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
~TypedSlotSet() {
Chunk* chunk = load_top();
while (chunk != nullptr) {
Chunk* n = chunk->next();
delete chunk;
chunk = n;
}
FreeToBeFreedChunks();
}
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}
// The slot offset specifies a slot at address page_start_ + offset.
// This method can only be called on the main thread.
void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
TypedSlot slot(type, host_offset, offset);
Chunk* top_chunk = load_top();
if (!top_chunk) {
top_chunk = new Chunk(nullptr, kInitialBufferSize);
set_top(top_chunk);
}
if (!top_chunk->AddSlot(slot)) {
Chunk* new_top_chunk =
new Chunk(top_chunk, NextCapacity(top_chunk->capacity()));
bool added = new_top_chunk->AddSlot(slot);
set_top(new_top_chunk);
DCHECK(added);
USE(added);
}
}
~TypedSlotSet() override;
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
......@@ -497,145 +454,90 @@ class TypedSlotSet {
// if (good(slot_type, slot_address)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
// This can run concurrently with ClearInvalidSlots().
template <typename Callback>
int Iterate(Callback callback, IterationMode mode) {
STATIC_ASSERT(CLEARED_SLOT < 8);
Chunk* chunk = load_top();
Chunk* chunk = head_;
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
TypedSlot* buf = chunk->buffer();
TypedSlot* buffer = chunk->buffer;
int count = chunk->count;
bool empty = true;
for (int i = 0; i < chunk->count(); i++) {
// Order is important here. We have to read out the slot type last to
// observe the concurrent removal case consistently.
Address host_addr = page_start_ + buf[i].host_offset();
TypeAndOffset type_and_offset = buf[i].GetTypeAndOffset();
SlotType type = type_and_offset.first;
for (int i = 0; i < count; i++) {
TypedSlot slot = LoadTypedSlot(buffer + i);
SlotType type = TypeField::decode(slot.type_and_offset);
if (type != CLEARED_SLOT) {
Address addr = page_start_ + type_and_offset.second;
uint32_t offset = OffsetField::decode(slot.type_and_offset);
Address addr = page_start_ + offset;
Address host_addr = page_start_ + slot.host_offset;
if (callback(type, host_addr, addr) == KEEP_SLOT) {
new_count++;
empty = false;
} else {
buf[i].Clear();
ClearTypedSlot(buffer + i);
}
}
}
Chunk* n = chunk->next();
Chunk* next = chunk->next;
if (mode == PREFREE_EMPTY_CHUNKS && empty) {
// We remove the chunk from the list but let it still point to its next
// chunk to allow concurrent iteration.
if (previous) {
previous->set_next(n);
StoreNext(previous, next);
} else {
set_top(n);
StoreHead(next);
}
base::MutexGuard guard(&to_be_freed_chunks_mutex_);
to_be_freed_chunks_.push(chunk);
to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
} else {
previous = chunk;
}
chunk = n;
chunk = next;
}
return new_count;
}
void FreeToBeFreedChunks() {
base::MutexGuard guard(&to_be_freed_chunks_mutex_);
while (!to_be_freed_chunks_.empty()) {
Chunk* top = to_be_freed_chunks_.top();
to_be_freed_chunks_.pop();
delete top;
}
}
// Clears all slots whose offsets lie in the specified ranges.
// This can run concurrently with Iterate().
void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);
void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
Chunk* chunk = load_top();
while (chunk != nullptr) {
TypedSlot* buf = chunk->buffer();
for (int i = 0; i < chunk->count(); i++) {
uint32_t host_offset = buf[i].host_offset();
std::map<uint32_t, uint32_t>::iterator upper_bound =
invalid_ranges.upper_bound(host_offset);
if (upper_bound == invalid_ranges.begin()) continue;
// upper_bounds points to the invalid range after the given slot. Hence,
// we have to go to the previous element.
upper_bound--;
DCHECK_LE(upper_bound->first, host_offset);
if (upper_bound->second > host_offset) {
buf[i].Clear();
}
}
chunk = chunk->next();
}
}
// Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
void FreeToBeFreedChunks();
private:
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
static int NextCapacity(int capacity) {
return Min(kMaxBufferSize, capacity * 2);
// Atomic operations used by Iterate and ClearInvalidSlots:
Chunk* LoadNext(Chunk* chunk) {
return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
}
void StoreNext(Chunk* chunk, Chunk* next) {
return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
}
Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
void StoreHead(Chunk* chunk) {
base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
}
TypedSlot LoadTypedSlot(TypedSlot* slot) {
// Order is important here and should match that of ClearTypedSlot. The
// order guarantees that type != CLEARED_SLOT implies valid host_offset.
TypedSlot result;
result.host_offset = base::AsAtomic32::Acquire_Load(&slot->host_offset);
result.type_and_offset =
base::AsAtomic32::Relaxed_Load(&slot->type_and_offset);
return result;
}
void ClearTypedSlot(TypedSlot* slot) {
// Order is important here and should match that of LoadTypedSlot.
base::AsAtomic32::Relaxed_Store(
&slot->type_and_offset,
TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
base::AsAtomic32::Release_Store(&slot->host_offset, 0);
}
class OffsetField : public BitField<int, 0, 29> {};
class TypeField : public BitField<SlotType, 29, 3> {};
struct Chunk : Malloced {
explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
next_ = next_chunk;
buffer_ = NewArray<TypedSlot>(chunk_capacity);
capacity_ = chunk_capacity;
count_ = 0;
}
~Chunk() { DeleteArray(buffer_); }
bool AddSlot(TypedSlot slot) {
int current_count = count();
if (current_count == capacity()) return false;
TypedSlot* current_buffer = buffer();
// Order is important here. We have to write the slot first before
// increasing the counter to guarantee that a consistent state is
// observed by concurrent threads.
current_buffer[current_count].Set(slot);
set_count(current_count + 1);
return true;
}
Chunk* next() const { return base::AsAtomicPointer::Acquire_Load(&next_); }
void set_next(Chunk* n) {
return base::AsAtomicPointer::Release_Store(&next_, n);
}
TypedSlot* buffer() const { return buffer_; }
int32_t capacity() const { return capacity_; }
int32_t count() const { return base::AsAtomic32::Acquire_Load(&count_); }
void set_count(int32_t new_value) {
base::AsAtomic32::Release_Store(&count_, new_value);
}
private:
Chunk* next_;
TypedSlot* buffer_;
int32_t capacity_;
int32_t count_;
};
Chunk* load_top() { return base::AsAtomicPointer::Acquire_Load(&top_); }
void set_top(Chunk* c) { base::AsAtomicPointer::Release_Store(&top_, c); }
Address page_start_;
Chunk* top_;
base::Mutex to_be_freed_chunks_mutex_;
std::stack<Chunk*> to_be_freed_chunks_;
std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};
} // namespace internal
......
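As a side note, the TypedSlot encoding above packs the slot offset into the low 29 bits and the SlotType into the top 3 bits (hence kMaxOffset = 1 << 29 and STATIC_ASSERT(CLEARED_SLOT < 8)). The following standalone sketch, using a simplified stand-in SlotType enum rather than V8's real one, demonstrates the same packing:

// Standalone illustration of the OffsetField/TypeField bit layout.
#include <cassert>
#include <cstdint>

enum SlotType { EMBEDDED_OBJECT_SLOT, CODE_TARGET_SLOT, CLEARED_SLOT = 7 };

constexpr uint32_t kOffsetBits = 29;                   // OffsetField: bits 0..28
constexpr uint32_t kOffsetMask = (1u << kOffsetBits) - 1;

uint32_t Encode(SlotType type, uint32_t offset) {
  assert(offset < (1u << kOffsetBits));                // kMaxOffset = 1 << 29
  return (static_cast<uint32_t>(type) << kOffsetBits) | offset;
}

SlotType DecodeType(uint32_t encoded) {
  return static_cast<SlotType>(encoded >> kOffsetBits);  // TypeField: bits 29..31
}

uint32_t DecodeOffset(uint32_t encoded) { return encoded & kOffsetMask; }

int main() {
  uint32_t packed = Encode(CODE_TARGET_SLOT, 0x1234);
  assert(DecodeType(packed) == CODE_TARGET_SLOT);
  assert(DecodeOffset(packed) == 0x1234);
  return 0;
}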
......@@ -361,11 +361,11 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
if (!free_ranges.empty()) {
TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
if (old_to_new != nullptr) {
old_to_new->RemoveInvaldSlots(free_ranges);
old_to_new->ClearInvalidSlots(free_ranges);
}
TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
if (old_to_old != nullptr) {
old_to_old->RemoveInvaldSlots(free_ranges);
old_to_old->ClearInvalidSlots(free_ranges);
}
}
......
......@@ -188,7 +188,7 @@ TEST(TypedSlotSet, Iterate) {
EXPECT_EQ(added / 2, iterated);
}
TEST(TypedSlotSet, RemoveInvalidSlots) {
TEST(TypedSlotSet, ClearInvalidSlots) {
TypedSlotSet set(0);
const int kHostDelta = 100;
uint32_t entries = 10;
......@@ -203,7 +203,7 @@ TEST(TypedSlotSet, RemoveInvalidSlots) {
std::pair<uint32_t, uint32_t>(i * kHostDelta, i * kHostDelta + 1));
}
set.RemoveInvaldSlots(invalid_ranges);
set.ClearInvalidSlots(invalid_ranges);
for (std::map<uint32_t, uint32_t>::iterator it = invalid_ranges.begin();
it != invalid_ranges.end(); ++it) {
uint32_t start = it->first;
......@@ -217,5 +217,35 @@ TEST(TypedSlotSet, RemoveInvalidSlots) {
}
}
TEST(TypedSlotSet, Merge) {
TypedSlotSet set0(0), set1(0);
static const uint32_t kEntries = 10000;
for (uint32_t i = 0; i < kEntries; i++) {
set0.Insert(EMBEDDED_OBJECT_SLOT, 2 * i, 2 * i);
set1.Insert(EMBEDDED_OBJECT_SLOT, 2 * i + 1, 2 * i + 1);
}
uint32_t count = 0;
set0.Merge(&set1);
set0.Iterate(
[&count](SlotType slot_type, Address host_addr, Address slot_addr) {
CHECK_EQ(host_addr, slot_addr);
if (count < kEntries) {
CHECK_EQ(host_addr % 2, 0);
} else {
CHECK_EQ(host_addr % 2, 1);
}
++count;
return KEEP_SLOT;
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
CHECK_EQ(2 * kEntries, count);
set1.Iterate(
[](SlotType slot_type, Address host_addr, Address slot_addr) {
CHECK(false); // Unreachable.
return KEEP_SLOT;
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
}
} // namespace internal
} // namespace v8