Commit 35e73840 authored by Dominik Inführ, committed by Commit Bot

[heap] Remove pre-freeing from TypedSlotSet

TypedSlotSet is only recorded for code pages. Code pages are not swept
concurrently to the application, so pre-freeing is not needed for typed
slot sets anymore.

Also replaces the manually allocated buffer with a regular std::vector.

Bug: v8:9454
Change-Id: I901851ad8b525c1653c9818e6599308319aeade2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1844773
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64140}
parent e25cdf24
......@@ -218,7 +218,7 @@ class RememberedSet : public AllStatic {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
: KEEP_SLOT;
},
TypedSlotSet::PREFREE_EMPTY_CHUNKS);
TypedSlotSet::FREE_EMPTY_CHUNKS);
}
}
......
......@@ -11,7 +11,6 @@ TypedSlots::~TypedSlots() {
Chunk* chunk = head_;
while (chunk != nullptr) {
Chunk* next = chunk->next;
delete[] chunk->buffer;
delete chunk;
chunk = next;
}
......@@ -22,9 +21,8 @@ TypedSlots::~TypedSlots() {
// Records a typed slot by encoding (type, offset) into a single TypedSlot
// and appending it to the head chunk's buffer.
// The diff-flattened text duplicated the pre-commit (raw buffer/count) and
// post-commit (std::vector) bodies; only the committed vector-based body is
// kept here, matching the commit description.
void TypedSlots::Insert(SlotType type, uint32_t offset) {
  TypedSlot slot = {TypeField::encode(type) | OffsetField::encode(offset)};
  Chunk* chunk = EnsureChunk();
  // EnsureChunk() guarantees spare capacity, so this push_back never
  // reallocates the buffer (asserted below).
  DCHECK_LT(chunk->buffer.size(), chunk->buffer.capacity());
  chunk->buffer.push_back(slot);
}
void TypedSlots::Merge(TypedSlots* other) {
......@@ -46,37 +44,25 @@ TypedSlots::Chunk* TypedSlots::EnsureChunk() {
if (!head_) {
head_ = tail_ = NewChunk(nullptr, kInitialBufferSize);
}
if (head_->count == head_->capacity) {
head_ = NewChunk(head_, NextCapacity(head_->capacity));
if (head_->buffer.size() == head_->buffer.capacity()) {
head_ = NewChunk(head_, NextCapacity(head_->buffer.capacity()));
}
return head_;
}
// Allocates a fresh chunk, linked in front of |next|, whose buffer is
// pre-reserved to hold exactly |capacity| slots without reallocating.
// The diff-flattened text duplicated the old signature (int capacity, manual
// new TypedSlot[]) alongside the committed one; only the committed
// std::vector-based version is kept here.
TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, size_t capacity) {
  Chunk* chunk = new Chunk;
  chunk->next = next;
  // Reserve up front so Insert() can rely on spare capacity; the DCHECK
  // pins the assumption that reserve() allocates exactly |capacity|.
  chunk->buffer.reserve(capacity);
  DCHECK_EQ(chunk->buffer.capacity(), capacity);
  return chunk;
}
TypedSlotSet::~TypedSlotSet() { FreeToBeFreedChunks(); }
void TypedSlotSet::FreeToBeFreedChunks() {
base::MutexGuard guard(&to_be_freed_chunks_mutex_);
std::stack<std::unique_ptr<Chunk>> empty;
to_be_freed_chunks_.swap(empty);
}
void TypedSlotSet::ClearInvalidSlots(
const std::map<uint32_t, uint32_t>& invalid_ranges) {
Chunk* chunk = LoadHead();
while (chunk != nullptr) {
TypedSlot* buffer = chunk->buffer;
int count = chunk->count;
for (int i = 0; i < count; i++) {
TypedSlot slot = LoadTypedSlot(buffer + i);
for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
if (type == CLEARED_SLOT) continue;
uint32_t offset = OffsetField::decode(slot.type_and_offset);
......@@ -88,7 +74,7 @@ void TypedSlotSet::ClearInvalidSlots(
upper_bound--;
DCHECK_LE(upper_bound->first, offset);
if (upper_bound->second > offset) {
ClearTypedSlot(buffer + i);
slot = ClearedTypedSlot();
}
}
chunk = LoadNext(chunk);
......
......@@ -355,9 +355,9 @@ enum SlotType {
};
// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// Typed slots can only appear in Code objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunks is an array of
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
......@@ -377,17 +377,15 @@ class V8_EXPORT_PRIVATE TypedSlots {
};
struct Chunk {
Chunk* next;
TypedSlot* buffer;
int32_t capacity;
int32_t count;
std::vector<TypedSlot> buffer;
};
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
static int NextCapacity(int capacity) {
static const size_t kInitialBufferSize = 100;
static const size_t kMaxBufferSize = 16 * KB;
static size_t NextCapacity(size_t capacity) {
return Min(kMaxBufferSize, capacity * 2);
}
Chunk* EnsureChunk();
Chunk* NewChunk(Chunk* next, int capacity);
Chunk* NewChunk(Chunk* next, size_t capacity);
Chunk* head_ = nullptr;
Chunk* tail_ = nullptr;
};
......@@ -396,15 +394,10 @@ class V8_EXPORT_PRIVATE TypedSlots {
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
public:
// The PREFREE_EMPTY_CHUNKS indicates that chunks detected as empty
// during the iteration are queued in to_be_freed_chunks_, which are
// then freed in FreeToBeFreedChunks.
enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}
~TypedSlotSet() override;
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
// Returns the new number of slots.
......@@ -422,11 +415,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
TypedSlot* buffer = chunk->buffer;
int count = chunk->count;
bool empty = true;
for (int i = 0; i < count; i++) {
TypedSlot slot = LoadTypedSlot(buffer + i);
for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
if (type != CLEARED_SLOT) {
uint32_t offset = OffsetField::decode(slot.type_and_offset);
......@@ -435,12 +425,12 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
new_count++;
empty = false;
} else {
ClearTypedSlot(buffer + i);
slot = ClearedTypedSlot();
}
}
}
Chunk* next = chunk->next;
if (mode == PREFREE_EMPTY_CHUNKS && empty) {
if (mode == FREE_EMPTY_CHUNKS && empty) {
// We remove the chunk from the list but let it still point its next
// chunk to allow concurrent iteration.
if (previous) {
......@@ -448,8 +438,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
} else {
StoreHead(next);
}
base::MutexGuard guard(&to_be_freed_chunks_mutex_);
to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
delete chunk;
} else {
previous = chunk;
}
......@@ -477,19 +467,11 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
void StoreHead(Chunk* chunk) {
base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
}
TypedSlot LoadTypedSlot(TypedSlot* slot) {
return TypedSlot{base::AsAtomic32::Relaxed_Load(&slot->type_and_offset)};
}
void ClearTypedSlot(TypedSlot* slot) {
// Order is important here and should match that of LoadTypedSlot.
base::AsAtomic32::Relaxed_Store(
&slot->type_and_offset,
TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
static TypedSlot ClearedTypedSlot() {
return TypedSlot{TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0)};
}
Address page_start_;
base::Mutex to_be_freed_chunks_mutex_;
std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};
} // namespace internal
......
......@@ -403,6 +403,10 @@ void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
Page* page = nullptr;
while (!stop_sweeper_tasks_ &&
((page = GetSweepingPageSafe(identity)) != nullptr)) {
// Typed slot sets are only recorded on code pages. Code pages
// are not swept concurrently to the application to ensure W^X.
DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
!page->typed_slot_set<OLD_TO_OLD>());
ParallelSweepPage(page, identity);
}
}
......@@ -461,12 +465,6 @@ int Sweeper::ParallelSweepPage(
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
invalidated_slots_in_free_space);
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
if (typed_slot_set) {
typed_slot_set->FreeToBeFreedChunks();
}
}
{
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment