Commit ec383575 authored by hpayer, committed by Commit bot

[heap] Reland Concurrently free empty typed slot set chunks.

BUG=chromium:648568

Review-Url: https://codereview.chromium.org/2365603002
Cr-Commit-Position: refs/heads/master@{#39630}
parent 447de9ae
......@@ -3865,6 +3865,12 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
} else {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
// After finishing sweeping of a page we clean up its remembered set.
if (page->typed_old_to_new_slots()) {
page->typed_old_to_new_slots()->FreeToBeFreedChunks();
}
{
base::LockGuard<base::Mutex> guard(&mutex_);
swept_list_[identity].Add(page);
......
......@@ -36,7 +36,8 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
} else {
return REMOVE_SLOT;
}
});
},
TypedSlotSet::PREFREE_EMPTY_CHUNKS);
}
}
for (MemoryChunk* chunk : *heap->map_space()) {
......
......@@ -149,10 +149,13 @@ class RememberedSet {
// Removes all typed slots whose target address lies in [start, end) on the
// given page. Iteration runs in PREFREE_EMPTY_CHUNKS mode so that chunks
// emptied by the removal are queued for deferred (concurrent) freeing
// instead of being kept alive.
static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
  TypedSlotSet* slots = GetTypedSlotSet(page);
  if (slots != nullptr) {
    slots->Iterate(
        [start, end](SlotType slot_type, Address host_addr,
                     Address slot_addr) {
          // Drop slots inside the range; keep everything else.
          return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
                                                       : KEEP_SLOT;
        },
        TypedSlotSet::PREFREE_EMPTY_CHUNKS);
  }
}
......@@ -173,7 +176,7 @@ class RememberedSet {
static void IterateTyped(MemoryChunk* chunk, Callback callback) {
TypedSlotSet* slots = GetTypedSlotSet(chunk);
if (slots != nullptr) {
int new_count = slots->Iterate(callback);
int new_count = slots->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
if (new_count == 0) {
ReleaseTypedSlotSet(chunk);
}
......
......@@ -5,6 +5,8 @@
#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H
#include <stack>
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
......@@ -261,6 +263,8 @@ enum SlotType {
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class TypedSlotSet {
public:
enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
typedef std::pair<SlotType, uint32_t> TypeAndOffset;
struct TypedSlot {
......@@ -328,6 +332,10 @@ class TypedSlotSet {
void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
TypedSlot slot(type, host_offset, offset);
Chunk* top_chunk = chunk_.Value();
if (!top_chunk) {
top_chunk = new Chunk(nullptr, kInitialBufferSize);
chunk_.SetValue(top_chunk);
}
if (!top_chunk->AddSlot(slot)) {
Chunk* new_top_chunk =
new Chunk(top_chunk, NextCapacity(top_chunk->capacity.Value()));
......@@ -348,13 +356,15 @@ class TypedSlotSet {
// else return REMOVE_SLOT;
// });
template <typename Callback>
int Iterate(Callback callback) {
int Iterate(Callback callback, IterationMode mode) {
STATIC_ASSERT(CLEARED_SLOT < 8);
Chunk* chunk = chunk_.Value();
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
TypedSlot* buffer = chunk->buffer.Value();
int count = chunk->count.Value();
bool empty = true;
for (int i = 0; i < count; i++) {
// Order is important here. We have to read out the slot type last to
// observe the concurrent removal case consistently.
......@@ -365,16 +375,40 @@ class TypedSlotSet {
Address addr = page_start_ + type_and_offset.second;
if (callback(type, host_addr, addr) == KEEP_SLOT) {
new_count++;
empty = false;
} else {
buffer[i].Clear();
}
}
}
if (mode == PREFREE_EMPTY_CHUNKS && empty) {
// We remove the chunk from the list but let it still point its next
// chunk to allow concurrent iteration.
if (previous) {
previous->next.SetValue(chunk->next.Value());
} else {
chunk_.SetValue(chunk->next.Value());
}
base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
to_be_freed_chunks_.push(chunk);
} else {
previous = chunk;
}
chunk = chunk->next.Value();
}
return new_count;
}
// Deletes every chunk that was queued for deferred freeing (chunks are
// queued by Iterate when run in PREFREE_EMPTY_CHUNKS mode). The stack is
// drained under the same mutex that guards insertion, so this is safe to
// run concurrently with iterators that are still pre-freeing chunks.
void FreeToBeFreedChunks() {
  base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
  while (!to_be_freed_chunks_.empty()) {
    delete to_be_freed_chunks_.top();
    to_be_freed_chunks_.pop();
  }
}
private:
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
......@@ -413,6 +447,8 @@ class TypedSlotSet {
Address page_start_;
base::AtomicValue<Chunk*> chunk_;
base::Mutex to_be_freed_chunks_mutex_;
std::stack<Chunk*> to_be_freed_chunks_;
};
} // namespace internal
......
......@@ -510,7 +510,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->InitializeReservedMemory();
chunk->old_to_new_slots_ = nullptr;
chunk->old_to_old_slots_ = nullptr;
chunk->typed_old_to_new_slots_ = nullptr;
chunk->typed_old_to_new_slots_.SetValue(nullptr);
chunk->typed_old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
......@@ -1077,7 +1077,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
}
if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots();
if (typed_old_to_new_slots_.Value() != nullptr) ReleaseTypedOldToNewSlots();
if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
}
......@@ -1113,13 +1113,14 @@ void MemoryChunk::ReleaseOldToOldSlots() {
}
// Allocates the typed old-to-new slot set for this chunk. Allocation is
// one-shot: the set must not already exist (checked below).
void MemoryChunk::AllocateTypedOldToNewSlots() {
  DCHECK(nullptr == typed_old_to_new_slots_.Value());
  // Stored through AtomicValue so concurrent readers observe a consistent
  // pointer value.
  typed_old_to_new_slots_.SetValue(new TypedSlotSet(address()));
}
// Frees the typed old-to-new slot set (if any) and atomically clears the
// pointer so subsequent readers observe nullptr. `delete nullptr` is a
// no-op, so no null check is needed.
void MemoryChunk::ReleaseTypedOldToNewSlots() {
  TypedSlotSet* typed_old_to_new_slots = typed_old_to_new_slots_.Value();
  delete typed_old_to_new_slots;
  typed_old_to_new_slots_.SetValue(nullptr);
}
void MemoryChunk::AllocateTypedOldToOldSlots() {
......
......@@ -456,7 +456,7 @@ class MemoryChunk {
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
// Returns the typed old-to-new slot set, or nullptr if none has been
// allocated. Read through AtomicValue because the pointer may be updated
// concurrently.
inline TypedSlotSet* typed_old_to_new_slots() {
  return typed_old_to_new_slots_.Value();
}
inline TypedSlotSet* typed_old_to_old_slots() {
return typed_old_to_old_slots_;
......@@ -656,7 +656,7 @@ class MemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* old_to_new_slots_;
SlotSet* old_to_old_slots_;
TypedSlotSet* typed_old_to_new_slots_;
base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_;
TypedSlotSet* typed_old_to_old_slots_;
SkipList* skip_list_;
......
......@@ -152,24 +152,29 @@ TEST(TypedSlotSet, Iterate) {
++added;
}
int iterated = 0;
set.Iterate([&iterated, kDelta, kHostDelta](SlotType type, Address host_addr,
set.Iterate(
[&iterated, kDelta, kHostDelta](SlotType type, Address host_addr,
Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
uint32_t j = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(host_addr));
uint32_t j =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(host_addr));
EXPECT_EQ(i % CLEARED_SLOT, static_cast<uint32_t>(type));
EXPECT_EQ(0, i % kDelta);
EXPECT_EQ(0, j % kHostDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
});
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
EXPECT_EQ(added, iterated);
iterated = 0;
set.Iterate([&iterated](SlotType type, Address host_addr, Address addr) {
set.Iterate(
[&iterated](SlotType type, Address host_addr, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
EXPECT_EQ(0, i % 2);
++iterated;
return KEEP_SLOT;
});
},
TypedSlotSet::KEEP_EMPTY_CHUNKS);
EXPECT_EQ(added / 2, iterated);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment