Commit 3eda099c authored by hpayer, committed by Commit bot

[heap] Scalable slots buffer for parallel compaction.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1341973003

Cr-Commit-Position: refs/heads/master@{#30800}
parent 3d964e0b
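The patch below replaces the single, mutex-protected migration slots buffer hot path with per-task slots buffers: each compaction task records migrated slots into its own buffer without locking, registers the finished buffer exactly once via AddEvacuationSlotsBufferSynchronized, and the main thread later drains all registered buffers. The following is a minimal standalone sketch of that pattern; the SlotsBuffer and Collector types here are simplified stand-ins rather than V8's internal classes, and only the method names mirror the patch.

```cpp
// Minimal sketch (simplified stand-in types, not V8 internals) of the
// per-task slots-buffer scheme introduced by this commit.
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

using Address = std::uintptr_t;

// Stand-in for v8::internal::SlotsBuffer: a growable list of slot addresses.
struct SlotsBuffer {
  std::vector<Address> slots;
  void Add(Address slot) { slots.push_back(slot); }
};

class Collector {
 public:
  // Called by a compaction task while it evacuates pages. No locking is
  // needed because the buffer is owned by exactly one task.
  static void RecordMigratedSlot(Address slot, SlotsBuffer* buffer) {
    buffer->Add(slot);
  }

  // Called once per task when it finishes, mirroring
  // AddEvacuationSlotsBufferSynchronized in the patch.
  void AddEvacuationSlotsBufferSynchronized(SlotsBuffer* buffer) {
    std::lock_guard<std::mutex> guard(evacuation_slots_buffers_mutex_);
    evacuation_slots_buffers_.push_back(buffer);
  }

  // After all tasks have joined, the main thread drains the buffers
  // serially (the patch leaves parallel processing as a TODO).
  std::size_t ProcessEvacuationSlotsBuffers() {
    std::size_t processed = 0;
    for (SlotsBuffer* buffer : evacuation_slots_buffers_) {
      processed += buffer->slots.size();  // stands in for UpdateSlotsRecordedIn
      delete buffer;                      // stands in for DeallocateChain
    }
    evacuation_slots_buffers_.clear();
    return processed;
  }

 private:
  std::mutex evacuation_slots_buffers_mutex_;
  std::vector<SlotsBuffer*> evacuation_slots_buffers_;
};

int main() {
  Collector collector;
  std::vector<std::thread> tasks;
  for (int t = 0; t < 4; ++t) {
    tasks.emplace_back([&collector] {
      // Task-local buffer; slots are recorded without synchronization.
      SlotsBuffer* local = new SlotsBuffer();
      for (Address i = 0; i < 1000; ++i) {
        Collector::RecordMigratedSlot(i * sizeof(Address), local);
      }
      // One lock acquisition per task instead of one per recorded slot.
      collector.AddEvacuationSlotsBufferSynchronized(local);
    });
  }
  for (std::thread& task : tasks) task.join();
  return collector.ProcessEvacuationSlotsBuffers() == 4000 ? 0 : 1;
}
```

The only contended lock is now taken once per task rather than once per recorded slot, which is what makes the scheme scale with the number of compaction tasks.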
@@ -483,9 +483,11 @@ class MarkCompactCollector::CompactionTask : public v8::Task {
private:
// v8::Task overrides.
void Run() override {
heap_->mark_compact_collector()->EvacuatePages(spaces_);
heap_->mark_compact_collector()
->pending_compaction_tasks_semaphore_.Signal();
MarkCompactCollector* mark_compact = heap_->mark_compact_collector();
SlotsBuffer* evacuation_slots_buffer = nullptr;
mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
mark_compact->pending_compaction_tasks_semaphore_.Signal();
}
Heap* heap_;
@@ -1730,7 +1732,7 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
}
Object* target = allocation.ToObjectChecked();
MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
}
@@ -2558,7 +2560,8 @@ void MarkCompactCollector::AbortWeakCells() {
}
void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
void MarkCompactCollector::RecordMigratedSlot(
Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
// When parallel compaction is in progress, store and slots buffer entries
// require synchronization.
if (heap_->InNewSpace(value)) {
@@ -2568,48 +2571,29 @@ void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
heap_->store_buffer()->Mark(slot);
}
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
if (parallel_compaction_in_progress_) {
SlotsBuffer::AddToSynchronized(
slots_buffer_allocator_, &migration_slots_buffer_,
&migration_slots_buffer_mutex_, reinterpret_cast<Object**>(slot),
SlotsBuffer::IGNORE_OVERFLOW);
} else {
SlotsBuffer::AddTo(slots_buffer_allocator_, &migration_slots_buffer_,
reinterpret_cast<Object**>(slot),
SlotsBuffer::IGNORE_OVERFLOW);
}
SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
reinterpret_cast<Object**>(slot),
SlotsBuffer::IGNORE_OVERFLOW);
}
}
void MarkCompactCollector::RecordMigratedCodeEntrySlot(
Address code_entry, Address code_entry_slot) {
Address code_entry, Address code_entry_slot,
SlotsBuffer** evacuation_slots_buffer) {
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
if (parallel_compaction_in_progress_) {
SlotsBuffer::AddToSynchronized(
slots_buffer_allocator_, &migration_slots_buffer_,
&migration_slots_buffer_mutex_, SlotsBuffer::CODE_ENTRY_SLOT,
code_entry_slot, SlotsBuffer::IGNORE_OVERFLOW);
} else {
SlotsBuffer::AddTo(slots_buffer_allocator_, &migration_slots_buffer_,
SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
}
void MarkCompactCollector::RecordMigratedCodeObjectSlot(Address code_object) {
if (parallel_compaction_in_progress_) {
SlotsBuffer::AddToSynchronized(
slots_buffer_allocator_, &migration_slots_buffer_,
&migration_slots_buffer_mutex_, SlotsBuffer::RELOCATED_CODE_OBJECT,
code_object, SlotsBuffer::IGNORE_OVERFLOW);
} else {
SlotsBuffer::AddTo(slots_buffer_allocator_, &migration_slots_buffer_,
SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
SlotsBuffer::IGNORE_OVERFLOW);
}
void MarkCompactCollector::RecordMigratedCodeObjectSlot(
Address code_object, SlotsBuffer** evacuation_slots_buffer) {
SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
SlotsBuffer::IGNORE_OVERFLOW);
}
@@ -2675,21 +2659,23 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
int size, AllocationSpace dest) {
void MarkCompactCollector::MigrateObject(
HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
SlotsBuffer** evacuation_slots_buffer) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_SPACE) {
DCHECK(evacuation_slots_buffer != nullptr);
DCHECK(IsAligned(size, kPointerSize));
switch (src->ContentType()) {
case HeapObjectContents::kTaggedValues:
MigrateObjectTagged(dst, src, size);
MigrateObjectTagged(dst, src, size, evacuation_slots_buffer);
break;
case HeapObjectContents::kMixedValues:
MigrateObjectMixed(dst, src, size);
MigrateObjectMixed(dst, src, size, evacuation_slots_buffer);
break;
case HeapObjectContents::kRawValues:
@@ -2700,14 +2686,17 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
if (compacting_ && dst->IsJSFunction()) {
Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
Address code_entry = Memory::Address_at(code_entry_slot);
RecordMigratedCodeEntrySlot(code_entry, code_entry_slot);
RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
evacuation_slots_buffer);
}
} else if (dest == CODE_SPACE) {
DCHECK(evacuation_slots_buffer != nullptr);
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
heap()->MoveBlock(dst_addr, src_addr, size);
RecordMigratedCodeObjectSlot(dst_addr);
RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
DCHECK(evacuation_slots_buffer == nullptr);
DCHECK(dest == NEW_SPACE);
heap()->MoveBlock(dst_addr, src_addr, size);
}
@@ -2716,33 +2705,36 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
}
void MarkCompactCollector::MigrateObjectTagged(HeapObject* dst, HeapObject* src,
int size) {
void MarkCompactCollector::MigrateObjectTagged(
HeapObject* dst, HeapObject* src, int size,
SlotsBuffer** evacuation_slots_buffer) {
Address src_slot = src->address();
Address dst_slot = dst->address();
for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
Object* value = Memory::Object_at(src_slot);
Memory::Object_at(dst_slot) = value;
RecordMigratedSlot(value, dst_slot);
RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
src_slot += kPointerSize;
dst_slot += kPointerSize;
}
}
void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
int size) {
void MarkCompactCollector::MigrateObjectMixed(
HeapObject* dst, HeapObject* src, int size,
SlotsBuffer** evacuation_slots_buffer) {
if (src->IsFixedTypedArrayBase()) {
heap()->MoveBlock(dst->address(), src->address(), size);
Address base_pointer_slot =
dst->address() + FixedTypedArrayBase::kBasePointerOffset;
RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot);
RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot,
evacuation_slots_buffer);
} else if (src->IsBytecodeArray()) {
heap()->MoveBlock(dst->address(), src->address(), size);
Address constant_pool_slot =
dst->address() + BytecodeArray::kConstantPoolOffset;
RecordMigratedSlot(Memory::Object_at(constant_pool_slot),
constant_pool_slot);
constant_pool_slot, evacuation_slots_buffer);
} else if (src->IsJSArrayBuffer()) {
heap()->MoveBlock(dst->address(), src->address(), size);
@@ -2752,7 +2744,8 @@ void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
Address regular_slots_end =
dst->address() + JSArrayBuffer::kByteLengthOffset + kPointerSize;
while (regular_slot < regular_slots_end) {
RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot);
RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot,
evacuation_slots_buffer);
regular_slot += kPointerSize;
}
@@ -2762,7 +2755,7 @@ void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
dst->address() + JSArrayBuffer::kSizeWithInternalFields;
while (internal_field_slot < internal_fields_end) {
RecordMigratedSlot(Memory::Object_at(internal_field_slot),
internal_field_slot);
internal_field_slot, evacuation_slots_buffer);
internal_field_slot += kPointerSize;
}
} else if (FLAG_unbox_double_fields) {
@@ -2779,7 +2772,7 @@ void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
Memory::Object_at(dst_slot) = value;
if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) {
RecordMigratedSlot(value, dst_slot);
RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
}
src_slot += kPointerSize;
@@ -3094,7 +3087,8 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
AllocationAlignment alignment = object->RequiredAlignment();
AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
if (allocation.To(&target)) {
MigrateObject(target, object, object_size, old_space->identity());
MigrateObject(target, object, object_size, old_space->identity(),
&migration_slots_buffer_);
// If we end up needing more special cases, we should factor this out.
if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
@@ -3326,8 +3320,15 @@ void MarkCompactCollector::EvacuateNewSpace() {
}
void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer) {
base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
evacuation_slots_buffers_.Add(evacuation_slots_buffer);
}
bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
Page* p, PagedSpace* target_space) {
Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
AlwaysAllocateScope always_allocate(isolate());
DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
@@ -3352,7 +3353,9 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
if (!allocation.To(&target_object)) {
return false;
}
MigrateObject(target_object, object, size, target_space->identity());
MigrateObject(target_object, object, size, target_space->identity(),
evacuation_slots_buffer);
DCHECK(object->map_word().IsForwardingAddress());
}
@@ -3396,7 +3399,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// Contribute in main thread. Counter and signal are in principle not needed.
concurrent_compaction_tasks_active_++;
EvacuatePages(compaction_spaces_for_tasks[0]);
EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
pending_compaction_tasks_semaphore_.Signal();
WaitUntilCompactionCompleted();
@@ -3472,7 +3475,8 @@ void MarkCompactCollector::WaitUntilCompactionCompleted() {
void MarkCompactCollector::EvacuatePages(
CompactionSpaceCollection* compaction_spaces) {
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer) {
for (int i = 0; i < evacuation_candidates_.length(); i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
@@ -3485,7 +3489,8 @@ void MarkCompactCollector::EvacuatePages(
DCHECK_EQ(p->parallel_compaction_state().Value(),
MemoryChunk::kCompactingInProgress);
if (EvacuateLiveObjectsFromPage(
p, compaction_spaces->Get(p->owner()->identity()))) {
p, compaction_spaces->Get(p->owner()->identity()),
evacuation_slots_buffer)) {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingFinalize);
} else {
@@ -3687,6 +3692,28 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuatePagesInParallel();
}
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
UpdateSlotsRecordedIn(migration_slots_buffer_);
if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
SlotsBuffer::SizeOfChain(migration_slots_buffer_));
}
slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
DCHECK(migration_slots_buffer_ == NULL);
// TODO(hpayer): Process the slots buffers in parallel. This has to be done
// after evacuation of all pages finishes.
int buffers = evacuation_slots_buffers_.length();
for (int i = 0; i < buffers; i++) {
SlotsBuffer* buffer = evacuation_slots_buffers_[i];
UpdateSlotsRecordedIn(buffer);
slots_buffer_allocator_->DeallocateChain(&buffer);
}
evacuation_slots_buffers_.Rewind(0);
}
// Second pass: find pointers to new space and update them.
PointersUpdatingVisitor updating_visitor(heap());
@@ -3718,16 +3745,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
UpdateSlotsRecordedIn(migration_slots_buffer_);
if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
SlotsBuffer::SizeOfChain(migration_slots_buffer_));
}
}
int npages = evacuation_candidates_.length();
{
GCTracer::Scope gc_scope(
@@ -3805,9 +3822,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
DCHECK(migration_slots_buffer_ == NULL);
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash(
heap()->isolate()->factory()->undefined_value());
......
@@ -427,10 +427,13 @@ class MarkCompactCollector {
void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
AllocationSpace to_old_space);
AllocationSpace to_old_space,
SlotsBuffer** evacuation_slots_buffer);
void MigrateObjectTagged(HeapObject* dst, HeapObject* src, int size);
void MigrateObjectMixed(HeapObject* dst, HeapObject* src, int size);
void MigrateObjectTagged(HeapObject* dst, HeapObject* src, int size,
SlotsBuffer** evacuation_slots_buffer);
void MigrateObjectMixed(HeapObject* dst, HeapObject* src, int size,
SlotsBuffer** evacuation_slots_buffer);
void MigrateObjectRaw(HeapObject* dst, HeapObject* src, int size);
bool TryPromoteObject(HeapObject* object, int object_size);
@@ -566,8 +569,6 @@ class MarkCompactCollector {
SlotsBuffer* migration_slots_buffer_;
base::Mutex migration_slots_buffer_mutex_;
// Finishes GC, performs heap verification if enabled.
void Finish();
@@ -716,9 +717,15 @@ class MarkCompactCollector {
void EvacuateNewSpace();
bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space,
SlotsBuffer** evacuation_slots_buffer);
void AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer);
void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer);
void EvacuatePages(CompactionSpaceCollection* compaction_spaces);
void EvacuatePagesInParallel();
int NumberOfParallelCompactionTasks() {
@@ -746,13 +753,16 @@ class MarkCompactCollector {
void ParallelSweepSpaceComplete(PagedSpace* space);
// Updates store buffer and slot buffer for a pointer in a migrating object.
void RecordMigratedSlot(Object* value, Address slot);
void RecordMigratedSlot(Object* value, Address slot,
SlotsBuffer** evacuation_slots_buffer);
// Adds the code entry slot to the slots buffer.
void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot);
void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
SlotsBuffer** evacuation_slots_buffer);
// Adds the slot of a moved code object.
void RecordMigratedCodeObjectSlot(Address code_object);
void RecordMigratedCodeObjectSlot(Address code_object,
SlotsBuffer** evacuation_slots_buffer);
#ifdef DEBUG
friend class MarkObjectVisitor;
@@ -771,6 +781,14 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
// The evacuation_slots_buffers_ are used by the compaction threads.
// When a compaction task finishes, it uses
// AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
// evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
// lock.
base::Mutex evacuation_slots_buffers_mutex_;
List<SlotsBuffer*> evacuation_slots_buffers_;
base::SmartPointer<FreeList> free_list_old_space_;
base::SmartPointer<FreeList> free_list_code_space_;
base::SmartPointer<FreeList> free_list_map_space_;
......
@@ -16,15 +16,6 @@ bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
}
bool SlotsBuffer::AddToSynchronized(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address,
base::Mutex* buffer_mutex, SlotType type,
Address addr, AdditionMode mode) {
base::LockGuard<base::Mutex> lock_guard(buffer_mutex);
return AddTo(allocator, buffer_address, type, addr, mode);
}
bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, SlotType type,
Address addr, AdditionMode mode) {
......
@@ -122,14 +122,6 @@ class SlotsBuffer {
return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
}
INLINE(static bool AddToSynchronized(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address,
base::Mutex* buffer_mutex,
ObjectSlot slot, AdditionMode mode)) {
base::LockGuard<base::Mutex> lock_guard(buffer_mutex);
return AddTo(allocator, buffer_address, slot, mode);
}
INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, ObjectSlot slot,
AdditionMode mode)) {
@@ -148,11 +140,6 @@ class SlotsBuffer {
static bool IsTypedSlot(ObjectSlot slot);
static bool AddToSynchronized(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address,
base::Mutex* buffer_mutex, SlotType type,
Address addr, AdditionMode mode);
static bool AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, SlotType type, Address addr,
AdditionMode mode);
......
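With every caller now passing a task-local evacuation_slots_buffer, the synchronized variants above become dead code and are removed, while SlotsBuffer::AddTo stays lock-free. A toy before/after contrast follows; the types are simplified assumptions, not the real SlotsBuffer, which also handles buffer chaining, overflow modes, and a SlotsBufferAllocator.

```cpp
// Toy contrast of the removed vs. retained recording paths (assumed
// simplified SlotsBuffer; not the real V8 implementation).
#include <cstdint>
#include <mutex>
#include <vector>

using Address = std::uintptr_t;
using SlotsBuffer = std::vector<Address>;

// Retained path: no locking; the caller owns the buffer exclusively.
bool AddTo(SlotsBuffer** buffer_address, Address slot) {
  if (*buffer_address == nullptr) *buffer_address = new SlotsBuffer();
  (*buffer_address)->push_back(slot);
  return true;
}

// Removed path: during parallel compaction, every slot recorded into the
// shared migration_slots_buffer_ had to take the shared mutex first.
bool AddToSynchronized(SlotsBuffer** buffer_address, std::mutex* buffer_mutex,
                       Address slot) {
  std::lock_guard<std::mutex> guard(*buffer_mutex);
  return AddTo(buffer_address, slot);
}

int main() {
  // Old scheme: one lock acquisition per recorded slot on a shared buffer.
  SlotsBuffer* shared_buffer = nullptr;
  std::mutex shared_mutex;
  AddToSynchronized(&shared_buffer, &shared_mutex, 0x1000);

  // New scheme: the task-local buffer is filled without any locking.
  SlotsBuffer* task_local_buffer = nullptr;
  for (Address i = 0; i < 1024; ++i) {
    AddTo(&task_local_buffer, i * sizeof(Address));
  }

  bool ok = shared_buffer->size() == 1 && task_local_buffer->size() == 1024;
  delete shared_buffer;
  delete task_local_buffer;
  return ok ? 0 : 1;
}
```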