Commit 01b8fc89 authored by ulan, committed by Commit bot

Reland "Replace slots buffer with remembered set. (patchset #14 id:250001 of...

Reland "Replace slots buffer with remembered set. (patchset #14 id:250001 of https://codereview.chromium.org/1703823002/ )"

This reverts commit 9146bc5e.

This contains a fix for the following crash:
1. We record slots for a fixed array.
2. We trim the fixed array, so that some recorded slots are now in free space.
3. During mark-compact we sweep the page with the fixed array. Now free list items contain memory with recorded slots.
4. We evacuate a byte array using the new free list items.
5. We iterate slots that are now inside the byte array and crash.
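
A rough C++ sketch of the scenario (identifiers such as array, elements_to_trim,
trimmed_start, and trimmed_end are illustrative; Heap::ClearRecordedSlotRange is
the hook this patch extends to also drop OLD_TO_OLD entries):

    // Steps 1+2: slots inside |array| were recorded, then the array is
    // trimmed; the recorded slots now point into free space.
    heap->RightTrimFixedArray(array, elements_to_trim);
    // The fix: drop the recorded slots covering the trimmed range so that
    // a byte array later evacuated into this free space is never iterated
    // as if it contained slots.
    heap->ClearRecordedSlotRange(trimmed_start, trimmed_end);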

BUG=chromium:589413,chromium:578883
LOG=NO

Review URL: https://codereview.chromium.org/1735523002

Cr-Commit-Position: refs/heads/master@{#34302}
parent e9495433
@@ -1072,8 +1072,6 @@ source_set("v8_base") {
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/slot-set.h",
"src/heap/slots-buffer.cc",
"src/heap/slots-buffer.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
@@ -5576,6 +5576,7 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
}
}
@@ -5585,6 +5586,7 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
store_buffer()->MoveEntriesToRememberedSet();
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end);
RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end);
}
}
@@ -26,11 +26,10 @@ void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
}
}
void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
Object* value) {
if (IsMarking() && value->IsHeapObject()) {
RecordWriteIntoCodeSlow(obj, rinfo, value);
RecordWriteIntoCodeSlow(host, rinfo, value);
}
}
@@ -131,13 +131,11 @@ void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
}
}
void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
RelocInfo* rinfo,
void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
Object* value) {
if (BaseRecordWrite(obj, value)) {
// Object is not going to be rescanned. We need to record the slot.
heap_->mark_compact_collector()->RecordRelocSlot(rinfo, value);
if (BaseRecordWrite(host, value)) {
// Object is not going to be rescanned. We need to record the slot.
heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
}
}
@@ -165,15 +165,13 @@ class IncrementalMarking {
// the incremental cycle (stays white).
INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
Object* value));
INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
@@ -6,7 +6,7 @@
#define V8_HEAP_MARK_COMPACT_INL_H_
#include "src/heap/mark-compact.h"
#include "src/heap/slots-buffer.h"
#include "src/heap/remembered-set.h"
#include "src/isolate.h"
namespace v8 {
@@ -70,25 +70,12 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
target_page->slots_buffer_address(), slot,
SlotsBuffer::FAIL_ON_OVERFLOW)) {
EvictPopularEvacuationCandidate(target_page);
}
}
}
void MarkCompactCollector::ForceRecordSlot(HeapObject* object, Object** slot,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
CHECK(SlotsBuffer::AddTo(slots_buffer_allocator_,
target_page->slots_buffer_address(), slot,
SlotsBuffer::IGNORE_OVERFLOW));
DCHECK(Marking::IsBlackOrGrey(Marking::MarkBitFrom(object)));
RememberedSet<OLD_TO_OLD>::Insert(source_page,
reinterpret_cast<Address>(slot));
}
}
@@ -21,7 +21,6 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/slots-buffer.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -55,8 +54,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
marking_parity_(ODD_MARKING_PARITY),
was_marked_incrementally_(false),
evacuation_(false),
slots_buffer_allocator_(nullptr),
migration_slots_buffer_(nullptr),
heap_(heap),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
@@ -249,7 +246,6 @@ void MarkCompactCollector::SetUp() {
free_list_map_space_.Reset(new FreeList(heap_->map_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
slots_buffer_allocator_ = new SlotsBufferAllocator();
if (FLAG_flush_code) {
code_flusher_ = new CodeFlusher(isolate());
@@ -263,7 +259,6 @@ void MarkCompactCollector::SetUp() {
void MarkCompactCollector::TearDown() {
AbortCompaction();
delete marking_deque_memory_;
delete slots_buffer_allocator_;
delete code_flusher_;
}
@@ -310,55 +305,26 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
return compacting_;
}
void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
}
// There is no need to filter the old to old set because
// it is completely cleared after the mark-compact GC.
// The slots that become invalid due to runtime transitions are
// cleared eagerly immediately after the transition.
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
for (Page* p : evacuation_candidates_) {
SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
}
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyValidStoreAndSlotsBufferEntries();
RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
}
#endif
}
#ifdef VERIFY_HEAP
static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
SlotsBuffer::VerifySlots(heap, p->slots_buffer());
}
}
void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
VerifyValidSlotsBufferEntries(heap(), heap()->map_space());
LargeObjectIterator it(heap()->lo_space());
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
}
}
#endif
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -708,7 +674,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
// of a GC all evacuation candidates are cleared and their slot buffers are
// released.
CHECK(!p->IsEvacuationCandidate());
CHECK(p->slots_buffer() == nullptr);
CHECK_NULL(p->old_to_old_slots());
CHECK_NULL(p->typed_old_to_old_slots());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
@@ -814,8 +781,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
RememberedSet<OLD_TO_OLD>::ClearAll(heap());
for (Page* p : evacuation_candidates_) {
slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
@@ -1231,9 +1198,11 @@ class MarkCompactMarkingVisitor
// was marked through the compilation cache before marker reached JSRegExp
// object.
FixedArray* data = FixedArray::cast(re->data());
Object** slot =
data->data_start() + JSRegExp::saved_code_index(is_one_byte);
heap->mark_compact_collector()->RecordSlot(data, slot, code);
if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
Object** slot =
data->data_start() + JSRegExp::saved_code_index(is_one_byte);
heap->mark_compact_collector()->RecordSlot(data, slot, code);
}
// Set a number in the 0-255 range to guarantee no smi overflow.
re->SetDataAt(JSRegExp::code_index(is_one_byte),
@@ -1530,12 +1499,12 @@ class MarkCompactCollector::EvacuateVisitorBase
: public MarkCompactCollector::HeapObjectVisitor {
public:
EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer)
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots)
: heap_(heap),
evacuation_slots_buffer_(evacuation_slots_buffer),
compaction_spaces_(compaction_spaces),
local_store_buffer_(local_store_buffer) {}
old_to_old_slots_(old_to_old_slots),
old_to_new_slots_(old_to_new_slots) {}
bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
HeapObject** target_object) {
@@ -1545,7 +1514,7 @@ class MarkCompactCollector::EvacuateVisitorBase
if (allocation.To(target_object)) {
heap_->mark_compact_collector()->MigrateObject(
*target_object, object, size, target_space->identity(),
evacuation_slots_buffer_, local_store_buffer_);
old_to_old_slots_, old_to_new_slots_);
return true;
}
return false;
@@ -1553,9 +1522,9 @@ class MarkCompactCollector::EvacuateVisitorBase
protected:
Heap* heap_;
SlotsBuffer** evacuation_slots_buffer_;
CompactionSpaceCollection* compaction_spaces_;
LocalStoreBuffer* local_store_buffer_;
LocalSlotsBuffer* old_to_old_slots_;
LocalSlotsBuffer* old_to_new_slots_;
};
@@ -1567,11 +1536,11 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
explicit EvacuateNewSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer,
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots,
HashMap* local_pretenuring_feedback)
: EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
local_store_buffer),
: EvacuateVisitorBase(heap, compaction_spaces, old_to_old_slots,
old_to_new_slots),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
space_to_allocate_(NEW_SPACE),
promoted_size_(0),
@@ -1598,8 +1567,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
AllocationSpace space = AllocateTargetObject(object, &target);
heap_->mark_compact_collector()->MigrateObject(
HeapObject::cast(target), object, size, space,
(space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
(space == NEW_SPACE) ? nullptr : local_store_buffer_);
(space == NEW_SPACE) ? nullptr : old_to_old_slots_,
(space == NEW_SPACE) ? nullptr : old_to_new_slots_);
if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
}
@@ -1719,10 +1688,10 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
public:
EvacuateOldSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer)
: EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
local_store_buffer) {}
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots)
: EvacuateVisitorBase(heap, compaction_spaces, old_to_old_slots,
old_to_new_slots) {}
bool Visit(HeapObject* object) override {
CompactionSpace* target_space = compaction_spaces_->Get(
@@ -2179,7 +2148,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearWeakCollections();
ClearInvalidStoreAndSlotsBufferEntries();
ClearInvalidRememberedSetSlots();
}
@@ -2542,88 +2511,56 @@ void MarkCompactCollector::AbortTransitionArrays() {
}
void MarkCompactCollector::RecordMigratedSlot(
Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer) {
Object* value, Address slot, LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots) {
// When parallel compaction is in progress, store and slots buffer entries
// require synchronization.
if (heap_->InNewSpace(value)) {
if (compaction_in_progress_) {
local_store_buffer->Record(slot);
old_to_new_slots->Record(slot);
} else {
Page* page = Page::FromAddress(slot);
RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
reinterpret_cast<Object**>(slot),
SlotsBuffer::IGNORE_OVERFLOW);
}
}
void MarkCompactCollector::RecordMigratedCodeEntrySlot(
Address code_entry, Address code_entry_slot,
SlotsBuffer** evacuation_slots_buffer) {
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
old_to_old_slots->Record(slot);
}
}
void MarkCompactCollector::RecordMigratedCodeObjectSlot(
Address code_object, SlotsBuffer** evacuation_slots_buffer) {
SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
SlotsBuffer::IGNORE_OVERFLOW);
}
static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
if (RelocInfo::IsCodeTarget(rmode)) {
return SlotsBuffer::CODE_TARGET_SLOT;
return CODE_TARGET_SLOT;
} else if (RelocInfo::IsCell(rmode)) {
return SlotsBuffer::CELL_TARGET_SLOT;
return CELL_TARGET_SLOT;
} else if (RelocInfo::IsEmbeddedObject(rmode)) {
return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
return EMBEDDED_OBJECT_SLOT;
} else if (RelocInfo::IsDebugBreakSlot(rmode)) {
return SlotsBuffer::DEBUG_TARGET_SLOT;
return DEBUG_TARGET_SLOT;
}
UNREACHABLE();
return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}
static inline SlotsBuffer::SlotType DecodeSlotType(
SlotsBuffer::ObjectSlot slot) {
return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
return NUMBER_OF_SLOT_TYPES;
}
void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
RelocInfo::Mode rmode = rinfo->rmode();
if (target_page->IsEvacuationCandidate() &&
(rinfo->host() == NULL ||
!ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
Address addr = rinfo->pc();
SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
SlotType slot_type = SlotTypeForRMode(rmode);
if (rinfo->IsInConstantPool()) {
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTarget(rmode)) {
slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
slot_type = CODE_ENTRY_SLOT;
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rmode));
slot_type = SlotsBuffer::OBJECT_SLOT;
slot_type = OBJECT_SLOT;
}
}
bool success = SlotsBuffer::AddTo(
slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
addr, SlotsBuffer::FAIL_ON_OVERFLOW);
if (!success) {
EvictPopularEvacuationCandidate(target_page);
}
RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
}
}
@@ -2631,23 +2568,21 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
class RecordMigratedSlotVisitor final : public ObjectVisitor {
public:
RecordMigratedSlotVisitor(MarkCompactCollector* collector,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer)
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots)
: collector_(collector),
evacuation_slots_buffer_(evacuation_slots_buffer),
local_store_buffer_(local_store_buffer) {}
old_to_old_slots_(old_to_old_slots),
old_to_new_slots_(old_to_new_slots) {}
V8_INLINE void VisitPointer(Object** p) override {
collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
evacuation_slots_buffer_,
local_store_buffer_);
old_to_old_slots_, old_to_new_slots_);
}
V8_INLINE void VisitPointers(Object** start, Object** end) override {
while (start < end) {
collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
evacuation_slots_buffer_,
local_store_buffer_);
old_to_old_slots_, old_to_new_slots_);
++start;
}
}
@@ -2655,15 +2590,16 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
if (collector_->compacting_) {
Address code_entry = Memory::Address_at(code_entry_slot);
collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
evacuation_slots_buffer_);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
old_to_old_slots_->Record(CODE_ENTRY_SLOT, code_entry_slot);
}
}
}
private:
MarkCompactCollector* collector_;
SlotsBuffer** evacuation_slots_buffer_;
LocalStoreBuffer* local_store_buffer_;
LocalSlotsBuffer* old_to_old_slots_;
LocalSlotsBuffer* old_to_new_slots_;
};
@@ -2683,31 +2619,28 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
// pointers to new space.
void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
int size, AllocationSpace dest,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer) {
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
DCHECK(dest != LO_SPACE);
if (dest == OLD_SPACE) {
DCHECK_OBJECT_SIZE(size);
DCHECK(evacuation_slots_buffer != nullptr);
DCHECK(IsAligned(size, kPointerSize));
heap()->MoveBlock(dst->address(), src->address(), size);
RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
local_store_buffer);
RecordMigratedSlotVisitor visitor(this, old_to_old_slots, old_to_new_slots);
dst->IterateBody(&visitor);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
DCHECK(evacuation_slots_buffer != nullptr);
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
heap()->MoveBlock(dst_addr, src_addr, size);
RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
old_to_old_slots->Record(RELOCATED_CODE_OBJECT, dst_addr);
Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(evacuation_slots_buffer == nullptr);
DCHECK(old_to_old_slots == nullptr);
DCHECK(dest == NEW_SPACE);
heap()->MoveBlock(dst_addr, src_addr, size);
}
@@ -2715,41 +2648,40 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
Memory::Address_at(src_addr) = dst_addr;
}
static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
SlotsBuffer::SlotType slot_type, Address addr) {
static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
SlotType slot_type, Address addr) {
switch (slot_type) {
case SlotsBuffer::CODE_TARGET_SLOT: {
case CODE_TARGET_SLOT: {
RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::CELL_TARGET_SLOT: {
case CELL_TARGET_SLOT: {
RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::CODE_ENTRY_SLOT: {
case CODE_ENTRY_SLOT: {
v->VisitCodeEntry(addr);
break;
}
case SlotsBuffer::RELOCATED_CODE_OBJECT: {
case RELOCATED_CODE_OBJECT: {
HeapObject* obj = HeapObject::FromAddress(addr);
Code::BodyDescriptor::IterateBody(obj, v);
break;
}
case SlotsBuffer::DEBUG_TARGET_SLOT: {
case DEBUG_TARGET_SLOT: {
RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
NULL);
if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
case EMBEDDED_OBJECT_SLOT: {
RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::OBJECT_SLOT: {
case OBJECT_SLOT: {
v->VisitPointer(reinterpret_cast<Object**>(addr));
break;
}
@@ -2854,32 +2786,6 @@ class PointersUpdatingVisitor : public ObjectVisitor {
};
void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
PointersUpdatingVisitor v(heap_);
size_t buffer_size = buffer->Size();
for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
if (!SlotsBuffer::IsTypedSlot(slot)) {
PointersUpdatingVisitor::UpdateSlot(heap_, slot);
} else {
++slot_idx;
DCHECK(slot_idx < buffer_size);
UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
reinterpret_cast<Address>(buffer->Get(slot_idx)));
}
}
}
void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
while (buffer != NULL) {
UpdateSlots(buffer);
buffer = buffer->next();
}
}
static void UpdatePointer(HeapObject** address, HeapObject* object) {
MapWord map_word = object->map_word();
// Since we only filter invalid slots in old space, the store buffer can
@@ -3001,33 +2907,33 @@ bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
return false;
}
bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
Page* p = Page::FromAddress(slot);
// This function does not support large objects right now.
Space* owner = p->owner();
if (owner == heap_->lo_space() || owner == NULL) {
if (owner == heap_->lo_space() || owner == nullptr) {
Object* large_object = heap_->lo_space()->FindObject(slot);
// This object has to exist, otherwise we would not have recorded a slot
// for it.
CHECK(large_object->IsHeapObject());
HeapObject* large_heap_object = HeapObject::cast(large_object);
if (IsMarked(large_heap_object)) {
return true;
return large_heap_object;
}
return false;
return nullptr;
}
LiveObjectIterator<kBlackObjects> it(p);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
HeapObject* object = nullptr;
while ((object = it.Next()) != nullptr) {
int size = object->Size();
if (object->address() > slot) return false;
if (object->address() > slot) return nullptr;
if (object->address() <= slot && slot < (object->address() + size)) {
return true;
return object;
}
}
return false;
return nullptr;
}
@@ -3046,18 +2952,6 @@ bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
}
void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
HeapObject* object) {
// The target object has to be black.
CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
// The target object is black but we don't know if the source slot is black.
// The source object could have died and the slot could be part of a free
// space. Use the mark bit iterator to find out about liveness of the slot.
CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
}
void MarkCompactCollector::EvacuateNewSpacePrologue() {
NewSpace* new_space = heap()->new_space();
NewSpacePageIterator it(new_space->bottom(), new_space->top());
@@ -3074,12 +2968,6 @@ void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
}
void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer) {
base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
evacuation_slots_buffers_.Add(evacuation_slots_buffer);
}
class MarkCompactCollector::Evacuator : public Malloced {
public:
Evacuator(MarkCompactCollector* collector,
@@ -3089,15 +2977,13 @@ class MarkCompactCollector::Evacuator : public Malloced {
evacuation_candidates_(evacuation_candidates),
newspace_evacuation_candidates_(newspace_evacuation_candidates),
compaction_spaces_(collector->heap()),
local_slots_buffer_(nullptr),
local_store_buffer_(collector->heap()),
local_pretenuring_feedback_(HashMap::PointersMatch,
kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_slots_buffer_, &local_store_buffer_,
&old_to_old_slots_, &old_to_new_slots_,
&local_pretenuring_feedback_),
old_space_visitor_(collector->heap(), &compaction_spaces_,
&local_slots_buffer_, &local_store_buffer_),
&old_to_old_slots_, &old_to_new_slots_),
duration_(0.0),
bytes_compacted_(0),
task_id_(0) {}
@@ -3134,8 +3020,8 @@ class MarkCompactCollector::Evacuator : public Malloced {
// Locally cached collector data.
CompactionSpaceCollection compaction_spaces_;
SlotsBuffer* local_slots_buffer_;
LocalStoreBuffer local_store_buffer_;
LocalSlotsBuffer old_to_old_slots_;
LocalSlotsBuffer old_to_new_slots_;
HashMap local_pretenuring_feedback_;
// Vistors for the corresponding spaces.
@@ -3213,8 +3099,22 @@ void MarkCompactCollector::Evacuator::Finalize() {
new_space_visitor_.promoted_size() +
new_space_visitor_.semispace_copied_size());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
local_store_buffer_.Process(heap()->store_buffer());
collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
// Move locally recorded slots to the global remembered sets.
old_to_new_slots_.Iterate(
[](Address slot) {
Page* page = Page::FromAddress(slot);
RememberedSet<OLD_TO_NEW>::Insert(page, slot);
},
[](SlotType type, Address slot) { UNREACHABLE(); });
old_to_old_slots_.Iterate(
[](Address slot) {
Page* page = Page::FromAddress(slot);
RememberedSet<OLD_TO_OLD>::Insert(page, slot);
},
[](SlotType type, Address slot) {
Page* page = Page::FromAddress(slot);
RememberedSet<OLD_TO_OLD>::InsertTyped(page, type, slot);
});
}
class MarkCompactCollector::CompactionTask : public CancelableTask {
@@ -3521,8 +3421,10 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
// Ignore all slots that might have been recorded in the body of the
// deoptimized code object. Assumption: no slots will be recorded for
// this object after invalidating it.
RemoveObjectSlots(code->instruction_start(),
code->address() + code->Size());
Page* page = Page::FromAddress(code->address());
Address start = code->instruction_start();
Address end = code->address() + code->Size();
RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
}
}
@@ -3533,21 +3435,6 @@ bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
}
void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
Address end_slot) {
// Remove entries by replacing them with an old-space slot containing a smi
// that is located in an unmovable page.
for (Page* p : evacuation_candidates_) {
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
end_slot);
}
}
}
#ifdef VERIFY_HEAP
static void VerifyAllBlackObjects(MemoryChunk* page) {
LiveObjectIterator<kAllLiveObjects> it(page);
@@ -3699,30 +3586,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
{
GCTracer::Scope gc_scope(
heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
UpdateSlotsRecordedIn(migration_slots_buffer_);
if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
SlotsBuffer::SizeOfChain(migration_slots_buffer_));
}
slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
DCHECK(migration_slots_buffer_ == NULL);
// TODO(hpayer): Process the slots buffers in parallel. This has to be done
// after evacuation of all pages finishes.
int buffers = evacuation_slots_buffers_.length();
for (int i = 0; i < buffers; i++) {
SlotsBuffer* buffer = evacuation_slots_buffers_[i];
UpdateSlotsRecordedIn(buffer);
slots_buffer_allocator_->DeallocateChain(&buffer);
}
evacuation_slots_buffers_.Rewind(0);
}
// Second pass: find pointers to new space and update them.
PointersUpdatingVisitor updating_visitor(heap());
{
@@ -3742,6 +3606,26 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
}
{
Heap* heap = this->heap();
GCTracer::Scope gc_scope(
heap->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
RememberedSet<OLD_TO_OLD>::Iterate(heap, [heap](Address slot) {
PointersUpdatingVisitor::UpdateSlot(heap,
reinterpret_cast<Object**>(slot));
return REMOVE_SLOT;
});
Isolate* isolate = heap->isolate();
PointersUpdatingVisitor* visitor = &updating_visitor;
RememberedSet<OLD_TO_OLD>::IterateTyped(
heap, [isolate, visitor](SlotType type, Address slot) {
UpdateTypedSlot(isolate, visitor, type, slot);
return REMOVE_SLOT;
});
}
{
GCTracer::Scope gc_scope(
heap()->tracer(),
@@ -3751,13 +3635,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
UpdateSlotsRecordedIn(p->slots_buffer());
if (FLAG_trace_fragmentation_verbose) {
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
}
slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
// Important: skip list should be cleared only after roots were updated
// because root iteration traverses the stack and might have to find
// code objects from non-updated pc pointing into evacuation candidate.
@@ -4019,41 +3896,13 @@ void MarkCompactCollector::Initialize() {
IncrementalMarking::Initialize();
}
void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
if (FLAG_trace_fragmentation) {
PrintF("Page %p is too popular. Disabling evacuation.\n",
reinterpret_cast<void*>(page));
}
isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);
// TODO(gc) If all evacuation candidates are too popular we
// should stop slots recording entirely.
page->ClearEvacuationCandidate();
DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
page->SetFlag(Page::POPULAR_PAGE);
// We were not collecting slots on this page that point
// to other evacuation candidates thus we have to
// rescan the page after evacuation to discover and update all
// pointers to evacuated objects.
page->SetFlag(Page::RESCAN_ON_EVACUATION);
}
void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
Code* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
target_page->slots_buffer_address(),
SlotsBuffer::CODE_ENTRY_SLOT, slot,
SlotsBuffer::FAIL_ON_OVERFLOW)) {
EvictPopularEvacuationCandidate(target_page);
}
!ShouldSkipEvacuationSlotRecording(host)) {
RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
}
}
@@ -4067,7 +3916,7 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
MarkBit mark_bit = Marking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
RecordRelocSlot(&rinfo, target);
RecordRelocSlot(host, &rinfo, target);
}
}
}
@@ -25,9 +25,7 @@ class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
class SlotsBuffer;
class SlotsBufferAllocator;
class LocalSlotsBuffer;
class Marking : public AllStatic {
public:
@@ -395,8 +393,8 @@ class MarkCompactCollector {
->IsEvacuationCandidate();
}
void RecordRelocSlot(RelocInfo* rinfo, Object* target);
void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
@@ -407,8 +405,8 @@ class MarkCompactCollector {
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
AllocationSpace to_old_space,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer);
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots);
void InvalidateCode(Code* code);
@@ -484,9 +482,8 @@ class MarkCompactCollector {
// whole transitive closure is known. They must be called before sweeping
// when mark bits are still intact.
bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
bool IsSlotInBlackObjectSlow(Page* p, Address slot);
HeapObject* FindBlackObjectBySlotSlow(Address slot);
bool IsSlotInLiveObject(Address slot);
void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);
// Removes all the slots in the slot buffers that are within the given
// address range.
@@ -520,8 +517,7 @@ class MarkCompactCollector {
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
void EvictPopularEvacuationCandidate(Page* page);
void ClearInvalidStoreAndSlotsBufferEntries();
void ClearInvalidRememberedSetSlots();
void StartSweeperThreads();
@@ -550,10 +546,6 @@ class MarkCompactCollector {
bool evacuation_;
SlotsBufferAllocator* slots_buffer_allocator_;
SlotsBuffer* migration_slots_buffer_;
// Finishes GC, performs heap verification if enabled.
void Finish();
@@ -707,9 +699,6 @@ class MarkCompactCollector {
void EvacuateNewSpacePrologue();
void EvacuateNewSpaceEpilogue();
void AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer);
void EvacuatePagesInParallel();
// The number of parallel compaction tasks, including the main thread.
@@ -745,16 +734,8 @@ class MarkCompactCollector {
// Updates store buffer and slot buffer for a pointer in a migrating object.
void RecordMigratedSlot(Object* value, Address slot,
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer);
// Adds the code entry slot to the slots buffer.
void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
SlotsBuffer** evacuation_slots_buffer);
// Adds the slot of a moved code object.
void RecordMigratedCodeObjectSlot(Address code_object,
SlotsBuffer** evacuation_slots_buffer);
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots);
#ifdef DEBUG
friend class MarkObjectVisitor;
@@ -774,14 +755,6 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
List<NewSpacePage*> newspace_evacuation_candidates_;
// The evacuation_slots_buffers_ are used by the compaction threads.
// When a compaction task finishes, it uses
// AddEvacuationSlotsbufferSynchronized to adds its slots buffer to the
// evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
// lock.
base::Mutex evacuation_slots_buffers_mutex_;
List<SlotsBuffer*> evacuation_slots_buffers_;
base::SmartPointer<FreeList> free_list_old_space_;
base::SmartPointer<FreeList> free_list_code_space_;
base::SmartPointer<FreeList> free_list_map_space_;
@@ -220,11 +220,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
Heap* heap, RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
// TODO(ulan): It could be better to record slots only for strongly embedded
// objects here and record slots for weakly embedded object during clearing
// of non-live references in mark-compact.
if (!rinfo->host()->IsWeakObject(object)) {
if (!host->IsWeakObject(object)) {
StaticVisitor::MarkObject(heap, object);
}
}
@@ -235,8 +236,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap,
RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::CELL);
Cell* cell = rinfo->target_cell();
heap->mark_compact_collector()->RecordRelocSlot(rinfo, cell);
if (!rinfo->host()->IsWeakObject(cell)) {
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, cell);
if (!host->IsWeakObject(cell)) {
StaticVisitor::MarkObject(heap, cell);
}
}
@@ -248,7 +250,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence());
Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
@@ -268,7 +271,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
@@ -279,7 +283,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Code* target = rinfo->code_age_stub();
DCHECK(target != NULL);
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
@@ -24,8 +24,7 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
if (slots != nullptr) {
slots->Iterate([heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, slot) ? SlotSet::KEEP_SLOT
: SlotSet::REMOVE_SLOT;
return IsValidSlot(heap, slot) ? KEEP_SLOT : REMOVE_SLOT;
});
}
}
@@ -33,17 +32,24 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
template <PointerDirection direction>
void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Iterate(heap, [heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
Object* object = *slot;
if (Page::FromAddress(addr)->owner() != nullptr &&
Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
CHECK(IsValidSlot(heap, slot));
heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
reinterpret_cast<Address>(slot), HeapObject::cast(object));
HeapObject* obj =
heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
if (obj == nullptr) {
// The slot is in a dead object.
MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
AllocationSpace owner = chunk->owner()->identity();
// The old to old remembered set should not have dead slots.
CHECK_NE(direction, OLD_TO_OLD);
// The old to new remembered set is allowed to have slots in dead
// objects only in map and large object space because these spaces cannot
// have raw untagged pointers.
CHECK(owner == MAP_SPACE || owner == LO_SPACE);
} else {
int offset = static_cast<int>(addr - obj->address());
CHECK(obj->IsValidSlot(offset));
}
return SlotSet::KEEP_SLOT;
return KEEP_SLOT;
});
}
@@ -64,6 +70,7 @@ bool RememberedSet<direction>::IsValidSlot(Heap* heap, Object** slot) {
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
} // namespace internal
} // namespace v8
@@ -56,10 +56,12 @@ class RememberedSet {
}
// Iterates and filters the remembered set with the given callback.
// The callback should take (Address slot) and return SlotSet::CallbackResult.
// The callback should take (Address slot) and return SlotCallbackResult.
template <typename Callback>
static void Iterate(Heap* heap, Callback callback) {
PointerChunkIterator it(heap);
MemoryChunkIterator it(heap, direction == OLD_TO_OLD
? MemoryChunkIterator::ALL
: MemoryChunkIterator::ALL_BUT_CODE_SPACE);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = GetSlotSet(chunk);
@@ -89,6 +91,60 @@ class RememberedSet {
});
}
// Given a page and a typed slot in that page, this function adds the slot
// to the remembered set.
static void InsertTyped(Page* page, SlotType slot_type, Address slot_addr) {
STATIC_ASSERT(direction == OLD_TO_OLD);
TypedSlotSet* slot_set = page->typed_old_to_old_slots();
if (slot_set == nullptr) {
page->AllocateTypedOldToOldSlots();
slot_set = page->typed_old_to_old_slots();
}
uintptr_t offset = slot_addr - page->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
slot_set->Insert(slot_type, static_cast<uint32_t>(offset));
}
// Given a page and a range of typed slots in that page, this function removes
// the slots from the remembered set.
static void RemoveRangeTyped(Page* page, Address start, Address end) {
TypedSlotSet* slots = page->typed_old_to_old_slots();
if (slots != nullptr) {
slots->Iterate([start, end](SlotType slot_type, Address slot_addr) {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
});
}
}
// Iterates and filters typed old to old pointers with the given callback.
// The callback should take (SlotType slot_type, Address slot_addr) and
// return SlotCallbackResult.
template <typename Callback>
static void IterateTyped(Heap* heap, Callback callback) {
MemoryChunkIterator it(heap, MemoryChunkIterator::ALL_BUT_MAP_SPACE);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
TypedSlotSet* slots = chunk->typed_old_to_old_slots();
if (slots != nullptr) {
int new_count = slots->Iterate(callback);
if (new_count == 0) {
chunk->ReleaseTypedOldToOldSlots();
}
}
}
}
// Clear all old to old slots from the remembered set.
static void ClearAll(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_OLD);
MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseOldToOldSlots();
chunk->ReleaseTypedOldToOldSlots();
}
}
// Eliminates all stale slots from the remembered set, i.e.
// slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and
@@ -125,8 +181,8 @@ class RememberedSet {
}
template <typename Callback>
static SlotSet::CallbackResult Wrapper(Heap* heap, Address slot_address,
Callback slot_callback) {
static SlotCallbackResult Wrapper(Heap* heap, Address slot_address,
Callback slot_callback) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* object = *slot;
@@ -140,17 +196,97 @@ class RememberedSet {
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
if (heap->InToSpace(object)) {
return SlotSet::KEEP_SLOT;
return KEEP_SLOT;
}
} else {
DCHECK(!heap->InNewSpace(object));
}
return SlotSet::REMOVE_SLOT;
return REMOVE_SLOT;
}
static bool IsValidSlot(Heap* heap, Object** slot);
};
// Buffer for keeping thread-local migration slots during compaction.
// TODO(ulan): Remove this once every thread gets local pages in compaction
// space.
class LocalSlotsBuffer BASE_EMBEDDED {
public:
LocalSlotsBuffer() : top_(new Node(nullptr)) {}
~LocalSlotsBuffer() {
Node* current = top_;
while (current != nullptr) {
Node* tmp = current->next;
delete current;
current = tmp;
}
}
void Record(Address addr) {
EnsureSpaceFor(1);
uintptr_t entry = reinterpret_cast<uintptr_t>(addr);
DCHECK_GE(entry, static_cast<uintptr_t>(NUMBER_OF_SLOT_TYPES));
Insert(entry);
}
void Record(SlotType type, Address addr) {
EnsureSpaceFor(2);
Insert(static_cast<uintptr_t>(type));
uintptr_t entry = reinterpret_cast<uintptr_t>(addr);
DCHECK_GE(entry, static_cast<uintptr_t>(NUMBER_OF_SLOT_TYPES));
Insert(entry);
}
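// An entry smaller than NUMBER_OF_SLOT_TYPES is a type marker for the
// address that follows it; any other entry is an untyped slot address.
// The DCHECK_GE checks in Record keep the two value ranges disjoint.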
template <typename UntypedCallback, typename TypedCallback>
void Iterate(UntypedCallback untyped_callback, TypedCallback typed_callback) {
Node* current = top_;
bool typed = false;
SlotType type;
Address addr;
while (current != nullptr) {
for (int i = 0; i < current->count; i++) {
uintptr_t entry = current->buffer[i];
if (entry < NUMBER_OF_SLOT_TYPES) {
DCHECK(!typed);
typed = true;
type = static_cast<SlotType>(entry);
} else {
addr = reinterpret_cast<Address>(entry);
if (typed) {
typed_callback(type, addr);
typed = false;
} else {
untyped_callback(addr);
}
}
}
current = current->next;
}
}
private:
void EnsureSpaceFor(int count) {
if (top_->remaining_free_slots() < count) top_ = new Node(top_);
}
void Insert(uintptr_t entry) { top_->buffer[top_->count++] = entry; }
static const int kBufferSize = 16 * KB;
struct Node : Malloced {
explicit Node(Node* next_node) : next(next_node), count(0) {}
inline int remaining_free_slots() { return kBufferSize - count; }
Node* next;
uintptr_t buffer[kBufferSize];
int count;
};
Node* top_;
};
} // namespace internal
} // namespace v8
@@ -7,10 +7,13 @@
#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
@@ -19,8 +22,6 @@ namespace internal {
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
bucket[i] = nullptr;
@@ -213,6 +214,124 @@ class SlotSet : public Malloced {
Address page_start_;
};
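// Slot types recorded in the typed remembered set; these mirror the slot
// kinds previously defined on SlotsBuffer (see the deleted slots-buffer.h
// below).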
enum SlotType {
EMBEDDED_OBJECT_SLOT,
OBJECT_SLOT,
RELOCATED_CODE_OBJECT,
CELL_TARGET_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
DEBUG_TARGET_SLOT,
NUMBER_OF_SLOT_TYPES
};
// Data structure for maintaining a multiset of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class TypedSlotSet {
public:
typedef uint32_t TypedSlot;
static const int kMaxOffset = 1 << 29;
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
chunk_ = new Chunk(nullptr, kInitialBufferSize);
}
~TypedSlotSet() {
Chunk* chunk = chunk_;
while (chunk != nullptr) {
Chunk* next = chunk->next;
delete chunk;
chunk = next;
}
}
// The slot offset specifies a slot at address page_start_ + offset.
void Insert(SlotType type, int offset) {
TypedSlot slot = ToTypedSlot(type, offset);
if (!chunk_->AddSlot(slot)) {
chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
bool added = chunk_->AddSlot(slot);
DCHECK(added);
USE(added);
}
}
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
// Returns the new number of slots.
//
// Sample usage:
// Iterate([](SlotType slot_type, Address slot_address) {
// if (good(slot_type, slot_address)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
template <typename Callback>
int Iterate(Callback callback) {
STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
const TypedSlot kRemovedSlot = TypeField::encode(NUMBER_OF_SLOT_TYPES);
Chunk* chunk = chunk_;
int new_count = 0;
while (chunk != nullptr) {
TypedSlot* buffer = chunk->buffer;
int count = chunk->count;
for (int i = 0; i < count; i++) {
TypedSlot slot = buffer[i];
if (slot != kRemovedSlot) {
SlotType type = TypeField::decode(slot);
Address addr = page_start_ + OffsetField::decode(slot);
if (callback(type, addr) == KEEP_SLOT) {
new_count++;
} else {
buffer[i] = kRemovedSlot;
}
}
}
chunk = chunk->next;
}
return new_count;
}
private:
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
static int NextCapacity(int capacity) {
return Min(kMaxBufferSize, capacity * 2);
}
static TypedSlot ToTypedSlot(SlotType type, int offset) {
return TypeField::encode(type) | OffsetField::encode(offset);
}
class OffsetField : public BitField<int, 0, 29> {};
class TypeField : public BitField<SlotType, 29, 3> {};
struct Chunk : Malloced {
explicit Chunk(Chunk* next_chunk, int capacity)
: next(next_chunk), count(0), capacity(capacity) {
buffer = NewArray<TypedSlot>(capacity);
}
bool AddSlot(TypedSlot slot) {
if (count == capacity) return false;
buffer[count++] = slot;
return true;
}
~Chunk() { DeleteArray(buffer); }
Chunk* next;
int count;
int capacity;
TypedSlot* buffer;
};
Address page_start_;
Chunk* chunk_;
};
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/slots-buffer.h"
#include "src/assembler.h"
#include "src/heap/heap.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}
bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, SlotType type,
Address addr, AdditionMode mode) {
SlotsBuffer* buffer = *buffer_address;
if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
allocator->DeallocateChain(buffer_address);
return false;
}
buffer = allocator->AllocateBuffer(buffer);
*buffer_address = buffer;
}
DCHECK(buffer->HasSpaceForTypedSlot());
buffer->Add(reinterpret_cast<ObjectSlot>(type));
buffer->Add(reinterpret_cast<ObjectSlot>(addr));
return true;
}
void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
// Remove entries by replacing them with an old-space slot containing a smi
// that is located in an unmovable page.
const ObjectSlot kRemovedEntry = HeapObject::RawField(
heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
->NeverEvacuate());
while (buffer != NULL) {
SlotsBuffer::ObjectSlot* slots = buffer->slots_;
intptr_t slots_count = buffer->idx_;
for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
ObjectSlot slot = slots[slot_idx];
if (!IsTypedSlot(slot)) {
Object* object = *slot;
// Slots are invalid when they currently:
// - do not point to a heap object (SMI)
// - point to a heap object in new space
// - are not within a live heap object on a valid pointer slot
// - point to a heap object not on an evacuation candidate
// TODO(mlippautz): Move InNewSpace check above IsSlotInLiveObject once
// we filter out unboxed double slots eagerly.
if (!object->IsHeapObject() ||
!heap->mark_compact_collector()->IsSlotInLiveObject(
reinterpret_cast<Address>(slot)) ||
heap->InNewSpace(object) ||
!Page::FromAddress(reinterpret_cast<Address>(object))
->IsEvacuationCandidate()) {
// TODO(hpayer): Instead of replacing slots with kRemovedEntry we
// could shrink the slots buffer in-place.
slots[slot_idx] = kRemovedEntry;
}
} else {
++slot_idx;
DCHECK(slot_idx < slots_count);
}
}
buffer = buffer->next();
}
}
void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
Address start_slot, Address end_slot) {
// Remove entries by replacing them with an old-space slot containing a smi
// that is located in an unmovable page.
const ObjectSlot kRemovedEntry = HeapObject::RawField(
heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
->NeverEvacuate());
while (buffer != NULL) {
SlotsBuffer::ObjectSlot* slots = buffer->slots_;
intptr_t slots_count = buffer->idx_;
bool is_typed_slot = false;
for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
ObjectSlot slot = slots[slot_idx];
if (!IsTypedSlot(slot)) {
Address slot_address = reinterpret_cast<Address>(slot);
if (slot_address >= start_slot && slot_address < end_slot) {
// TODO(hpayer): Instead of replacing slots with kRemovedEntry we
// could shrink the slots buffer in-place.
slots[slot_idx] = kRemovedEntry;
if (is_typed_slot) {
slots[slot_idx - 1] = kRemovedEntry;
}
}
is_typed_slot = false;
} else {
is_typed_slot = true;
DCHECK(slot_idx < slots_count);
}
}
buffer = buffer->next();
}
}
void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
while (buffer != NULL) {
SlotsBuffer::ObjectSlot* slots = buffer->slots_;
intptr_t slots_count = buffer->idx_;
for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
ObjectSlot slot = slots[slot_idx];
if (!IsTypedSlot(slot)) {
Object* object = *slot;
if (object->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(object);
CHECK(!heap->InNewSpace(object));
heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
reinterpret_cast<Address>(slot), heap_object);
}
} else {
++slot_idx;
DCHECK(slot_idx < slots_count);
}
}
buffer = buffer->next();
}
}
SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
return new SlotsBuffer(next_buffer);
}
void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
delete buffer;
}
void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
SlotsBuffer* buffer = *buffer_address;
while (buffer != NULL) {
SlotsBuffer* next_buffer = buffer->next();
DeallocateBuffer(buffer);
buffer = next_buffer;
}
*buffer_address = NULL;
}
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_SLOTS_BUFFER_H_
#define V8_HEAP_SLOTS_BUFFER_H_
#include "src/objects.h"
namespace v8 {
namespace internal {
// Forward declarations.
class SlotsBuffer;
// SlotsBufferAllocator manages the allocation and deallocation of slots buffer
// chunks and links them together. Slots buffer chunks are always created by the
// SlotsBufferAllocator.
class SlotsBufferAllocator {
public:
SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
void DeallocateBuffer(SlotsBuffer* buffer);
void DeallocateChain(SlotsBuffer** buffer_address);
};
// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
// - Untyped slots are expected to contain a tagged object pointer.
// They are recorded by an address.
// - Typed slots are expected to contain an encoded pointer to a heap
// object where the way of encoding depends on the type of the slot.
// They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, it
// is the first element of a typed slot's pair.
class SlotsBuffer {
public:
typedef Object** ObjectSlot;
explicit SlotsBuffer(SlotsBuffer* next_buffer)
: idx_(0), chain_length_(1), next_(next_buffer) {
if (next_ != NULL) {
chain_length_ = next_->chain_length_ + 1;
}
}
~SlotsBuffer() {}
void Add(ObjectSlot slot) {
DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
#ifdef DEBUG
if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
DCHECK_NOT_NULL(*slot);
}
#endif
slots_[idx_++] = slot;
}
ObjectSlot Get(intptr_t i) {
DCHECK(i >= 0 && i < kNumberOfElements);
return slots_[i];
}
size_t Size() {
DCHECK(idx_ <= kNumberOfElements);
return idx_;
}
enum SlotType {
EMBEDDED_OBJECT_SLOT,
OBJECT_SLOT,
RELOCATED_CODE_OBJECT,
CELL_TARGET_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
DEBUG_TARGET_SLOT,
NUMBER_OF_SLOT_TYPES
};
static const char* SlotTypeToString(SlotType type) {
switch (type) {
case EMBEDDED_OBJECT_SLOT:
return "EMBEDDED_OBJECT_SLOT";
case OBJECT_SLOT:
return "OBJECT_SLOT";
case RELOCATED_CODE_OBJECT:
return "RELOCATED_CODE_OBJECT";
case CELL_TARGET_SLOT:
return "CELL_TARGET_SLOT";
case CODE_TARGET_SLOT:
return "CODE_TARGET_SLOT";
case CODE_ENTRY_SLOT:
return "CODE_ENTRY_SLOT";
case DEBUG_TARGET_SLOT:
return "DEBUG_TARGET_SLOT";
case NUMBER_OF_SLOT_TYPES:
return "NUMBER_OF_SLOT_TYPES";
}
return "UNKNOWN SlotType";
}
SlotsBuffer* next() { return next_; }
static int SizeOfChain(SlotsBuffer* buffer) {
if (buffer == NULL) return 0;
return static_cast<int>(buffer->idx_ +
(buffer->chain_length_ - 1) * kNumberOfElements);
}
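// Editor's example: for a chain of length 3 whose head buffer holds
// idx_ == 10 entries, SizeOfChain returns 10 + (3 - 1) * 1021 == 2052,
// because every buffer except the head is full.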
inline bool IsFull() { return idx_ == kNumberOfElements; }
inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
}
INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, ObjectSlot slot,
AdditionMode mode)) {
SlotsBuffer* buffer = *buffer_address;
if (buffer == NULL || buffer->IsFull()) {
if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
allocator->DeallocateChain(buffer_address);
return false;
}
buffer = allocator->AllocateBuffer(buffer);
*buffer_address = buffer;
}
buffer->Add(slot);
return true;
}
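// Usage sketch (editor's illustration; |allocator| and |buffer_head| are
// assumed to be owned by the caller, e.g. the mark-compact collector):
//
//   if (!SlotsBuffer::AddTo(&allocator, &buffer_head, slot,
//                           SlotsBuffer::FAIL_ON_OVERFLOW)) {
//     // The chain exceeded kChainLengthThreshold and was deallocated; the
//     // caller has to fall back to a slower strategy, e.g. evicting the
//     // popular evacuation candidate the slot pointed to.
//   }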
static bool IsTypedSlot(ObjectSlot slot);
static bool AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, SlotType type, Address addr,
AdditionMode mode);
// Eliminates all stale entries from the slots buffer, i.e., slots that
// are not part of live objects anymore. This method must be called after
// marking, when the whole transitive closure is known, and before
// sweeping, when the mark bits are still intact.
static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
// Eliminates all slots that are within the given address range.
static void RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
Address start_slot, Address end_slot);
// Ensures that there are no invalid slots in the chain of slots buffers.
static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
static const int kNumberOfElements = 1021;
private:
static const int kChainLengthThreshold = 15;
intptr_t idx_;
intptr_t chain_length_;
SlotsBuffer* next_;
ObjectSlot slots_[kNumberOfElements];
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_SLOTS_BUFFER_H_
......@@ -147,6 +147,19 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
return NULL;
}
// -----------------------------------------------------------------------------
// LargePageIterator
LargePageIterator::LargePageIterator(LargeObjectSpace* space)
: next_page_(space->first_page()) {}
LargePage* LargePageIterator::next() {
LargePage* result = next_page_;
if (next_page_ != nullptr) {
next_page_ = next_page_->next_page();
}
return result;
}
// -----------------------------------------------------------------------------
// MemoryAllocator
......@@ -308,15 +321,15 @@ Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}
PointerChunkIterator::PointerChunkIterator(Heap* heap)
MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
: state_(kOldSpaceState),
mode_(mode),
old_iterator_(heap->old_space()),
code_iterator_(heap->code_space()),
map_iterator_(heap->map_space()),
lo_iterator_(heap->lo_space()) {}
MemoryChunk* PointerChunkIterator::next() {
MemoryChunk* MemoryChunkIterator::next() {
switch (state_) {
case kOldSpaceState: {
if (old_iterator_.has_next()) {
......@@ -326,33 +339,34 @@ MemoryChunk* PointerChunkIterator::next() {
// Fall through.
}
case kMapState: {
if (map_iterator_.has_next()) {
if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
return map_iterator_.next();
}
state_ = kCodeState;
// Fall through.
}
case kCodeState: {
if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
return code_iterator_.next();
}
state_ = kLargeObjectState;
// Fall through.
}
case kLargeObjectState: {
HeapObject* heap_object;
do {
heap_object = lo_iterator_.Next();
if (heap_object == NULL) {
state_ = kFinishedState;
return NULL;
}
// Fixed arrays are the only pointer-containing objects in large
// object space.
} while (!heap_object->IsFixedArray());
MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
return answer;
MemoryChunk* answer = lo_iterator_.next();
if (answer != nullptr) {
return answer;
}
state_ = kFinishedState;
// Fall through.
}
case kFinishedState:
return NULL;
return nullptr;
default:
break;
}
UNREACHABLE();
return NULL;
return nullptr;
}
......
......@@ -8,7 +8,6 @@
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slot-set.h"
#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"
......@@ -478,9 +477,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = 0;
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = nullptr;
chunk->old_to_new_slots_ = nullptr;
chunk->old_to_old_slots_ = nullptr;
chunk->typed_old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
......@@ -732,6 +731,10 @@ LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
MemoryChunk* chunk =
AllocateChunk(object_size, object_size, executable, owner);
if (chunk == NULL) return NULL;
if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
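// Editor's note: the STATIC_ASSERT above ties this runtime check to
// TypedSlotSet::kMaxOffset, so every offset recorded for a code page is
// guaranteed to be representable in the typed old-to-old remembered set.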
return LargePage::Initialize(isolate_->heap(), chunk);
}
......@@ -932,8 +935,6 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// MemoryChunk implementation
void MemoryChunk::ReleaseAllocatedMemory() {
delete slots_buffer_;
slots_buffer_ = nullptr;
delete skip_list_;
skip_list_ = nullptr;
delete mutex_;
......@@ -972,6 +973,15 @@ void MemoryChunk::ReleaseOldToOldSlots() {
old_to_old_slots_ = nullptr;
}
void MemoryChunk::AllocateTypedOldToOldSlots() {
DCHECK(nullptr == typed_old_to_old_slots_);
typed_old_to_old_slots_ = new TypedSlotSet(address());
}
void MemoryChunk::ReleaseTypedOldToOldSlots() {
delete typed_old_to_old_slots_;
typed_old_to_old_slots_ = nullptr;
}
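// A plausible lifecycle sketch (editor's note, not from the original patch):
// the typed set is allocated lazily on the first typed-slot insertion and
// released once the recorded slots have been processed, e.g.:
//
//   if (chunk->typed_old_to_old_slots() == nullptr) {
//     chunk->AllocateTypedOldToOldSlots();
//   }
//   chunk->typed_old_to_old_slots()->Insert(type, offset);
//   ...
//   chunk->ReleaseTypedOldToOldSlots();  // After the slots are updated.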
// -----------------------------------------------------------------------------
// PagedSpace implementation
......
......@@ -32,6 +32,7 @@ class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;
// -----------------------------------------------------------------------------
......@@ -392,14 +393,14 @@ class MemoryChunk {
+ kPointerSize // Heap* heap_
+ kIntSize; // int progress_bar_
static const size_t kSlotsBufferOffset =
static const size_t kOldToNewSlotsOffset =
kLiveBytesOffset + kIntSize; // int live_byte_count_
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
+ kPointerSize // SlotSet* old_to_new_slots_;
+ kPointerSize // SlotSet* old_to_old_slots_;
+ kPointerSize; // SkipList* skip_list_;
kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_;
+ kPointerSize // SlotSet* old_to_old_slots_;
+ kPointerSize // TypedSlotSet* typed_old_to_old_slots_;
+ kPointerSize; // SkipList* skip_list_;
static const size_t kMinHeaderSize =
kWriteBarrierCounterOffset +
......@@ -509,17 +510,18 @@ class MemoryChunk {
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
inline TypedSlotSet* typed_old_to_old_slots() {
return typed_old_to_old_slots_;
}
void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
void AllocateOldToOldSlots();
void ReleaseOldToOldSlots();
void AllocateTypedOldToOldSlots();
void ReleaseTypedOldToOldSlots();
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
......@@ -593,12 +595,14 @@ class MemoryChunk {
void MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK_NULL(slots_buffer_);
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
SetFlag(EVACUATION_CANDIDATE);
}
void ClearEvacuationCandidate() {
DCHECK(slots_buffer_ == NULL);
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
ClearFlag(EVACUATION_CANDIDATE);
}
......@@ -683,13 +687,12 @@ class MemoryChunk {
// Count of bytes marked black on page.
int live_byte_count_;
SlotsBuffer* slots_buffer_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// sets for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
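// (Editor's example: a large page spanning 2.5 * kPageSize would thus carry
// an array of ceil(2.5) == 3 slot sets.)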
SlotSet* old_to_new_slots_;
SlotSet* old_to_old_slots_;
TypedSlotSet* typed_old_to_old_slots_;
SkipList* skip_list_;
......@@ -862,6 +865,12 @@ class LargePage : public MemoryChunk {
inline void set_next_page(LargePage* page) { set_next_chunk(page); }
// A limit to guarantee that we do not overflow the typed slot offset in
// the old-to-old remembered set.
// Note that this limit is higher than the limit the assembler already
// imposes on the x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
private:
static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
......@@ -977,8 +986,8 @@ class MemoryChunkValidator {
STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
offsetof(MemoryChunk, live_byte_count_));
STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
offsetof(MemoryChunk, slots_buffer_));
STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
offsetof(MemoryChunk, old_to_new_slots_));
STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
offsetof(MemoryChunk, write_barrier_counter_));
......@@ -2988,25 +2997,42 @@ class LargeObjectIterator : public ObjectIterator {
LargePage* current_;
};
class LargePageIterator BASE_EMBEDDED {
public:
explicit inline LargePageIterator(LargeObjectSpace* space);
inline LargePage* next();
private:
LargePage* next_page_;
};
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
public:
inline explicit PointerChunkIterator(Heap* heap);
enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
// Returns nullptr when the iterator is done.
inline MemoryChunk* next();
private:
enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
enum State {
kOldSpaceState,
kMapState,
kCodeState,
kLargeObjectState,
kFinishedState
};
State state_;
const Mode mode_;
PageIterator old_iterator_;
PageIterator code_iterator_;
PageIterator map_iterator_;
LargeObjectIterator lo_iterator_;
LargePageIterator lo_iterator_;
};
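// Usage sketch (editor's illustration): visiting every chunk except those in
// the map space; |heap| is assumed to come from the caller.
//
//   MemoryChunkIterator it(heap, MemoryChunkIterator::ALL_BUT_MAP_SPACE);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != nullptr) {
//     // e.g. iterate the chunk's remembered sets here.
//   }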
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
......
......@@ -13,22 +13,6 @@
namespace v8 {
namespace internal {
void LocalStoreBuffer::Record(Address addr) {
if (top_->is_full()) top_ = new Node(top_);
top_->buffer[top_->count++] = addr;
}
void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
Node* current = top_;
while (current != nullptr) {
for (int i = 0; i < current->count; i++) {
Address slot = current->buffer[i];
Page* page = Page::FromAnyPointerAddress(heap_, slot);
RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
current = current->next;
}
}
} // namespace internal
} // namespace v8
......
......@@ -40,41 +40,6 @@ class StoreBuffer {
base::VirtualMemory* virtual_memory_;
};
class LocalStoreBuffer BASE_EMBEDDED {
public:
explicit LocalStoreBuffer(Heap* heap)
: top_(new Node(nullptr)), heap_(heap) {}
~LocalStoreBuffer() {
Node* current = top_;
while (current != nullptr) {
Node* tmp = current->next;
delete current;
current = tmp;
}
}
inline void Record(Address addr);
inline void Process(StoreBuffer* store_buffer);
private:
static const int kBufferSize = 16 * KB;
struct Node : Malloced {
explicit Node(Node* next_node) : next(next_node), count(0) {}
inline bool is_full() { return count == kBufferSize; }
Node* next;
Address buffer[kBufferSize];
int count;
};
Node* top_;
Heap* heap_;
};
} // namespace internal
} // namespace v8
......
......@@ -168,7 +168,6 @@
'test-sampler-api.cc',
'test-serialize.cc',
'test-simd.cc',
'test-slots-buffer.cc',
'test-strings.cc',
'test-symbols.cc',
'test-strtod.cc',
......
......@@ -29,6 +29,7 @@
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(Regress587004) \
V(Regress589413) \
V(WriteBarriersInCopyJSObject)
#define HEAP_TEST(Name) \
......
......@@ -6533,5 +6533,80 @@ HEAP_TEST(Regress587004) {
heap->CollectGarbage(NEW_SPACE);
}
HEAP_TEST(Regress589413) {
FLAG_stress_compaction = true;
FLAG_manual_evacuation_candidates_selection = true;
FLAG_parallel_compaction = false;
FLAG_concurrent_sweeping = false;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
// Get the heap into a clean state.
heap->CollectGarbage(OLD_SPACE);
heap->CollectGarbage(OLD_SPACE);
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
// Fill the new space with byte arrays whose elements look like pointers.
const int M = 256;
ByteArray* byte_array;
while (heap->AllocateByteArray(M).To(&byte_array)) {
for (int j = 0; j < M; j++) {
byte_array->set(j, 0x31);
}
// Add the array to the root set.
handle(byte_array);
}
// Make sure the byte arrays will be promoted on the next GC.
heap->CollectGarbage(NEW_SPACE);
// This number is close to the large free-list category threshold.
const int N = 0x3eee;
{
std::vector<FixedArray*> arrays;
std::set<Page*> pages;
FixedArray* array;
// Fill all pages with fixed arrays.
heap->set_force_oom(true);
while (heap->AllocateFixedArray(N, TENURED).To(&array)) {
arrays.push_back(array);
pages.insert(Page::FromAddress(array->address()));
// Add the array to the root set.
handle(array);
}
// Expand and fill one complete page with fixed arrays.
heap->set_force_oom(false);
while (heap->AllocateFixedArray(N, TENURED).To(&array)) {
arrays.push_back(array);
pages.insert(Page::FromAddress(array->address()));
// Add the array to the root set.
handle(array);
// Do not expand anymore.
heap->set_force_oom(true);
}
// Expand and mark the new page as evacuation candidate.
heap->set_force_oom(false);
{
AlwaysAllocateScope always_allocate(isolate);
Handle<HeapObject> ec_obj = factory->NewFixedArray(5000, TENURED);
Page* ec_page = Page::FromAddress(ec_obj->address());
ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
// Make all arrays point to the evacuation candidate so that
// slots are recorded for them.
for (size_t j = 0; j < arrays.size(); j++) {
array = arrays[j];
for (int i = 0; i < N; i++) {
array->set(i, *ec_obj);
}
}
}
SimulateIncrementalMarking(heap);
for (size_t j = 0; j < arrays.size(); j++) {
heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(arrays[j], N - 1);
}
}
// Force allocation from the free list.
heap->set_force_oom(true);
heap->CollectGarbage(OLD_SPACE);
}
} // namespace internal
} // namespace v8
......@@ -12,7 +12,6 @@
#include "src/factory.h"
#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/heap/slots-buffer.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
......@@ -1474,18 +1473,11 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj_value)));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
// Trigger incremental write barrier, which should add a slot to |ec_page|'s
// slots buffer.
// Trigger the incremental write barrier, which should add a slot to the
// remembered set.
{
int slots_buffer_len = SlotsBuffer::SizeOfChain(ec_page->slots_buffer());
FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
const int n = SlotsBuffer::kNumberOfElements + 10;
for (int i = 0; i < n; i++) {
obj->FastPropertyAtPut(index, *obj_value);
}
// Ensure that the slot was actually added to the |ec_page|'s slots buffer.
CHECK_EQ(slots_buffer_len + n,
SlotsBuffer::SizeOfChain(ec_page->slots_buffer()));
obj->FastPropertyAtPut(index, *obj_value);
}
// Migrate |obj| to |new_map| which should shift fields and put the
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits>
#include <set>
#include "src/globals.h"
#include "src/heap/remembered-set.h"
#include "src/heap/spaces.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
TEST(LocalSlotsBuffer, InsertAndIterate) {
LocalSlotsBuffer buffer;
std::set<Address> untyped;
std::set<std::pair<SlotType, Address> > typed;
for (int k = 1000; k < 10000; k += NUMBER_OF_SLOT_TYPES) {
untyped.insert(reinterpret_cast<Address>(k));
buffer.Record(reinterpret_cast<Address>(k));
for (int i = 0; i < NUMBER_OF_SLOT_TYPES; i++) {
typed.insert(std::make_pair(static_cast<SlotType>(i),
reinterpret_cast<Address>(k + i)));
buffer.Record(static_cast<SlotType>(i), reinterpret_cast<Address>(k + i));
}
}
buffer.Iterate(
[&untyped](Address addr) {
EXPECT_NE(untyped.count(addr), 0);
untyped.erase(addr);
},
[&typed](SlotType type, Address addr) {
EXPECT_NE(typed.count(std::make_pair(type, addr)), 0);
typed.erase(std::make_pair(type, addr));
});
EXPECT_EQ(untyped.size(), 0);
EXPECT_EQ(typed.size(), 0);
}
} // namespace internal
} // namespace v8
......@@ -55,9 +55,9 @@ TEST(SlotSet, Iterate) {
set.Iterate([](Address slot_address) {
uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
if (intaddr % 3 == 0) {
return SlotSet::KEEP_SLOT;
return KEEP_SLOT;
} else {
return SlotSet::REMOVE_SLOT;
return REMOVE_SLOT;
}
});
......@@ -139,5 +139,33 @@ TEST(SlotSet, RemoveRange) {
}
}
TEST(TypedSlotSet, Iterate) {
TypedSlotSet set(0);
const int kDelta = 10000001;
int added = 0;
for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset; i += kDelta) {
SlotType type = static_cast<SlotType>(i % NUMBER_OF_SLOT_TYPES);
set.Insert(type, i);
++added;
}
int iterated = 0;
set.Iterate([&iterated, kDelta](SlotType type, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
EXPECT_EQ(i % NUMBER_OF_SLOT_TYPES, static_cast<uint32_t>(type));
EXPECT_EQ(0, i % kDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
});
EXPECT_EQ(added, iterated);
iterated = 0;
set.Iterate([&iterated](SlotType type, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
EXPECT_EQ(0, i % 2);
++iterated;
return KEEP_SLOT;
});
EXPECT_EQ(added / 2, iterated);
}
} // namespace internal
} // namespace v8
......@@ -112,6 +112,7 @@
'heap/heap-unittest.cc',
'heap/scavenge-job-unittest.cc',
'heap/slot-set-unittest.cc',
'heap/remembered-set-unittest.cc',
'locked-queue-unittest.cc',
'run-all-unittests.cc',
'runtime/runtime-interpreter-unittest.cc',
......
......@@ -897,8 +897,6 @@
'../../src/heap/scavenger.cc',
'../../src/heap/scavenger.h',
'../../src/heap/slot-set.h',
'../../src/heap/slots-buffer.cc',
'../../src/heap/slots-buffer.h',
'../../src/heap/spaces-inl.h',
'../../src/heap/spaces.cc',
'../../src/heap/spaces.h',
......