Commit 9146bc5e authored by ulan, committed by Commit bot

Revert of Replace slots buffer with remembered set. (patchset #14 id:250001 of https://codereview.chromium.org/1703823002/ )

Reason for revert:
Revert because of canary crashes: crbug.com/589413

Original issue's description:
> Replace slots buffer with remembered set.
>
> Slots pointing to evacuation candidates are now recorded in the new RememberedSet<OLD_TO_OLD>.
>
> The remembered set is extended to support typed slots.
>
> During parallel evacuation all migration slots are recorded in local slots buffers.
> After evacuation all local slots are added to the remembered set.
>
> BUG=chromium:578883
> LOG=NO
>
> Committed: https://crrev.com/2285a99ef6f7d52f4f0c4d88a7db4224443ee152
> Cr-Commit-Position: refs/heads/master@{#34212}
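
For context, the reverted scheme keyed slot recording by the page that contains the slot, whereas the slots buffer it replaced is keyed by the page the slot points to. Below is a minimal, self-contained sketch of the remembered-set side of that idea; the types are simplified stand-ins, not the real declarations from src/heap/remembered-set.h.

// Hedged sketch: simplified stand-ins for V8's RememberedSet/SlotSet.
#include <cstdint>
#include <map>
#include <set>

using Address = uintptr_t;
enum PointerDirection { OLD_TO_NEW, OLD_TO_OLD };

template <PointerDirection direction>
class RememberedSetSketch {
 public:
  // Keyed by the source page, so a single "popular" target page can no
  // longer overflow its buffer and force eviction from compaction.
  void Insert(Address source_page_start, Address slot) {
    slots_[source_page_start].insert(slot);
  }
  void Remove(Address source_page_start, Address slot) {
    slots_[source_page_start].erase(slot);
  }

 private:
  std::map<Address, std::set<Address>> slots_;
};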

TBR=jochen@chromium.org,hpayer@chromium.org,mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:578883

Review URL: https://codereview.chromium.org/1725073003

Cr-Commit-Position: refs/heads/master@{#34238}
parent 666aec03
......@@ -1072,6 +1072,8 @@ source_set("v8_base") {
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/slot-set.h",
"src/heap/slots-buffer.cc",
"src/heap/slots-buffer.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
......
......@@ -5578,7 +5578,6 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
}
}
......@@ -5591,7 +5590,6 @@ void Heap::ClearRecordedSlotRange(HeapObject* object, Object** start,
Page* page = Page::FromAddress(start_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
RememberedSet<OLD_TO_NEW>::RemoveRange(page, start_addr, end_addr);
RememberedSet<OLD_TO_OLD>::RemoveRange(page, start_addr, end_addr);
}
}
......
......@@ -26,10 +26,11 @@ void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
}
}
void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
Object* value) {
if (IsMarking() && value->IsHeapObject()) {
RecordWriteIntoCodeSlow(host, rinfo, value);
RecordWriteIntoCodeSlow(obj, rinfo, value);
}
}
......
......@@ -131,11 +131,13 @@ void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
}
}
void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
RelocInfo* rinfo,
Object* value) {
if (BaseRecordWrite(host, value)) {
// Object is not going to be rescanned. We need to record the slot.
heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
if (BaseRecordWrite(obj, value)) {
// Object is not going to be rescanned. We need to record the slot.
heap_->mark_compact_collector()->RecordRelocSlot(rinfo, value);
}
}
......
......@@ -165,13 +165,15 @@ class IncrementalMarking {
// the incremental cycle (stays white).
INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
Object* value));
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
Object* value);
void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
......
......@@ -6,7 +6,7 @@
#define V8_HEAP_MARK_COMPACT_INL_H_
#include "src/heap/mark-compact.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slots-buffer.h"
#include "src/isolate.h"
namespace v8 {
......@@ -70,11 +70,25 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
RememberedSet<OLD_TO_OLD>::Insert(source_page,
reinterpret_cast<Address>(slot));
if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
target_page->slots_buffer_address(), slot,
SlotsBuffer::FAIL_ON_OVERFLOW)) {
EvictPopularEvacuationCandidate(target_page);
}
}
}
void MarkCompactCollector::ForceRecordSlot(HeapObject* object, Object** slot,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
CHECK(SlotsBuffer::AddTo(slots_buffer_allocator_,
target_page->slots_buffer_address(), slot,
SlotsBuffer::IGNORE_OVERFLOW));
}
}
......
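
The restored RecordSlot above also brings back the overflow protocol that the remembered set had eliminated: AddTo with FAIL_ON_OVERFLOW refuses to grow a page's buffer chain past kChainLengthThreshold, and the caller responds by evicting that popular evacuation candidate. A standalone model of this contract follows; the two constants are copied from slots-buffer.h, everything else is illustrative.

#include <cstdio>
#include <vector>

// Illustrative stand-in for a page's SlotsBuffer chain.
struct BufferChainModel {
  static const size_t kNumberOfElements = 1021;    // slots per buffer
  static const size_t kChainLengthThreshold = 15;  // max buffers per page
  std::vector<std::vector<uintptr_t>> chain;

  // Mirrors SlotsBuffer::AddTo with FAIL_ON_OVERFLOW.
  bool Add(uintptr_t slot) {
    if (chain.empty() || chain.back().size() == kNumberOfElements) {
      if (chain.size() >= kChainLengthThreshold) {
        chain.clear();  // DeallocateChain: stop recording for this page
        return false;
      }
      chain.emplace_back();
    }
    chain.back().push_back(slot);
    return true;
  }
};

int main() {
  BufferChainModel page_buffer;
  uintptr_t slot = 0;
  while (page_buffer.Add(slot)) slot++;
  // In the real code the caller now runs EvictPopularEvacuationCandidate,
  // i.e. gives up compacting the target page instead of buffering more.
  std::printf("overflowed after %zu slots\n", static_cast<size_t>(slot));
  return 0;
}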
This diff is collapsed.
......@@ -25,7 +25,9 @@ class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
class LocalSlotsBuffer;
class SlotsBuffer;
class SlotsBufferAllocator;
class Marking : public AllStatic {
public:
......@@ -393,8 +395,8 @@ class MarkCompactCollector {
->IsEvacuationCandidate();
}
void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
void RecordRelocSlot(RelocInfo* rinfo, Object* target);
void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
......@@ -405,8 +407,8 @@ class MarkCompactCollector {
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
AllocationSpace to_old_space,
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots);
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer);
void InvalidateCode(Code* code);
......@@ -482,8 +484,9 @@ class MarkCompactCollector {
// whole transitive closure is known. They must be called before sweeping
// when mark bits are still intact.
bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
HeapObject* FindBlackObjectBySlotSlow(Address slot);
bool IsSlotInBlackObjectSlow(Page* p, Address slot);
bool IsSlotInLiveObject(Address slot);
void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);
// Removes all the slots in the slot buffers that are within the given
// address range.
......@@ -517,7 +520,8 @@ class MarkCompactCollector {
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
void ClearInvalidRememberedSetSlots();
void EvictPopularEvacuationCandidate(Page* page);
void ClearInvalidStoreAndSlotsBufferEntries();
void StartSweeperThreads();
......@@ -546,6 +550,10 @@ class MarkCompactCollector {
bool evacuation_;
SlotsBufferAllocator* slots_buffer_allocator_;
SlotsBuffer* migration_slots_buffer_;
// Finishes GC, performs heap verification if enabled.
void Finish();
......@@ -699,6 +707,9 @@ class MarkCompactCollector {
void EvacuateNewSpacePrologue();
void EvacuateNewSpaceEpilogue();
void AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer);
void EvacuatePagesInParallel();
// The number of parallel compaction tasks, including the main thread.
......@@ -734,8 +745,16 @@ class MarkCompactCollector {
// Updates store buffer and slot buffer for a pointer in a migrating object.
void RecordMigratedSlot(Object* value, Address slot,
LocalSlotsBuffer* old_to_old_slots,
LocalSlotsBuffer* old_to_new_slots);
SlotsBuffer** evacuation_slots_buffer,
LocalStoreBuffer* local_store_buffer);
// Adds the code entry slot to the slots buffer.
void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
SlotsBuffer** evacuation_slots_buffer);
// Adds the slot of a moved code object.
void RecordMigratedCodeObjectSlot(Address code_object,
SlotsBuffer** evacuation_slots_buffer);
#ifdef DEBUG
friend class MarkObjectVisitor;
......@@ -755,6 +774,14 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
List<NewSpacePage*> newspace_evacuation_candidates_;
// The evacuation_slots_buffers_ are used by the compaction threads.
// When a compaction task finishes, it uses
// AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
// evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
// lock.
base::Mutex evacuation_slots_buffers_mutex_;
List<SlotsBuffer*> evacuation_slots_buffers_;
base::SmartPointer<FreeList> free_list_old_space_;
base::SmartPointer<FreeList> free_list_code_space_;
base::SmartPointer<FreeList> free_list_map_space_;
......
......@@ -220,12 +220,11 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
Heap* heap, RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
// TODO(ulan): It could be better to record slots only for strongly embedded
// objects here and record slots for weakly embedded objects during clearing
// of non-live references in mark-compact.
if (!host->IsWeakObject(object)) {
if (!rinfo->host()->IsWeakObject(object)) {
StaticVisitor::MarkObject(heap, object);
}
}
......@@ -236,9 +235,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap,
RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::CELL);
Cell* cell = rinfo->target_cell();
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, cell);
if (!host->IsWeakObject(cell)) {
heap->mark_compact_collector()->RecordRelocSlot(rinfo, cell);
if (!rinfo->host()->IsWeakObject(cell)) {
StaticVisitor::MarkObject(heap, cell);
}
}
......@@ -250,8 +248,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence());
Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
......@@ -271,8 +268,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
......@@ -283,8 +279,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Code* target = rinfo->code_age_stub();
DCHECK(target != NULL);
Code* host = rinfo->host();
heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
......
......@@ -24,7 +24,8 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
if (slots != nullptr) {
slots->Iterate([heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, slot) ? KEEP_SLOT : REMOVE_SLOT;
return IsValidSlot(heap, slot) ? SlotSet::KEEP_SLOT
: SlotSet::REMOVE_SLOT;
});
}
}
......@@ -32,24 +33,17 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
template <PointerDirection direction>
void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Iterate(heap, [heap](Address addr) {
HeapObject* obj =
heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
if (obj == nullptr) {
// The slot is in dead object.
MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
AllocationSpace owner = chunk->owner()->identity();
// The old to old remembered set can have slots in dead objects. This is
// OK because the set is cleared after every mark-compact GC.
// The old to new remembered set is allowed to have slots in dead
// objects only in map and large object space because these spaces cannot
// have raw untagged pointers.
CHECK(direction == OLD_TO_OLD || owner == MAP_SPACE || owner == LO_SPACE);
} else {
int offset = static_cast<int>(addr - obj->address());
CHECK(obj->IsValidSlot(offset));
Object** slot = reinterpret_cast<Object**>(addr);
Object* object = *slot;
if (Page::FromAddress(addr)->owner() != nullptr &&
Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
CHECK(IsValidSlot(heap, slot));
heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
reinterpret_cast<Address>(slot), HeapObject::cast(object));
}
return KEEP_SLOT;
return SlotSet::KEEP_SLOT;
});
}
......@@ -70,7 +64,6 @@ bool RememberedSet<direction>::IsValidSlot(Heap* heap, Object** slot) {
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
} // namespace internal
} // namespace v8
......@@ -56,12 +56,10 @@ class RememberedSet {
}
// Iterates and filters the remembered set with the given callback.
// The callback should take (Address slot) and return SlotCallbackResult.
// The callback should take (Address slot) and return SlotSet::CallbackResult.
template <typename Callback>
static void Iterate(Heap* heap, Callback callback) {
MemoryChunkIterator it(heap, direction == OLD_TO_OLD
? MemoryChunkIterator::ALL
: MemoryChunkIterator::ALL_BUT_CODE_SPACE);
PointerChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = GetSlotSet(chunk);
......@@ -91,60 +89,6 @@ class RememberedSet {
});
}
// Given a page and a typed slot in that page, this function adds the slot
// to the remembered set.
static void InsertTyped(Page* page, SlotType slot_type, Address slot_addr) {
STATIC_ASSERT(direction == OLD_TO_OLD);
TypedSlotSet* slot_set = page->typed_old_to_old_slots();
if (slot_set == nullptr) {
page->AllocateTypedOldToOldSlots();
slot_set = page->typed_old_to_old_slots();
}
uintptr_t offset = slot_addr - page->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
slot_set->Insert(slot_type, static_cast<uint32_t>(offset));
}
// Given a page and a range of typed slots in that page, this function removes
// the slots from the remembered set.
static void RemoveRangeTyped(Page* page, Address start, Address end) {
TypedSlotSet* slots = page->typed_old_to_old_slots();
if (slots != nullptr) {
slots->Iterate([start, end](SlotType slot_type, Address slot_addr) {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
});
}
}
// Iterates and filters typed old to old pointers with the given callback.
// The callback should take (SlotType slot_type, Address slot_addr) and
// return SlotCallbackResult.
template <typename Callback>
static void IterateTyped(Heap* heap, Callback callback) {
MemoryChunkIterator it(heap, MemoryChunkIterator::ALL_BUT_MAP_SPACE);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
TypedSlotSet* slots = chunk->typed_old_to_old_slots();
if (slots != nullptr) {
int new_count = slots->Iterate(callback);
if (new_count == 0) {
chunk->ReleaseTypedOldToOldSlots();
}
}
}
}
// Clear all old to old slots from the remembered set.
static void ClearAll(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_OLD);
MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseOldToOldSlots();
chunk->ReleaseTypedOldToOldSlots();
}
}
// Eliminates all stale slots from the remembered set, i.e.
// slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and
......@@ -181,8 +125,8 @@ class RememberedSet {
}
template <typename Callback>
static SlotCallbackResult Wrapper(Heap* heap, Address slot_address,
Callback slot_callback) {
static SlotSet::CallbackResult Wrapper(Heap* heap, Address slot_address,
Callback slot_callback) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* object = *slot;
......@@ -196,97 +140,17 @@ class RememberedSet {
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
if (heap->InToSpace(object)) {
return KEEP_SLOT;
return SlotSet::KEEP_SLOT;
}
} else {
DCHECK(!heap->InNewSpace(object));
}
return REMOVE_SLOT;
return SlotSet::REMOVE_SLOT;
}
static bool IsValidSlot(Heap* heap, Object** slot);
};
// Buffer for keeping thread-local migration slots during compaction.
// TODO(ulan): Remove this once every thread gets local pages in compaction
// space.
class LocalSlotsBuffer BASE_EMBEDDED {
public:
LocalSlotsBuffer() : top_(new Node(nullptr)) {}
~LocalSlotsBuffer() {
Node* current = top_;
while (current != nullptr) {
Node* tmp = current->next;
delete current;
current = tmp;
}
}
void Record(Address addr) {
EnsureSpaceFor(1);
uintptr_t entry = reinterpret_cast<uintptr_t>(addr);
DCHECK_GE(entry, static_cast<uintptr_t>(NUMBER_OF_SLOT_TYPES));
Insert(entry);
}
void Record(SlotType type, Address addr) {
EnsureSpaceFor(2);
Insert(static_cast<uintptr_t>(type));
uintptr_t entry = reinterpret_cast<uintptr_t>(addr);
DCHECK_GE(entry, static_cast<uintptr_t>(NUMBER_OF_SLOT_TYPES));
Insert(entry);
}
template <typename UntypedCallback, typename TypedCallback>
void Iterate(UntypedCallback untyped_callback, TypedCallback typed_callback) {
Node* current = top_;
bool typed = false;
SlotType type;
Address addr;
while (current != nullptr) {
for (int i = 0; i < current->count; i++) {
uintptr_t entry = current->buffer[i];
if (entry < NUMBER_OF_SLOT_TYPES) {
DCHECK(!typed);
typed = true;
type = static_cast<SlotType>(entry);
} else {
addr = reinterpret_cast<Address>(entry);
if (typed) {
typed_callback(type, addr);
typed = false;
} else {
untyped_callback(addr);
}
}
}
current = current->next;
}
}
private:
void EnsureSpaceFor(int count) {
if (top_->remaining_free_slots() < count) top_ = new Node(top_);
}
void Insert(uintptr_t entry) { top_->buffer[top_->count++] = entry; }
static const int kBufferSize = 16 * KB;
struct Node : Malloced {
explicit Node(Node* next_node) : next(next_node), count(0) {}
inline int remaining_free_slots() { return kBufferSize - count; }
Node* next;
uintptr_t buffer[kBufferSize];
int count;
};
Node* top_;
};
} // namespace internal
} // namespace v8
......
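
Usage of the removed LocalSlotsBuffer is exercised by the remembered-set unit test near the end of this CL; the pattern is simply to record untyped and typed slots on a compaction thread, then drain both kinds with a pair of callbacks. A hedged sketch of that call pattern, assuming the class above is in scope and using dummy addresses:

// Sketch only: assumes the LocalSlotsBuffer declaration above.
void LocalSlotsBufferSketch() {
  LocalSlotsBuffer buffer;
  buffer.Record(reinterpret_cast<Address>(0x10000));                  // untyped
  buffer.Record(CODE_ENTRY_SLOT, reinterpret_cast<Address>(0x20000)); // typed
  buffer.Iterate(
      [](Address addr) {
        // Untyped slot: would be re-inserted into RememberedSet<OLD_TO_OLD>.
      },
      [](SlotType type, Address addr) {
        // Typed slot: would go through RememberedSet<OLD_TO_OLD>::InsertTyped.
      });
}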
......@@ -7,13 +7,10 @@
#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
......@@ -22,6 +19,8 @@ enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
bucket[i] = nullptr;
......@@ -214,124 +213,6 @@ class SlotSet : public Malloced {
Address page_start_;
};
enum SlotType {
EMBEDDED_OBJECT_SLOT,
OBJECT_SLOT,
RELOCATED_CODE_OBJECT,
CELL_TARGET_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
DEBUG_TARGET_SLOT,
NUMBER_OF_SLOT_TYPES
};
// Data structure for maintaining a multiset of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class TypedSlotSet {
public:
typedef uint32_t TypedSlot;
static const int kMaxOffset = 1 << 29;
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
chunk_ = new Chunk(nullptr, kInitialBufferSize);
}
~TypedSlotSet() {
Chunk* chunk = chunk_;
while (chunk != nullptr) {
Chunk* next = chunk->next;
delete chunk;
chunk = next;
}
}
// The slot offset specifies a slot at address page_start_ + offset.
void Insert(SlotType type, int offset) {
TypedSlot slot = ToTypedSlot(type, offset);
if (!chunk_->AddSlot(slot)) {
chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
bool added = chunk_->AddSlot(slot);
DCHECK(added);
USE(added);
}
}
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
// Returns the new number of slots.
//
// Sample usage:
// Iterate([](SlotType slot_type, Address slot_address) {
// if (good(slot_type, slot_address)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
template <typename Callback>
int Iterate(Callback callback) {
STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
const TypedSlot kRemovedSlot = TypeField::encode(NUMBER_OF_SLOT_TYPES);
Chunk* chunk = chunk_;
int new_count = 0;
while (chunk != nullptr) {
TypedSlot* buffer = chunk->buffer;
int count = chunk->count;
for (int i = 0; i < count; i++) {
TypedSlot slot = buffer[i];
if (slot != kRemovedSlot) {
SlotType type = TypeField::decode(slot);
Address addr = page_start_ + OffsetField::decode(slot);
if (callback(type, addr) == KEEP_SLOT) {
new_count++;
} else {
buffer[i] = kRemovedSlot;
}
}
}
chunk = chunk->next;
}
return new_count;
}
private:
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
static int NextCapacity(int capacity) {
return Min(kMaxBufferSize, capacity * 2);
}
static TypedSlot ToTypedSlot(SlotType type, int offset) {
return TypeField::encode(type) | OffsetField::encode(offset);
}
class OffsetField : public BitField<int, 0, 29> {};
class TypeField : public BitField<SlotType, 29, 3> {};
struct Chunk : Malloced {
explicit Chunk(Chunk* next_chunk, int capacity)
: next(next_chunk), count(0), capacity(capacity) {
buffer = NewArray<TypedSlot>(capacity);
}
bool AddSlot(TypedSlot slot) {
if (count == capacity) return false;
buffer[count++] = slot;
return true;
}
~Chunk() { DeleteArray(buffer); }
Chunk* next;
int count;
int capacity;
TypedSlot* buffer;
};
Address page_start_;
Chunk* chunk_;
};
} // namespace internal
} // namespace v8
......
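
The removed TypedSlotSet packs each entry into 32 bits via the two BitFields above: a 29-bit page offset in bits 0..28 (hence kMaxOffset = 1 << 29) and a 3-bit SlotType in bits 29..31. A standalone worked example of the encoding, with plain shifts and masks standing in for V8's BitField helpers:

#include <cassert>
#include <cstdint>

enum SlotType {
  EMBEDDED_OBJECT_SLOT, OBJECT_SLOT, RELOCATED_CODE_OBJECT, CELL_TARGET_SLOT,
  CODE_TARGET_SLOT, CODE_ENTRY_SLOT, DEBUG_TARGET_SLOT, NUMBER_OF_SLOT_TYPES
};

int main() {
  const uint32_t kOffsetBits = 29;
  const uint32_t kOffsetMask = (1u << kOffsetBits) - 1;

  // Encode, as ToTypedSlot does:
  // TypeField::encode(type) | OffsetField::encode(offset).
  uint32_t offset = 0x12345;  // slot address relative to page_start_
  uint32_t slot = (static_cast<uint32_t>(CODE_TARGET_SLOT) << kOffsetBits) |
                  (offset & kOffsetMask);

  // Decode, as Iterate does before invoking the callback.
  assert(static_cast<SlotType>(slot >> kOffsetBits) == CODE_TARGET_SLOT);
  assert((slot & kOffsetMask) == offset);
  return 0;
}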
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/slots-buffer.h"
#include "src/assembler.h"
#include "src/heap/heap.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}
bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, SlotType type,
Address addr, AdditionMode mode) {
SlotsBuffer* buffer = *buffer_address;
if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
allocator->DeallocateChain(buffer_address);
return false;
}
buffer = allocator->AllocateBuffer(buffer);
*buffer_address = buffer;
}
DCHECK(buffer->HasSpaceForTypedSlot());
buffer->Add(reinterpret_cast<ObjectSlot>(type));
buffer->Add(reinterpret_cast<ObjectSlot>(addr));
return true;
}
void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
// Remove entries by replacing them with an old-space slot containing a smi
// that is located in an unmovable page.
const ObjectSlot kRemovedEntry = HeapObject::RawField(
heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
->NeverEvacuate());
while (buffer != NULL) {
SlotsBuffer::ObjectSlot* slots = buffer->slots_;
intptr_t slots_count = buffer->idx_;
for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
ObjectSlot slot = slots[slot_idx];
if (!IsTypedSlot(slot)) {
Object* object = *slot;
// Slots are invalid when they currently:
// - do not point to a heap object (SMI)
// - point to a heap object in new space
// - are not within a live heap object on a valid pointer slot
// - point to a heap object not on an evacuation candidate
// TODO(mlippautz): Move InNewSpace check above IsSlotInLiveObject once
// we filter out unboxed double slots eagerly.
if (!object->IsHeapObject() ||
!heap->mark_compact_collector()->IsSlotInLiveObject(
reinterpret_cast<Address>(slot)) ||
heap->InNewSpace(object) ||
!Page::FromAddress(reinterpret_cast<Address>(object))
->IsEvacuationCandidate()) {
// TODO(hpayer): Instead of replacing slots with kRemovedEntry we
// could shrink the slots buffer in-place.
slots[slot_idx] = kRemovedEntry;
}
} else {
++slot_idx;
DCHECK(slot_idx < slots_count);
}
}
buffer = buffer->next();
}
}
void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
Address start_slot, Address end_slot) {
// Remove entries by replacing them with an old-space slot containing a smi
// that is located in an unmovable page.
const ObjectSlot kRemovedEntry = HeapObject::RawField(
heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
->NeverEvacuate());
while (buffer != NULL) {
SlotsBuffer::ObjectSlot* slots = buffer->slots_;
intptr_t slots_count = buffer->idx_;
bool is_typed_slot = false;
for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
ObjectSlot slot = slots[slot_idx];
if (!IsTypedSlot(slot)) {
Address slot_address = reinterpret_cast<Address>(slot);
if (slot_address >= start_slot && slot_address < end_slot) {
// TODO(hpayer): Instead of replacing slots with kRemovedEntry we
// could shrink the slots buffer in-place.
slots[slot_idx] = kRemovedEntry;
if (is_typed_slot) {
slots[slot_idx - 1] = kRemovedEntry;
}
}
is_typed_slot = false;
} else {
is_typed_slot = true;
DCHECK(slot_idx < slots_count);
}
}
buffer = buffer->next();
}
}
void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
while (buffer != NULL) {
SlotsBuffer::ObjectSlot* slots = buffer->slots_;
intptr_t slots_count = buffer->idx_;
for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
ObjectSlot slot = slots[slot_idx];
if (!IsTypedSlot(slot)) {
Object* object = *slot;
if (object->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(object);
CHECK(!heap->InNewSpace(object));
heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
reinterpret_cast<Address>(slot), heap_object);
}
} else {
++slot_idx;
DCHECK(slot_idx < slots_count);
}
}
buffer = buffer->next();
}
}
SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
return new SlotsBuffer(next_buffer);
}
void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
delete buffer;
}
void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
SlotsBuffer* buffer = *buffer_address;
while (buffer != NULL) {
SlotsBuffer* next_buffer = buffer->next();
DeallocateBuffer(buffer);
buffer = next_buffer;
}
*buffer_address = NULL;
}
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_SLOTS_BUFFER_H_
#define V8_HEAP_SLOTS_BUFFER_H_
#include "src/objects.h"
namespace v8 {
namespace internal {
// Forward declarations.
class SlotsBuffer;
// SlotsBufferAllocator manages the allocation and deallocation of slots buffer
// chunks and links them together. Slots buffer chunks are always created by the
// SlotsBufferAllocator.
class SlotsBufferAllocator {
public:
SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
void DeallocateBuffer(SlotsBuffer* buffer);
void DeallocateChain(SlotsBuffer** buffer_address);
};
// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
// - Untyped slots are expected to contain a tagged object pointer.
// They are recorded by an address.
// - Typed slots are expected to contain an encoded pointer to a heap
// object where the way of encoding depends on the type of the slot.
// They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
class SlotsBuffer {
public:
typedef Object** ObjectSlot;
explicit SlotsBuffer(SlotsBuffer* next_buffer)
: idx_(0), chain_length_(1), next_(next_buffer) {
if (next_ != NULL) {
chain_length_ = next_->chain_length_ + 1;
}
}
~SlotsBuffer() {}
void Add(ObjectSlot slot) {
DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
#ifdef DEBUG
if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
DCHECK_NOT_NULL(*slot);
}
#endif
slots_[idx_++] = slot;
}
ObjectSlot Get(intptr_t i) {
DCHECK(i >= 0 && i < kNumberOfElements);
return slots_[i];
}
size_t Size() {
DCHECK(idx_ <= kNumberOfElements);
return idx_;
}
enum SlotType {
EMBEDDED_OBJECT_SLOT,
OBJECT_SLOT,
RELOCATED_CODE_OBJECT,
CELL_TARGET_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
DEBUG_TARGET_SLOT,
NUMBER_OF_SLOT_TYPES
};
static const char* SlotTypeToString(SlotType type) {
switch (type) {
case EMBEDDED_OBJECT_SLOT:
return "EMBEDDED_OBJECT_SLOT";
case OBJECT_SLOT:
return "OBJECT_SLOT";
case RELOCATED_CODE_OBJECT:
return "RELOCATED_CODE_OBJECT";
case CELL_TARGET_SLOT:
return "CELL_TARGET_SLOT";
case CODE_TARGET_SLOT:
return "CODE_TARGET_SLOT";
case CODE_ENTRY_SLOT:
return "CODE_ENTRY_SLOT";
case DEBUG_TARGET_SLOT:
return "DEBUG_TARGET_SLOT";
case NUMBER_OF_SLOT_TYPES:
return "NUMBER_OF_SLOT_TYPES";
}
return "UNKNOWN SlotType";
}
SlotsBuffer* next() { return next_; }
static int SizeOfChain(SlotsBuffer* buffer) {
if (buffer == NULL) return 0;
return static_cast<int>(buffer->idx_ +
(buffer->chain_length_ - 1) * kNumberOfElements);
}
inline bool IsFull() { return idx_ == kNumberOfElements; }
inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
}
INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, ObjectSlot slot,
AdditionMode mode)) {
SlotsBuffer* buffer = *buffer_address;
if (buffer == NULL || buffer->IsFull()) {
if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
allocator->DeallocateChain(buffer_address);
return false;
}
buffer = allocator->AllocateBuffer(buffer);
*buffer_address = buffer;
}
buffer->Add(slot);
return true;
}
static bool IsTypedSlot(ObjectSlot slot);
static bool AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, SlotType type, Address addr,
AdditionMode mode);
// Eliminates all stale entries from the slots buffer, i.e., slots that
// are not part of live objects anymore. This method must be called after
// marking, when the whole transitive closure is known and must be called
// before sweeping when mark bits are still intact.
static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
// Eliminate all slots that are within the given address range.
static void RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
Address start_slot, Address end_slot);
// Ensures that there are no invalid slots in the chain of slots buffers.
static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
static const int kNumberOfElements = 1021;
private:
static const int kChainLengthThreshold = 15;
intptr_t idx_;
intptr_t chain_length_;
SlotsBuffer* next_;
ObjectSlot slots_[kNumberOfElements];
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_SLOTS_BUFFER_H_
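
The untyped/typed discrimination in the restored SlotsBuffer rests on the zero-page assumption documented above: a buffer entry numerically below NUMBER_OF_SLOT_TYPES cannot be a valid slot address, so it must be a type marker whose paired address follows it. A standalone illustration of iterating such a mixed stream (dummy addresses, not the V8 API):

#include <cassert>
#include <cstdint>
#include <vector>

enum SlotType {
  EMBEDDED_OBJECT_SLOT, OBJECT_SLOT, RELOCATED_CODE_OBJECT, CELL_TARGET_SLOT,
  CODE_TARGET_SLOT, CODE_ENTRY_SLOT, DEBUG_TARGET_SLOT, NUMBER_OF_SLOT_TYPES
};

int main() {
  std::vector<uintptr_t> buffer;
  buffer.push_back(0x10000);          // untyped slot: plain address
  buffer.push_back(CODE_ENTRY_SLOT);  // typed slot: marker first...
  buffer.push_back(0x20000);          // ...then the slot address

  int untyped = 0, typed = 0;
  for (size_t i = 0; i < buffer.size(); i++) {
    if (buffer[i] < NUMBER_OF_SLOT_TYPES) {  // cf. SlotsBuffer::IsTypedSlot
      SlotType type = static_cast<SlotType>(buffer[i]);
      uintptr_t addr = buffer[++i];  // consume the paired address
      assert(type == CODE_ENTRY_SLOT && addr == 0x20000);
      typed++;
    } else {
      untyped++;
    }
  }
  assert(untyped == 1 && typed == 1);
  return 0;
}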
......@@ -147,19 +147,6 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
return NULL;
}
// -----------------------------------------------------------------------------
// LargePageIterator
LargePageIterator::LargePageIterator(LargeObjectSpace* space)
: next_page_(space->first_page()) {}
LargePage* LargePageIterator::next() {
LargePage* result = next_page_;
if (next_page_ != nullptr) {
next_page_ = next_page_->next_page();
}
return result;
}
// -----------------------------------------------------------------------------
// MemoryAllocator
......@@ -321,15 +308,15 @@ Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}
MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
PointerChunkIterator::PointerChunkIterator(Heap* heap)
: state_(kOldSpaceState),
mode_(mode),
old_iterator_(heap->old_space()),
code_iterator_(heap->code_space()),
map_iterator_(heap->map_space()),
lo_iterator_(heap->lo_space()) {}
MemoryChunk* MemoryChunkIterator::next() {
MemoryChunk* PointerChunkIterator::next() {
switch (state_) {
case kOldSpaceState: {
if (old_iterator_.has_next()) {
......@@ -339,34 +326,33 @@ MemoryChunk* MemoryChunkIterator::next() {
// Fall through.
}
case kMapState: {
if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
if (map_iterator_.has_next()) {
return map_iterator_.next();
}
state_ = kCodeState;
// Fall through.
}
case kCodeState: {
if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
return code_iterator_.next();
}
state_ = kLargeObjectState;
// Fall through.
}
case kLargeObjectState: {
MemoryChunk* answer = lo_iterator_.next();
if (answer != nullptr) {
return answer;
}
state_ = kFinishedState;
// Fall through;
HeapObject* heap_object;
do {
heap_object = lo_iterator_.Next();
if (heap_object == NULL) {
state_ = kFinishedState;
return NULL;
}
// Fixed arrays are the only pointer-containing objects in large
// object space.
} while (!heap_object->IsFixedArray());
MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
return answer;
}
case kFinishedState:
return nullptr;
return NULL;
default:
break;
}
UNREACHABLE();
return nullptr;
return NULL;
}
......
......@@ -8,6 +8,7 @@
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slot-set.h"
#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"
......@@ -477,9 +478,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = 0;
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = nullptr;
chunk->old_to_new_slots_ = nullptr;
chunk->old_to_old_slots_ = nullptr;
chunk->typed_old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
......@@ -731,10 +732,6 @@ LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
MemoryChunk* chunk =
AllocateChunk(object_size, object_size, executable, owner);
if (chunk == NULL) return NULL;
if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
return LargePage::Initialize(isolate_->heap(), chunk);
}
......@@ -935,6 +932,8 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// MemoryChunk implementation
void MemoryChunk::ReleaseAllocatedMemory() {
delete slots_buffer_;
slots_buffer_ = nullptr;
delete skip_list_;
skip_list_ = nullptr;
delete mutex_;
......@@ -973,15 +972,6 @@ void MemoryChunk::ReleaseOldToOldSlots() {
old_to_old_slots_ = nullptr;
}
void MemoryChunk::AllocateTypedOldToOldSlots() {
DCHECK(nullptr == typed_old_to_old_slots_);
typed_old_to_old_slots_ = new TypedSlotSet(address());
}
void MemoryChunk::ReleaseTypedOldToOldSlots() {
delete typed_old_to_old_slots_;
typed_old_to_old_slots_ = nullptr;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation
......
......@@ -32,7 +32,6 @@ class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;
// -----------------------------------------------------------------------------
......@@ -393,14 +392,14 @@ class MemoryChunk {
+ kPointerSize // Heap* heap_
+ kIntSize; // int progress_bar_
static const size_t kOldToNewSlotsOffset =
static const size_t kSlotsBufferOffset =
kLiveBytesOffset + kIntSize; // int live_byte_count_
static const size_t kWriteBarrierCounterOffset =
kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_;
+ kPointerSize // SlotSet* old_to_old_slots_;
+ kPointerSize // TypedSlotSet* typed_old_to_old_slots_;
+ kPointerSize; // SkipList* skip_list_;
kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
+ kPointerSize // SlotSet* old_to_new_slots_;
+ kPointerSize // SlotSet* old_to_old_slots_;
+ kPointerSize; // SkipList* skip_list_;
static const size_t kMinHeaderSize =
kWriteBarrierCounterOffset +
......@@ -510,18 +509,17 @@ class MemoryChunk {
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
inline TypedSlotSet* typed_old_to_old_slots() {
return typed_old_to_old_slots_;
}
void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
void AllocateOldToOldSlots();
void ReleaseOldToOldSlots();
void AllocateTypedOldToOldSlots();
void ReleaseTypedOldToOldSlots();
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
......@@ -595,14 +593,12 @@ class MemoryChunk {
void MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
DCHECK_NULL(slots_buffer_);
SetFlag(EVACUATION_CANDIDATE);
}
void ClearEvacuationCandidate() {
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
DCHECK(slots_buffer_ == NULL);
ClearFlag(EVACUATION_CANDIDATE);
}
......@@ -687,12 +683,13 @@ class MemoryChunk {
// Count of bytes marked black on page.
int live_byte_count_;
SlotsBuffer* slots_buffer_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// sets for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* old_to_new_slots_;
SlotSet* old_to_old_slots_;
TypedSlotSet* typed_old_to_old_slots_;
SkipList* skip_list_;
......@@ -865,12 +862,6 @@ class LargePage : public MemoryChunk {
inline void set_next_page(LargePage* page) { set_next_chunk(page); }
// A limit to guarantee that we do not overflow typed slot offset in
// the old to old remembered set.
// Note that this limit is higher than what assembler already imposes on
// x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
private:
static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
......@@ -986,8 +977,8 @@ class MemoryChunkValidator {
STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
offsetof(MemoryChunk, live_byte_count_));
STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
offsetof(MemoryChunk, old_to_new_slots_));
STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
offsetof(MemoryChunk, slots_buffer_));
STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
offsetof(MemoryChunk, write_barrier_counter_));
......@@ -2997,42 +2988,25 @@ class LargeObjectIterator : public ObjectIterator {
LargePage* current_;
};
class LargePageIterator BASE_EMBEDDED {
public:
explicit inline LargePageIterator(LargeObjectSpace* space);
inline LargePage* next();
private:
LargePage* next_page_;
};
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
public:
enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
inline explicit PointerChunkIterator(Heap* heap);
// Return NULL when the iterator is done.
inline MemoryChunk* next();
private:
enum State {
kOldSpaceState,
kMapState,
kCodeState,
kLargeObjectState,
kFinishedState
};
enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
State state_;
const Mode mode_;
PageIterator old_iterator_;
PageIterator code_iterator_;
PageIterator map_iterator_;
LargePageIterator lo_iterator_;
LargeObjectIterator lo_iterator_;
};
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
......
......@@ -13,6 +13,22 @@
namespace v8 {
namespace internal {
void LocalStoreBuffer::Record(Address addr) {
if (top_->is_full()) top_ = new Node(top_);
top_->buffer[top_->count++] = addr;
}
void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
Node* current = top_;
while (current != nullptr) {
for (int i = 0; i < current->count; i++) {
Address slot = current->buffer[i];
Page* page = Page::FromAnyPointerAddress(heap_, slot);
RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
current = current->next;
}
}
} // namespace internal
} // namespace v8
......
......@@ -40,6 +40,41 @@ class StoreBuffer {
base::VirtualMemory* virtual_memory_;
};
class LocalStoreBuffer BASE_EMBEDDED {
public:
explicit LocalStoreBuffer(Heap* heap)
: top_(new Node(nullptr)), heap_(heap) {}
~LocalStoreBuffer() {
Node* current = top_;
while (current != nullptr) {
Node* tmp = current->next;
delete current;
current = tmp;
}
}
inline void Record(Address addr);
inline void Process(StoreBuffer* store_buffer);
private:
static const int kBufferSize = 16 * KB;
struct Node : Malloced {
explicit Node(Node* next_node) : next(next_node), count(0) {}
inline bool is_full() { return count == kBufferSize; }
Node* next;
Address buffer[kBufferSize];
int count;
};
Node* top_;
Heap* heap_;
};
} // namespace internal
} // namespace v8
......
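
A hedged sketch of how the restored LocalStoreBuffer is used during parallel evacuation: each task records old-to-new slots thread-locally on the hot path, and the addresses are merged into the global OLD_TO_NEW remembered set once the parallel phase ends. Only Record and Process come from the header above; the surrounding function is illustrative.

// Sketch only: assumes V8-internal headers and a real slot address.
void EvacuationTaskSketch(Heap* heap, StoreBuffer* store_buffer, Address slot) {
  LocalStoreBuffer local_store_buffer(heap);
  // Hot path inside MigrateObject: no locking, just an append.
  local_store_buffer.Record(slot);
  // After evacuation: inserts every recorded address into
  // RememberedSet<OLD_TO_NEW> via Page::FromAnyPointerAddress.
  local_store_buffer.Process(store_buffer);
}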
......@@ -168,6 +168,7 @@
'test-sampler-api.cc',
'test-serialize.cc',
'test-simd.cc',
'test-slots-buffer.cc',
'test-strings.cc',
'test-symbols.cc',
'test-strtod.cc',
......
......@@ -12,6 +12,7 @@
#include "src/factory.h"
#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/heap/slots-buffer.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
......@@ -1473,11 +1474,18 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj_value)));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
// Trigger incremental write barrier, which should add a slot to remembered
// set.
// Trigger incremental write barrier, which should add a slot to |ec_page|'s
// slots buffer.
{
int slots_buffer_len = SlotsBuffer::SizeOfChain(ec_page->slots_buffer());
FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
obj->FastPropertyAtPut(index, *obj_value);
const int n = SlotsBuffer::kNumberOfElements + 10;
for (int i = 0; i < n; i++) {
obj->FastPropertyAtPut(index, *obj_value);
}
// Ensure that the slot was actually added to the |ec_page|'s slots buffer.
CHECK_EQ(slots_buffer_len + n,
SlotsBuffer::SizeOfChain(ec_page->slots_buffer()));
}
// Migrate |obj| to |new_map| which should shift fields and put the
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits>
#include <set>
#include "src/globals.h"
#include "src/heap/remembered-set.h"
#include "src/heap/spaces.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
TEST(LocalSlotsBuffer, InsertAndIterate) {
LocalSlotsBuffer buffer;
std::set<Address> untyped;
std::set<std::pair<SlotType, Address> > typed;
for (int k = 1000; k < 10000; k += NUMBER_OF_SLOT_TYPES) {
untyped.insert(reinterpret_cast<Address>(k));
buffer.Record(reinterpret_cast<Address>(k));
for (int i = 0; i < NUMBER_OF_SLOT_TYPES; i++) {
typed.insert(std::make_pair(static_cast<SlotType>(i),
reinterpret_cast<Address>(k + i)));
buffer.Record(static_cast<SlotType>(i), reinterpret_cast<Address>(k + i));
}
}
buffer.Iterate(
[&untyped](Address addr) {
EXPECT_NE(untyped.count(addr), 0);
untyped.erase(addr);
},
[&typed](SlotType type, Address addr) {
EXPECT_NE(typed.count(std::make_pair(type, addr)), 0);
typed.erase(std::make_pair(type, addr));
});
EXPECT_EQ(untyped.size(), 0);
EXPECT_EQ(typed.size(), 0);
}
} // namespace internal
} // namespace v8
......@@ -55,9 +55,9 @@ TEST(SlotSet, Iterate) {
set.Iterate([](Address slot_address) {
uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
if (intaddr % 3 == 0) {
return KEEP_SLOT;
return SlotSet::KEEP_SLOT;
} else {
return REMOVE_SLOT;
return SlotSet::REMOVE_SLOT;
}
});
......@@ -139,33 +139,5 @@ TEST(SlotSet, RemoveRange) {
}
}
TEST(TypedSlotSet, Iterate) {
TypedSlotSet set(0);
const int kDelta = 10000001;
int added = 0;
for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset; i += kDelta) {
SlotType type = static_cast<SlotType>(i % NUMBER_OF_SLOT_TYPES);
set.Insert(type, i);
++added;
}
int iterated = 0;
set.Iterate([&iterated, kDelta](SlotType type, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
EXPECT_EQ(i % NUMBER_OF_SLOT_TYPES, static_cast<uint32_t>(type));
EXPECT_EQ(0, i % kDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
});
EXPECT_EQ(added, iterated);
iterated = 0;
set.Iterate([&iterated](SlotType type, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
EXPECT_EQ(0, i % 2);
++iterated;
return KEEP_SLOT;
});
EXPECT_EQ(added / 2, iterated);
}
} // namespace internal
} // namespace v8
......@@ -111,7 +111,6 @@
'heap/heap-unittest.cc',
'heap/scavenge-job-unittest.cc',
'heap/slot-set-unittest.cc',
'heap/remembered-set-unittest.cc',
'locked-queue-unittest.cc',
'run-all-unittests.cc',
'runtime/runtime-interpreter-unittest.cc',
......
......@@ -897,6 +897,8 @@
'../../src/heap/scavenger.cc',
'../../src/heap/scavenger.h',
'../../src/heap/slot-set.h',
'../../src/heap/slots-buffer.cc',
'../../src/heap/slots-buffer.h',
'../../src/heap/spaces-inl.h',
'../../src/heap/spaces.cc',
'../../src/heap/spaces.h',
......