Commit 4fdc19ae authored by ulan, committed by Commit bot

Add a generic remembered set class.

This new class provides a unified interface for recording and iterating over slots that are currently kept in the store buffer and the slots buffer:

RememberedSet<OLD_TO_NEW>::Insert(page, slot);
RememberedSet<OLD_TO_OLD>::Insert(page, slot);

RememberedSet<OLD_TO_NEW>::Iterate(heap, callback);
RememberedSet<OLD_TO_OLD>::Iterate(heap, callback);
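
A filtering callback passed to Iterate receives a slot address and returns SlotSet::KEEP_SLOT or SlotSet::REMOVE_SLOT. As a minimal sketch (illustrative usage only, not code in this CL), a caller that drops slots which no longer point into new space could look like:

RememberedSet<OLD_TO_NEW>::Iterate(heap, [heap](Address slot_addr) {
  Object** slot = reinterpret_cast<Object**>(slot_addr);
  // Keep the slot only while it still points into new space.
  return heap->InNewSpace(*slot) ? SlotSet::KEEP_SLOT : SlotSet::REMOVE_SLOT;
});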

After this change, the store buffer is responsible only for collecting slots from the generated code.

Subsequent CLs will remove the slots buffer.

BUG=chromium:578883
LOG=NO

Review URL: https://codereview.chromium.org/1683653002

Cr-Commit-Position: refs/heads/master@{#34031}
parent 3aa2dd34
......@@ -1067,6 +1067,8 @@ source_set("v8_base") {
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
"src/heap/remembered-set.cc",
"src/heap/remembered-set.h",
"src/heap/scavenge-job.h",
"src/heap/scavenge-job.cc",
"src/heap/scavenger-inl.h",
......
......@@ -395,7 +395,9 @@ void Heap::RecordWrite(Object* object, int offset, Object* o) {
if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
return;
}
store_buffer_.Mark(HeapObject::cast(object)->address() + offset);
Page* page = Page::FromAddress(reinterpret_cast<Address>(object));
Address slot = HeapObject::cast(object)->address() + offset;
RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
......
......@@ -27,6 +27,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
......@@ -462,6 +463,7 @@ void Heap::GarbageCollectionPrologue() {
}
CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
store_buffer()->MoveEntriesToRememberedSet();
}
......@@ -1678,7 +1680,8 @@ void Heap::Scavenge() {
// Copy objects reachable from the old generation.
GCTracer::Scope gc_scope(tracer(),
GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
RememberedSet<OLD_TO_NEW>::IterateWithWrapper(this,
Scavenger::ScavengeObject);
}
{
......@@ -4460,8 +4463,6 @@ void Heap::Verify() {
CHECK(HasBeenSetUp());
HandleScope scope(isolate());
store_buffer()->Verify();
if (mark_compact_collector()->sweeping_in_progress()) {
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
......@@ -4509,14 +4510,11 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
Address end, bool record_slots,
ObjectSlotCallback callback) {
Address slot_address = start;
Page* page = Page::FromAddress(start);
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* target = *slot;
// If the store buffer becomes overfull we mark pages as being exempt from
// the store buffer. These pages are scanned to find pointers that point
// to the new space. In that case we may hit newly promoted objects and
// fix the pointers before the promotion queue gets to them. Thus the 'if'.
if (target->IsHeapObject()) {
if (Heap::InFromSpace(target)) {
callback(reinterpret_cast<HeapObject**>(slot),
......@@ -4525,7 +4523,7 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
if (InNewSpace(new_target)) {
SLOW_DCHECK(Heap::InToSpace(new_target));
SLOW_DCHECK(new_target->IsHeapObject());
store_buffer_.Mark(reinterpret_cast<Address>(slot));
RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
} else if (record_slots &&
......@@ -5527,7 +5525,11 @@ void Heap::CheckHandleCount() {
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
if (!InNewSpace(object)) {
store_buffer()->Remove(reinterpret_cast<Address>(slot));
store_buffer()->MoveEntriesToRememberedSet();
Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
}
}
......
......@@ -291,6 +291,7 @@ class Scavenger;
class ScavengeJob;
class WeakObjectRetainer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
......
......@@ -315,7 +315,7 @@ void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
heap_->store_buffer()->ClearInvalidStoreBufferEntries();
RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
}
{
......@@ -344,7 +344,7 @@ static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
heap()->store_buffer()->VerifyValidStoreBufferEntries();
RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
......@@ -2550,7 +2550,8 @@ void MarkCompactCollector::RecordMigratedSlot(
if (compaction_in_progress_) {
local_store_buffer->Record(slot);
} else {
heap_->store_buffer()->Mark(slot);
Page* page = Page::FromAddress(slot);
RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
......@@ -3089,7 +3090,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
newspace_evacuation_candidates_(newspace_evacuation_candidates),
compaction_spaces_(collector->heap()),
local_slots_buffer_(nullptr),
local_store_buffer_(),
local_store_buffer_(collector->heap()),
local_pretenuring_feedback_(HashMap::PointersMatch,
kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
......@@ -3738,7 +3739,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
}
{
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/remembered-set.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
namespace v8 {
namespace internal {
template <PointerDirection direction>
void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_NEW);
PageIterator it(heap->old_space());
MemoryChunk* chunk;
while (it.has_next()) {
chunk = it.next();
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate([heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
return IsValidSlot(heap, slot) ? SlotSet::KEEP_SLOT
: SlotSet::REMOVE_SLOT;
});
}
}
}
template <PointerDirection direction>
void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Iterate(heap, [heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
Object* object = *slot;
if (Page::FromAddress(addr)->owner() != nullptr &&
Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
CHECK(IsValidSlot(heap, slot));
heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
reinterpret_cast<Address>(slot), HeapObject::cast(object));
}
return SlotSet::KEEP_SLOT;
});
}
template <PointerDirection direction>
bool RememberedSet<direction>::IsValidSlot(Heap* heap, Object** slot) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Object* object = *slot;
if (!heap->InNewSpace(object)) {
return false;
}
HeapObject* heap_object = HeapObject::cast(object);
// If the target object is not black, the source slot must be part
// of a non-black (dead) object.
return Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
heap->mark_compact_collector()->IsSlotInLiveObject(
reinterpret_cast<Address>(slot));
}
template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
} // namespace internal
} // namespace v8
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_REMEMBERED_SET_H
#define V8_REMEMBERED_SET_H
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
template <PointerDirection direction>
class RememberedSet {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
static void Insert(Page* page, Address slot_addr) {
DCHECK(page->Contains(slot_addr));
SlotSet* slot_set = GetSlotSet(page);
if (slot_set == nullptr) {
slot_set = AllocateSlotSet(page);
}
uintptr_t offset = slot_addr - page->address();
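// Large pages keep one SlotSet per page-sized region (the array has
// ceil(size / kPageSize) entries), so index by offset / kPageSize and
// insert the offset within that page.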
slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
}
// Given a page and a slot in that page, this function removes the slot from
// the remembered set.
// If the slot was never added, then the function does nothing.
static void Remove(Page* page, Address slot_addr) {
DCHECK(page->Contains(slot_addr));
SlotSet* slot_set = GetSlotSet(page);
if (slot_set != nullptr) {
uintptr_t offset = slot_addr - page->address();
slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
}
}
// Iterates and filters the remembered set with the given callback.
// The callback should take (Address slot) and return SlotSet::CallbackResult.
template <typename Callback>
static void Iterate(Heap* heap, Callback callback) {
PointerChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
int new_count = 0;
for (size_t page = 0; page < pages; page++) {
new_count += slots[page].Iterate(callback);
}
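// If the callback removed every slot on this chunk, free its backing slot set.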
if (new_count == 0) {
ReleaseSlotSet(chunk);
}
}
}
}
// Iterates and filters the remembered set with the given callback.
// The callback should take (HeapObject** slot, HeapObject* target) and
// update the slot.
// A special wrapper takes care of filtering the slots based on their values.
// For OLD_TO_NEW case: slots that do not point to the ToSpace after
// callback invocation will be removed from the set.
template <typename Callback>
static void IterateWithWrapper(Heap* heap, Callback callback) {
Iterate(heap, [heap, callback](Address addr) {
return Wrapper(heap, addr, callback);
});
}
// Eliminates all stale slots from the remembered set, i.e. slots that
// are no longer part of live objects. This method must be called after
// marking, when the whole transitive closure is known, and before
// sweeping, while the mark bits are still intact.
static void ClearInvalidSlots(Heap* heap);
static void VerifyValidSlots(Heap* heap);
private:
static SlotSet* GetSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
return chunk->old_to_old_slots();
} else {
return chunk->old_to_new_slots();
}
}
static void ReleaseSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
chunk->ReleaseOldToOldSlots();
} else {
chunk->ReleaseOldToNewSlots();
}
}
static SlotSet* AllocateSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
chunk->AllocateOldToOldSlots();
return chunk->old_to_old_slots();
} else {
chunk->AllocateOldToNewSlots();
return chunk->old_to_new_slots();
}
}
template <typename Callback>
static SlotSet::CallbackResult Wrapper(Heap* heap, Address slot_address,
Callback slot_callback) {
STATIC_ASSERT(direction == OLD_TO_NEW);
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* object = *slot;
if (heap->InFromSpace(object)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
DCHECK(heap_object->IsHeapObject());
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
object = *slot;
// If the object was in from space before the callback and is in to
// space after it, the object is still live.
// Unfortunately, we do not know anything about the slot itself. It
// could be inside a just-freed free-space object.
if (heap->InToSpace(object)) {
return SlotSet::KEEP_SLOT;
}
} else {
DCHECK(!heap->InNewSpace(object));
}
return SlotSet::REMOVE_SLOT;
}
static bool IsValidSlot(Heap* heap, Object** slot);
};
} // namespace internal
} // namespace v8
#endif // V8_REMEMBERED_SET_H
......@@ -111,6 +111,7 @@ class SlotSet : public Malloced {
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
// Returns the new number of slots.
//
// Sample usage:
// Iterate([](Address slot_address) {
......@@ -118,10 +119,11 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT;
// });
template <typename Callback>
void Iterate(Callback callback) {
int Iterate(Callback callback) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
if (bucket[bucket_index] != nullptr) {
bool bucket_is_empty = true;
int in_bucket_count = 0;
uint32_t* current_bucket = bucket[bucket_index];
int cell_offset = bucket_index * kBitsPerBucket;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
......@@ -134,7 +136,7 @@ class SlotSet : public Malloced {
uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
if (callback(page_start_ + slot) == KEEP_SLOT) {
bucket_is_empty = false;
++in_bucket_count;
} else {
new_cell ^= bit_mask;
}
......@@ -145,11 +147,13 @@ class SlotSet : public Malloced {
}
}
}
if (bucket_is_empty) {
if (in_bucket_count == 0) {
ReleaseBucket(bucket_index);
}
new_count += in_bucket_count;
}
}
return new_count;
}
private:
......
......@@ -296,22 +296,16 @@ bool PagedSpace::Contains(Object* o) {
}
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
OffsetFrom(addr) & ~Page::kPageAlignmentMask);
if (maybe->owner() != NULL) return maybe;
LargeObjectIterator iterator(heap->lo_space());
for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
// Fixed arrays are the only pointer-containing objects in large object
// space.
if (o->IsFixedArray()) {
MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
if (chunk->Contains(addr)) {
return chunk;
}
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
chunk = heap->lo_space()->FindPage(addr);
}
}
UNREACHABLE();
return NULL;
return chunk;
}
Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}
......
......@@ -480,6 +480,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = nullptr;
chunk->old_to_new_slots_ = nullptr;
chunk->old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
......@@ -944,17 +945,22 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete mutex_;
mutex_ = nullptr;
ReleaseOldToNewSlots();
ReleaseOldToOldSlots();
}
void MemoryChunk::AllocateOldToNewSlots() {
size_t pages = (size_ + Page::kPageSize - 1) / Page::kPageSize;
DCHECK(owner() == heap_->lo_space() || pages == 1);
static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
DCHECK(pages > 0);
DCHECK(nullptr == old_to_new_slots_);
old_to_new_slots_ = new SlotSet[pages];
SlotSet* slot_set = new SlotSet[pages];
for (size_t i = 0; i < pages; i++) {
old_to_new_slots_[i].SetPageStart(address() + i * Page::kPageSize);
slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
}
return slot_set;
}
void MemoryChunk::AllocateOldToNewSlots() {
DCHECK(nullptr == old_to_new_slots_);
old_to_new_slots_ = AllocateSlotSet(size_, address());
}
void MemoryChunk::ReleaseOldToNewSlots() {
......@@ -962,6 +968,15 @@ void MemoryChunk::ReleaseOldToNewSlots() {
old_to_new_slots_ = nullptr;
}
void MemoryChunk::AllocateOldToOldSlots() {
DCHECK(nullptr == old_to_old_slots_);
old_to_old_slots_ = AllocateSlotSet(size_, address());
}
void MemoryChunk::ReleaseOldToOldSlots() {
delete[] old_to_old_slots_;
old_to_old_slots_ = nullptr;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation
......
......@@ -398,6 +398,7 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
+ kPointerSize // SlotSet* old_to_new_slots_;
+ kPointerSize // SlotSet* old_to_old_slots_;
+ kPointerSize; // SkipList* skip_list_;
static const size_t kMinHeaderSize =
......@@ -437,7 +438,6 @@ class MemoryChunk {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
// Only works for addresses in pointer spaces, not data or code spaces.
static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
static inline void UpdateHighWaterMark(Address mark) {
......@@ -514,9 +514,12 @@ class MemoryChunk {
inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
void AllocateOldToOldSlots();
void ReleaseOldToOldSlots();
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
......@@ -640,6 +643,8 @@ class MemoryChunk {
kPageHeaderTag);
}
bool HasPageHeader() { return owner() != nullptr; }
void InsertAfter(MemoryChunk* other);
void Unlink();
......@@ -684,6 +689,7 @@ class MemoryChunk {
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* old_to_new_slots_;
SlotSet* old_to_old_slots_;
SkipList* skip_list_;
......@@ -744,6 +750,9 @@ class Page : public MemoryChunk {
return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
}
// Only works for addresses in pointer spaces, not code space.
inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
// Returns the page containing an allocation top. Because an allocation
// top address can be the upper bound of the page, we need to subtract
// it with kPointerSize first. The address ranges from
......
......@@ -6,31 +6,13 @@
#define V8_STORE_BUFFER_INL_H_
#include "src/heap/heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
namespace v8 {
namespace internal {
uint32_t StoreBuffer::AddressToSlotSetAndOffset(Address addr, SlotSet** slots) {
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
if (offset < MemoryChunk::kHeaderSize || chunk->owner() == nullptr) {
chunk = heap_->lo_space()->FindPage(addr);
offset = addr - chunk->address();
}
if (chunk->old_to_new_slots() == nullptr) {
chunk->AllocateOldToNewSlots();
}
if (offset < Page::kPageSize) {
*slots = chunk->old_to_new_slots();
} else {
*slots = &chunk->old_to_new_slots()[offset / Page::kPageSize];
offset = offset % Page::kPageSize;
}
return static_cast<uint32_t>(offset);
}
void LocalStoreBuffer::Record(Address addr) {
if (top_->is_full()) top_ = new Node(top_);
top_->buffer[top_->count++] = addr;
......@@ -40,19 +22,14 @@ void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
Node* current = top_;
while (current != nullptr) {
for (int i = 0; i < current->count; i++) {
store_buffer->Mark(current->buffer[i]);
Address slot = current->buffer[i];
Page* page = Page::FromAnyPointerAddress(heap_, slot);
RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
current = current->next;
}
}
void StoreBuffer::Mark(Address addr) {
SlotSet* slots;
uint32_t offset;
offset = AddressToSlotSetAndOffset(addr, &slots);
slots->Insert(offset);
}
} // namespace internal
} // namespace v8
......
......@@ -59,169 +59,21 @@ void StoreBuffer::TearDown() {
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->InsertEntriesFromBuffer();
isolate->heap()->store_buffer()->MoveEntriesToRememberedSet();
isolate->counters()->store_buffer_overflows()->Increment();
}
void StoreBuffer::Remove(Address addr) {
InsertEntriesFromBuffer();
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
DCHECK_EQ(chunk->owner()->identity(), OLD_SPACE);
uintptr_t offset = addr - chunk->address();
DCHECK_LT(offset, static_cast<uintptr_t>(Page::kPageSize));
if (chunk->old_to_new_slots() == nullptr) return;
chunk->old_to_new_slots()->Remove(static_cast<uint32_t>(offset));
}
#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
LargeObjectIterator it(space);
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
if (object->IsFixedArray()) {
Address slot_address = object->address();
Address end = object->address() + object->Size();
while (slot_address < end) {
HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
// When we are not in GC the Heap::InNewSpace() predicate
// checks that pointers which satisfy predicate point into
// the active semispace.
Object* object = *slot;
heap_->InNewSpace(object);
slot_address += kPointerSize;
}
}
}
}
#endif
void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
VerifyPointers(heap_->lo_space());
#endif
}
void StoreBuffer::InsertEntriesFromBuffer() {
void StoreBuffer::MoveEntriesToRememberedSet() {
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
if (top == start_) return;
// There's no check of the limit in the loop below so we check here for
// the worst case (compaction doesn't eliminate any pointers).
DCHECK(top <= limit_);
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
Page* last_page = nullptr;
SlotSet* last_slot_set = nullptr;
for (Address* current = start_; current < top; current++) {
DCHECK(!heap_->code_space()->Contains(*current));
Address addr = *current;
Page* page = Page::FromAddress(addr);
SlotSet* slot_set;
uint32_t offset;
if (page == last_page) {
slot_set = last_slot_set;
offset = static_cast<uint32_t>(addr - page->address());
} else {
offset = AddressToSlotSetAndOffset(addr, &slot_set);
last_page = page;
last_slot_set = slot_set;
}
slot_set->Insert(offset);
}
}
static SlotSet::CallbackResult ProcessOldToNewSlot(
Heap* heap, Address slot_address, ObjectSlotCallback slot_callback) {
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* object = *slot;
if (heap->InFromSpace(object)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
DCHECK(heap_object->IsHeapObject());
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
object = *slot;
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
if (heap->InToSpace(object)) {
return SlotSet::KEEP_SLOT;
}
} else {
DCHECK(!heap->InNewSpace(object));
}
return SlotSet::REMOVE_SLOT;
}
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
Heap* heap = heap_;
Iterate([heap, slot_callback](Address addr) {
return ProcessOldToNewSlot(heap, addr, slot_callback);
});
}
template <typename Callback>
void StoreBuffer::Iterate(Callback callback) {
InsertEntriesFromBuffer();
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
if (chunk->old_to_new_slots() != nullptr) {
SlotSet* slots = chunk->old_to_new_slots();
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) {
slots[page].Iterate(callback);
}
}
}
}
void StoreBuffer::ClearInvalidStoreBufferEntries() {
InsertEntriesFromBuffer();
Heap* heap = heap_;
PageIterator it(heap->old_space());
MemoryChunk* chunk;
while (it.has_next()) {
chunk = it.next();
if (chunk->old_to_new_slots() != nullptr) {
SlotSet* slots = chunk->old_to_new_slots();
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
if (pages > 1) {
// Large pages were processed above.
continue;
}
slots->Iterate([heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
Object* object = *slot;
if (heap->InNewSpace(object)) {
DCHECK(object->IsHeapObject());
// If the target object is not black, the source slot must be part
// of a non-black (dead) object.
HeapObject* heap_object = HeapObject::cast(object);
bool live = Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
heap->mark_compact_collector()->IsSlotInLiveObject(addr);
return live ? SlotSet::KEEP_SLOT : SlotSet::REMOVE_SLOT;
}
return SlotSet::REMOVE_SLOT;
});
}
}
}
void StoreBuffer::VerifyValidStoreBufferEntries() {
Heap* heap = heap_;
Iterate([heap](Address addr) {
Object** slot = reinterpret_cast<Object**>(addr);
Object* object = *slot;
if (Page::FromAddress(addr)->owner() != nullptr &&
Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
CHECK(object->IsHeapObject());
CHECK(heap->InNewSpace(object));
heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
reinterpret_cast<Address>(slot), HeapObject::cast(object));
Page* page = Page::FromAnyPointerAddress(heap_, addr);
RememberedSet<OLD_TO_NEW>::Insert(page, addr);
}
return SlotSet::KEEP_SLOT;
});
}
} // namespace internal
......
......@@ -14,14 +14,8 @@
namespace v8 {
namespace internal {
class Page;
class PagedSpace;
class StoreBuffer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
// Intermediate buffer that accumulates old-to-new stores from the generated
// code. On buffer overflow the slots are moved to the remembered set.
class StoreBuffer {
public:
explicit StoreBuffer(Heap* heap);
......@@ -33,25 +27,7 @@ class StoreBuffer {
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
// This is used to add addresses to the store buffer non-concurrently.
inline void Mark(Address addr);
// Removes the given slot from the store buffer non-concurrently. If the
// slot was never added to the store buffer, then the function does nothing.
void Remove(Address addr);
// Slots that do not point to the ToSpace after callback invocation will be
// removed from the set.
void IteratePointersToNewSpace(ObjectSlotCallback callback);
void Verify();
// Eliminates all stale store buffer entries from the store buffer, i.e.,
// slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and
// must be called before sweeping when mark bits are still intact.
void ClearInvalidStoreBufferEntries();
void VerifyValidStoreBufferEntries();
void MoveEntriesToRememberedSet();
private:
Heap* heap_;
......@@ -62,27 +38,13 @@ class StoreBuffer {
Address* limit_;
base::VirtualMemory* virtual_memory_;
// Used for synchronization of concurrent store buffer access.
base::Mutex mutex_;
void InsertEntriesFromBuffer();
inline uint32_t AddressToSlotSetAndOffset(Address slot_address,
SlotSet** slots);
template <typename Callback>
void Iterate(Callback callback);
#ifdef VERIFY_HEAP
void VerifyPointers(LargeObjectSpace* space);
#endif
};
class LocalStoreBuffer BASE_EMBEDDED {
public:
LocalStoreBuffer() : top_(new Node(nullptr)) {}
explicit LocalStoreBuffer(Heap* heap)
: top_(new Node(nullptr)), heap_(heap) {}
~LocalStoreBuffer() {
Node* current = top_;
......@@ -110,6 +72,7 @@ class LocalStoreBuffer BASE_EMBEDDED {
};
Node* top_;
Heap* heap_;
};
} // namespace internal
......
......@@ -891,6 +891,8 @@
'../../src/heap/objects-visiting-inl.h',
'../../src/heap/objects-visiting.cc',
'../../src/heap/objects-visiting.h',
'../../src/heap/remembered-set.cc',
'../../src/heap/remembered-set.h',
'../../src/heap/scavenge-job.h',
'../../src/heap/scavenge-job.cc',
'../../src/heap/scavenger-inl.h',
......