Commit 7a5a777c authored by Ulan Degenbaev's avatar Ulan Degenbaev Committed by Commit Bot

[heap] Add mechanism for tracking invalidated slots per memory chunk.

For correct slots recording in concurrent marker, we need to resolve
the race that happens when
1) the mutator is invalidating slots for double unboxing or string
conversions
2) and the concurrent marker is recording these slots.

This patch adds a data-structure for tracking the invalidated objects.
Thus we can allow the concurrent marker to record slots without
worrying about clearing them. During old-to-old pointer updating phase
we re-check all slots that belong to the invalidated objects.

BUG=chromium:694255

Change-Id: Ifc3d82918cd3b96e5a5fb7125691626a56f4ab83
Reviewed-on: https://chromium-review.googlesource.com/591810
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47049}
parent 896afdde
......@@ -1581,6 +1581,9 @@ v8_source_set("v8_base") {
"src/heap/incremental-marking-job.h",
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
"src/heap/invalidated-slots-inl.h",
"src/heap/invalidated-slots.cc",
"src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.h",
"src/heap/local-allocator.h",
"src/heap/mark-compact-inl.h",
......
......@@ -4605,10 +4605,17 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
void Heap::NotifyObjectLayoutChange(HeapObject* object,
void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
const DisallowHeapAllocation&) {
DCHECK(InOldSpace(object) || InNewSpace(object));
if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndPush(object);
if (InOldSpace(object)) {
// The concurrent marker might have recorded slots for the object.
// Register this object as invalidated to filter out the slots.
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
chunk->RegisterObjectWithInvalidatedSlots(object, size);
}
}
#ifdef VERIFY_HEAP
DCHECK(pending_layout_change_object_ == nullptr);
......
......@@ -1186,7 +1186,8 @@ class Heap {
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
void NotifyObjectLayoutChange(HeapObject* object,
// The old size is the size of the object before layout change.
void NotifyObjectLayoutChange(HeapObject* object, int old_size,
const DisallowHeapAllocation&);
#ifdef VERIFY_HEAP
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INVALIDATED_SLOTS_INL_H
#define V8_INVALIDATED_SLOTS_INL_H
#include <map>
#include "src/allocation.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/objects-body-descriptors.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
// Returns false iff |slot| lies inside a registered invalidated object and
// that object reports the offset as invalid. Queried slots must arrive in
// non-decreasing address order (enforced in DEBUG builds), which makes the
// filter amortized O(1) per query.
bool InvalidatedSlotsFilter::IsValid(Address slot) {
#ifdef DEBUG
  DCHECK_LT(slot, sentinel_);
  // Slots must come in non-decreasing order.
  DCHECK_LE(last_slot_, slot);
  last_slot_ = slot;
#endif
  // Advance past invalidated ranges that end at or before the slot. The
  // sentinel entry (area end) guarantees termination.
  while (slot >= invalidated_end_) {
    ++iterator_;
    if (iterator_ != iterator_end_) {
      // Invalidated ranges must not overlap.
      DCHECK_LE(invalidated_end_, iterator_->first->address());
      invalidated_start_ = iterator_->first->address();
      invalidated_end_ = invalidated_start_ + iterator_->second;
    } else {
      // No more invalidated objects: collapse both bounds to the sentinel
      // so every remaining slot is reported valid by the check below.
      invalidated_start_ = sentinel_;
      invalidated_end_ = sentinel_;
    }
  }
  // Now the invalidated region ends after the slot.
  if (slot < invalidated_start_) {
    // The invalidated region starts after the slot.
    return true;
  }
  // The invalidated region includes the slot.
  // Ask the object if the slot is valid.
  if (invalidated_object_ == nullptr) {
    // Lazily materialize the object and its current (post-layout-change)
    // size; computed at most once per invalidated object.
    invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
    invalidated_object_size_ =
        invalidated_object_->SizeFromMap(invalidated_object_->map());
  }
  int offset = static_cast<int>(slot - invalidated_start_);
  DCHECK_GT(offset, 0);
  // Slots beyond the object's current size (the object shrank during the
  // layout change) are invalid; otherwise defer to the body descriptor.
  return offset < invalidated_object_size_ &&
         invalidated_object_->IsValidSlot(offset);
}
} // namespace internal
} // namespace v8
#endif // V8_INVALIDATED_SLOTS_INL_H
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
// Sets up iteration state over the chunk's invalidated-object map. When the
// chunk carries no invalidated slots, an empty map is substituted so every
// queried slot is considered valid.
InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
  // Only old-space pages may carry invalidated slots.
  DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
                 chunk->owner()->identity() == OLD_SPACE);
  InvalidatedSlots* slots = chunk->invalidated_slots();
  if (slots == nullptr) slots = &empty_;
  iterator_ = slots->begin();
  iterator_end_ = slots->end();
  sentinel_ = chunk->area_end();
  // Position on the first invalidated range, or collapse both bounds to the
  // sentinel when there is none.
  if (iterator_ == iterator_end_) {
    invalidated_start_ = sentinel_;
    invalidated_end_ = sentinel_;
  } else {
    invalidated_start_ = iterator_->first->address();
    invalidated_end_ = invalidated_start_ + iterator_->second;
  }
  // These values will be lazily set when needed.
  invalidated_object_ = nullptr;
  invalidated_object_size_ = 0;
#ifdef DEBUG
  last_slot_ = chunk->area_start();
#endif
}
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INVALIDATED_SLOTS_H
#define V8_INVALIDATED_SLOTS_H
#include <map>
#include <stack>
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
class HeapObject;
// This data structure stores objects that went through object layout change
// that potentially invalidates slots recorded concurrently. The second part
// of each element is the size of the corresponding object before the layout
// change.
using InvalidatedSlots = std::map<HeapObject*, int>;
// This class provides IsValid predicate that takes into account the set
// of invalidated objects in the given memory chunk.
// The sequence of queried slots must be non-decreasing. This allows fast
// implementation with complexity O(m*log(m) + n), where
// m is the number of invalidated objects in the memory chunk.
// n is the number of IsValid queries.
class InvalidatedSlotsFilter {
 public:
  explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
  // Returns whether the given slot address is still valid. Slots must be
  // queried in non-decreasing address order.
  inline bool IsValid(Address slot);

 private:
  // Current and end position in the chunk's invalidated-objects map.
  InvalidatedSlots::const_iterator iterator_;
  InvalidatedSlots::const_iterator iterator_end_;
  // End of the chunk's usable area; used as a "no more ranges" marker.
  Address sentinel_;
  // Bounds of the invalidated range currently under consideration.
  Address invalidated_start_;
  Address invalidated_end_;
  // Lazily resolved object at invalidated_start_ and its current size.
  HeapObject* invalidated_object_;
  int invalidated_object_size_;
  // Stand-in map used when the chunk has no invalidated slots.
  InvalidatedSlots empty_;
#ifdef DEBUG
  // Last queried slot, used to verify the non-decreasing-order contract.
  Address last_slot_;
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_INVALIDATED_SLOTS_H
......@@ -21,6 +21,8 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/local-allocator.h"
#include "src/heap/mark-compact-inl.h"
......@@ -3264,6 +3266,14 @@ void MarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
#ifdef DEBUG
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL(p->invalidated_slots());
}
#endif
}
class Evacuator : public Malloced {
......@@ -4131,13 +4141,21 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[](Address slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(
reinterpret_cast<Object**>(slot));
},
SlotSet::PREFREE_EMPTY_BUCKETS);
InvalidatedSlotsFilter filter(chunk_);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[&filter](Address slot) {
if (!filter.IsValid(slot)) return REMOVE_SLOT;
return UpdateSlot<AccessMode::NON_ATOMIC>(
reinterpret_cast<Object**>(slot));
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
chunk_->invalidated_slots() != nullptr) {
// The invalidated slots are not needed after old-to-old slots were
// processed.
chunk_->ReleaseInvalidatedSlots();
}
}
......
......@@ -120,7 +120,8 @@ class RememberedSet : public AllStatic {
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
if (slots != nullptr || typed_slots != nullptr) {
if (slots != nullptr || typed_slots != nullptr ||
chunk->invalidated_slots() != nullptr) {
callback(chunk);
}
}
......@@ -230,6 +231,7 @@ class RememberedSet : public AllStatic {
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
chunk->ReleaseInvalidatedSlots();
}
}
......
......@@ -549,6 +549,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
nullptr);
base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
chunk->invalidated_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
......@@ -1216,6 +1217,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
}
......@@ -1286,6 +1288,28 @@ void MemoryChunk::ReleaseTypedSlotSet() {
}
}
// Lazily allocates the per-chunk invalidated-slots map. Must only be called
// when no map is present yet; returns the freshly allocated map.
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
  DCHECK_NULL(invalidated_slots_);
  invalidated_slots_ = new InvalidatedSlots();
  return invalidated_slots_;
}
// Frees the invalidated-slots map, if one was allocated. Safe to call
// repeatedly; subsequent calls are no-ops.
void MemoryChunk::ReleaseInvalidatedSlots() {
  if (invalidated_slots_ == nullptr) return;
  delete invalidated_slots_;
  invalidated_slots_ = nullptr;
}
// Records |object| as having had a layout change that may invalidate slots
// recorded by the concurrent marker. |size| is the object's size before the
// change. Re-registering an object keeps the maximum size seen so the
// recorded invalidated range never shrinks.
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
                                                     int size) {
  if (invalidated_slots() == nullptr) {
    AllocateInvalidatedSlots();
  }
  // Single map lookup: operator[] default-inserts 0 for a new object, then
  // the stored size is raised to |size| in place. (The original performed
  // two separate lookups for the read and the write.)
  int& stored_size = (*invalidated_slots())[object];
  stored_size = std::max(stored_size, size);
}
void MemoryChunk::AllocateLocalTracker() {
DCHECK_NULL(local_tracker_);
local_tracker_ = new LocalArrayBufferTracker(heap());
......
......@@ -6,6 +6,7 @@
#define V8_HEAP_SPACES_H_
#include <list>
#include <map>
#include <memory>
#include <unordered_set>
......@@ -19,6 +20,7 @@
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
#include "src/list.h"
#include "src/objects.h"
......@@ -354,7 +356,8 @@ class MemoryChunk {
+ kIntptrSize // intptr_t live_byte_count_
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kPointerSize // SkipList* skip_list_
+ kPointerSize // InvalidatedSlots* invalidated_slots_
+ kPointerSize // SkipList* skip_list_
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::RecursiveMutex* mutex_
+ kPointerSize // base::AtomicWord concurrent_sweeping_
......@@ -472,6 +475,11 @@ class MemoryChunk {
template <RememberedSetType type>
void ReleaseTypedSlotSet();
InvalidatedSlots* AllocateInvalidatedSlots();
void ReleaseInvalidatedSlots();
void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
void AllocateLocalTracker();
void ReleaseLocalTracker();
inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
......@@ -631,6 +639,7 @@ class MemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_;
SkipList* skip_list_;
......
......@@ -2598,7 +2598,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
if (has_pointers) {
heap->NotifyObjectLayoutChange(this, no_allocation);
heap->NotifyObjectLayoutChange(this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
......@@ -2674,7 +2674,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
bool has_pointers = StringShape(this).IsIndirect();
if (has_pointers) {
heap->NotifyObjectLayoutChange(this, no_allocation);
heap->NotifyObjectLayoutChange(this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
......@@ -3980,7 +3980,9 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Heap* heap = isolate->heap();
heap->NotifyObjectLayoutChange(*object, no_allocation);
int old_instance_size = old_map->instance_size();
heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
......@@ -4014,7 +4016,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// Create filler object past the new instance size.
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_map->instance_size() - new_instance_size;
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK(instance_size_delta >= 0);
if (instance_size_delta > 0) {
......@@ -4096,11 +4098,12 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
DisallowHeapAllocation no_allocation;
Heap* heap = isolate->heap();
heap->NotifyObjectLayoutChange(*object, no_allocation);
int old_instance_size = map->instance_size();
heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
int instance_size_delta = map->instance_size() - new_instance_size;
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK(instance_size_delta >= 0);
if (instance_size_delta > 0) {
......@@ -17071,11 +17074,11 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
if (!string->IsInternalizedString()) {
DisallowHeapAllocation no_gc;
isolate->heap()->NotifyObjectLayoutChange(string, no_gc);
int old_size = string->Size();
isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
bool one_byte = internalized->IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
int old_size = string->Size();
DCHECK(old_size >= ThinString::kSize);
string->synchronized_set_map(*map);
ThinString* thin = ThinString::cast(string);
......
......@@ -160,7 +160,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Zap the property to avoid keeping objects alive. Zapping is not necessary
// for properties stored in the descriptor array.
if (details.location() == kField) {
isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
isolate->heap()->NotifyObjectLayoutChange(*receiver, map->instance_size(),
no_allocation);
Object* filler = isolate->heap()->one_pointer_filler_map();
FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
......
......@@ -1027,6 +1027,9 @@
'heap/incremental-marking-job.h',
'heap/incremental-marking.cc',
'heap/incremental-marking.h',
'heap/invalidated-slots-inl.h',
'heap/invalidated-slots.cc',
'heap/invalidated-slots.h',
'heap/item-parallel-job.h',
'heap/local-allocator.h',
'heap/mark-compact-inl.h',
......
......@@ -78,6 +78,7 @@ v8_executable("cctest") {
"heap/test-concurrent-marking.cc",
"heap/test-heap.cc",
"heap/test-incremental-marking.cc",
"heap/test-invalidated-slots.cc",
"heap/test-lab.cc",
"heap/test-mark-compact.cc",
"heap/test-page-promotion.cc",
......
......@@ -96,6 +96,7 @@
'heap/test-concurrent-marking.cc',
'heap/test-heap.cc',
'heap/test-incremental-marking.cc',
'heap/test-invalidated-slots.cc',
'heap/test-lab.cc',
'heap/test-mark-compact.cc',
'heap/test-page-promotion.cc',
......
......@@ -16,6 +16,7 @@
V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
V(InvalidatedSlots) \
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
V(MarkCompactCollector) \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdlib.h>
#include "src/v8.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
// Exercises InvalidatedSlotsFilter on a page filled with byte arrays:
// without registrations all slots are valid; registering an object makes
// all of its slots invalid (byte arrays have no valid pointer slots).
HEAP_TEST(InvalidatedSlots) {
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  Isolate* isolate = heap->isolate();
  PagedSpace* old_space = heap->old_space();
  Page* page;
  std::vector<ByteArray*> byte_arrays;
  // Choose the length so each array occupies exactly 256 bytes, letting the
  // arrays tile the page area evenly.
  const int kLength = 256 - ByteArray::kHeaderSize;
  const int kSize = ByteArray::SizeFor(kLength);
  CHECK_EQ(kSize, 256);
  // Fill a page with byte arrays.
  {
    AlwaysAllocateScope always_allocate(isolate);
    // Force old space to the point where the next allocation opens a fresh
    // page, so all arrays land on the same page.
    heap::SimulateFullSpace(old_space);
    ByteArray* byte_array;
    CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
    byte_arrays.push_back(byte_array);
    page = Page::FromAddress(byte_array->address());
    CHECK_EQ(page->area_size() % kSize, 0u);
    size_t n = page->area_size() / kSize;
    for (size_t i = 1; i < n; i++) {
      CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
      byte_arrays.push_back(byte_array);
      CHECK_EQ(page, Page::FromAddress(byte_array->address()));
    }
  }
  CHECK_NULL(page->invalidated_slots());
  {
    // Without invalidated slots on the page, the filter considers
    // all slots as valid.
    InvalidatedSlotsFilter filter(page);
    for (auto byte_array : byte_arrays) {
      Address start = byte_array->address() + ByteArray::kHeaderSize;
      Address end = byte_array->address() + kSize;
      for (Address addr = start; addr < end; addr += kPointerSize) {
        CHECK(filter.IsValid(addr));
      }
    }
  }
  // Register every second byte array as invalidated.
  for (size_t i = 0; i < byte_arrays.size(); i += 2) {
    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i], kSize);
  }
  {
    // Registered arrays (even indices) must have all slots filtered out;
    // unregistered ones stay fully valid.
    InvalidatedSlotsFilter filter(page);
    for (size_t i = 0; i < byte_arrays.size(); i++) {
      ByteArray* byte_array = byte_arrays[i];
      Address start = byte_array->address() + ByteArray::kHeaderSize;
      Address end = byte_array->address() + kSize;
      for (Address addr = start; addr < end; addr += kPointerSize) {
        if (i % 2 == 0) {
          CHECK(!filter.IsValid(addr));
        } else {
          CHECK(filter.IsValid(addr));
        }
      }
    }
  }
  // Register the remaining byte arrays as invalidated.
  for (size_t i = 1; i < byte_arrays.size(); i += 2) {
    page->RegisterObjectWithInvalidatedSlots(byte_arrays[i], kSize);
  }
  {
    // With every array registered, no slot on the page is valid.
    InvalidatedSlotsFilter filter(page);
    for (size_t i = 0; i < byte_arrays.size(); i++) {
      ByteArray* byte_array = byte_arrays[i];
      Address start = byte_array->address() + ByteArray::kHeaderSize;
      Address end = byte_array->address() + kSize;
      for (Address addr = start; addr < end; addr += kPointerSize) {
        CHECK(!filter.IsValid(addr));
      }
    }
  }
}
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment