Commit 0400fc20 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Optimize marking of descriptor arrays.

Now a descriptor array tracks the number of descriptors that were
already marked. The marking visitor of a map only marks the subset
of the descriptors that it needs and that are not already marked.

If a descriptor array is shared between M maps and has N descriptors,
then the number of marking operations is reduced from O(M*N) to O(N).

This patch also adds a marking barrier for descriptors.

The marked descriptor counter in a descriptor array is not cleared
after mark-compact GC. Instead, it embeds two bits from the global
mark-compact epoch counter and is considered 0 if the bits do not match
the current value of the global epoch counter.

Bug: v8:8486
Change-Id: I2a7822a6833f3143e1d351e5e4819c2ef2c07fb0
Reviewed-on: https://chromium-review.googlesource.com/c/1382746
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58372}
parent 27cfcf56
...@@ -81,7 +81,7 @@ class ConcurrentMarkingVisitor final ...@@ -81,7 +81,7 @@ class ConcurrentMarkingVisitor final
ConcurrentMarking::MarkingWorklist* bailout, ConcurrentMarking::MarkingWorklist* bailout,
MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects, MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id, ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
bool embedder_tracing_enabled) bool embedder_tracing_enabled, unsigned mark_compact_epoch)
: shared_(shared, task_id), : shared_(shared, task_id),
bailout_(bailout, task_id), bailout_(bailout, task_id),
weak_objects_(weak_objects), weak_objects_(weak_objects),
...@@ -89,7 +89,8 @@ class ConcurrentMarkingVisitor final ...@@ -89,7 +89,8 @@ class ConcurrentMarkingVisitor final
marking_state_(memory_chunk_data), marking_state_(memory_chunk_data),
memory_chunk_data_(memory_chunk_data), memory_chunk_data_(memory_chunk_data),
task_id_(task_id), task_id_(task_id),
embedder_tracing_enabled_(embedder_tracing_enabled) {} embedder_tracing_enabled_(embedder_tracing_enabled),
mark_compact_epoch_(mark_compact_epoch) {}
template <typename T, typename = typename std::enable_if< template <typename T, typename = typename std::enable_if<
std::is_base_of<Object, T>::value>::type> std::is_base_of<Object, T>::value>::type>
...@@ -380,6 +381,29 @@ class ConcurrentMarkingVisitor final ...@@ -380,6 +381,29 @@ class ConcurrentMarkingVisitor final
return 0; return 0;
} }
// Marks only the descriptors in [old_marked, number_of_own_descriptors)
// that have not yet been visited in the current mark-compact epoch.
// UpdateNumberOfMarkedDescriptors atomically raises the per-array marked
// counter (see the CAS loop in DescriptorArray) and returns the previous
// count, so concurrent markers never visit the same descriptor twice.
void VisitDescriptors(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
mark_compact_epoch_, new_marked);
if (old_marked < new_marked) {
// Visit only the not-yet-marked tail of descriptors: [old, new).
VisitPointers(
descriptor_array,
MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
}
}
// Visits a DescriptorArray itself (as opposed to via its owning Map):
// marks the map pointer and the strong header fields, then delegates the
// descriptor slots to VisitDescriptors so the epoch-scoped marked counter
// is honored. Returns the object size in bytes, or 0 if already visited.
int VisitDescriptorArray(Map map, DescriptorArray array) {
if (!ShouldVisit(array)) return 0;
VisitMapPointer(array, array->map_slot());
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
// Header fields up to (but excluding) the first descriptor slot.
VisitPointers(array, array->GetFirstPointerSlot(),
array->GetDescriptorSlot(0));
VisitDescriptors(array, array->number_of_descriptors());
return size;
}
int VisitTransitionArray(Map map, TransitionArray array) { int VisitTransitionArray(Map map, TransitionArray array) {
if (!ShouldVisit(array)) return 0; if (!ShouldVisit(array)) return 0;
VisitMapPointer(array, array->map_slot()); VisitMapPointer(array, array->map_slot());
...@@ -593,6 +617,7 @@ class ConcurrentMarkingVisitor final ...@@ -593,6 +617,7 @@ class ConcurrentMarkingVisitor final
int task_id_; int task_id_;
SlotSnapshot slot_snapshot_; SlotSnapshot slot_snapshot_;
bool embedder_tracing_enabled_; bool embedder_tracing_enabled_;
const unsigned mark_compact_epoch_;
}; };
// Strings can change maps due to conversion to thin string or external strings. // Strings can change maps due to conversion to thin string or external strings.
...@@ -675,7 +700,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) { ...@@ -675,7 +700,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
int kObjectsUntilInterrupCheck = 1000; int kObjectsUntilInterrupCheck = 1000;
ConcurrentMarkingVisitor visitor( ConcurrentMarkingVisitor visitor(
shared_, bailout_, &task_state->memory_chunk_data, weak_objects_, shared_, bailout_, &task_state->memory_chunk_data, weak_objects_,
embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse()); embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse(),
task_state->mark_compact_epoch);
double time_ms; double time_ms;
size_t marked_bytes = 0; size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) { if (FLAG_trace_concurrent_marking) {
...@@ -804,6 +830,8 @@ void ConcurrentMarking::ScheduleTasks() { ...@@ -804,6 +830,8 @@ void ConcurrentMarking::ScheduleTasks() {
"Scheduling concurrent marking task %d\n", i); "Scheduling concurrent marking task %d\n", i);
} }
task_state_[i].preemption_request = false; task_state_[i].preemption_request = false;
task_state_[i].mark_compact_epoch =
heap_->mark_compact_collector()->epoch();
is_pending_[i] = true; is_pending_[i] = true;
++pending_task_count_; ++pending_task_count_;
auto task = auto task =
......
...@@ -106,6 +106,7 @@ class ConcurrentMarking { ...@@ -106,6 +106,7 @@ class ConcurrentMarking {
std::atomic<bool> preemption_request; std::atomic<bool> preemption_request;
MemoryChunkDataMap memory_chunk_data; MemoryChunkDataMap memory_chunk_data;
size_t marked_bytes = 0; size_t marked_bytes = 0;
unsigned mark_compact_epoch;
char cache_line_padding[64]; char cache_line_padding[64];
}; };
class Task; class Task;
......
...@@ -192,6 +192,17 @@ inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo, ...@@ -192,6 +192,17 @@ inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo,
Heap::MarkingBarrierForCodeSlow(host, rinfo, object); Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
} }
// Write-barrier fast path for descriptor arrays: a cheap inline check of
// the containing chunk's marking flag, falling through to the out-of-line
// slow path only while incremental marking is active.
inline void MarkingBarrierForDescriptorArray(Heap* heap,
HeapObject* descriptor_array,
int number_of_own_descriptors) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(descriptor_array);
// Common case: no marking in progress, barrier is a no-op.
if (!chunk->IsMarking()) return;
Heap::MarkingBarrierForDescriptorArraySlow(heap, descriptor_array,
number_of_own_descriptors);
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -63,7 +63,8 @@ void MarkingBarrier(HeapObjectPtr* object, MaybeObjectSlot slot, ...@@ -63,7 +63,8 @@ void MarkingBarrier(HeapObjectPtr* object, MaybeObjectSlot slot,
MaybeObject value); MaybeObject value);
void MarkingBarrierForElements(Heap* heap, HeapObject* object); void MarkingBarrierForElements(Heap* heap, HeapObject* object);
void MarkingBarrierForCode(Code host, RelocInfo* rinfo, HeapObject* object); void MarkingBarrierForCode(Code host, RelocInfo* rinfo, HeapObject* object);
void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject* descriptor_array,
int number_of_own_descriptors);
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -5664,6 +5664,21 @@ void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo, ...@@ -5664,6 +5664,21 @@ void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object); heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
} }
// Slow path of the descriptor-array marking barrier. Decodes the marked
// counter relative to the current mark-compact epoch (a stale epoch reads
// as 0) and, if fewer than number_of_own_descriptors are marked, revisits
// the array through the incremental marker to mark the missing range.
void Heap::MarkingBarrierForDescriptorArraySlow(
Heap* heap, HeapObject* raw_descriptor_array,
int number_of_own_descriptors) {
DCHECK(heap->incremental_marking()->IsMarking());
DescriptorArray descriptor_array =
DescriptorArray::cast(raw_descriptor_array);
int16_t raw_marked = descriptor_array->raw_number_of_marked_descriptors();
if (NumberOfMarkedDescriptors::decode(heap->mark_compact_collector()->epoch(),
raw_marked) <
number_of_own_descriptors) {
heap->incremental_marking()->VisitDescriptors(descriptor_array,
number_of_own_descriptors);
}
}
bool Heap::PageFlagsAreConsistent(HeapObject* object) { bool Heap::PageFlagsAreConsistent(HeapObject* object) {
Heap* heap = Heap::FromWritableHeapObject(object); Heap* heap = Heap::FromWritableHeapObject(object);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object); MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
......
...@@ -355,6 +355,8 @@ class Heap { ...@@ -355,6 +355,8 @@ class Heap {
V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host, V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
RelocInfo* rinfo, RelocInfo* rinfo,
HeapObject* value); HeapObject* value);
V8_EXPORT_PRIVATE static void MarkingBarrierForDescriptorArraySlow(
Heap* heap, HeapObject* descriptor_array, int number_of_own_descriptors);
V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object); V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object);
// Notifies the heap that is ok to start marking or other activities that // Notifies the heap that is ok to start marking or other activities that
......
...@@ -800,6 +800,13 @@ void IncrementalMarking::RevisitObject(HeapObject* obj) { ...@@ -800,6 +800,13 @@ void IncrementalMarking::RevisitObject(HeapObject* obj) {
visitor.Visit(map, obj); visitor.Visit(map, obj);
} }
// Ensures descriptors [0, number_of_own_descriptors) of the array are
// marked by constructing a fresh marking visitor and forwarding to its
// VisitDescriptors, which skips the already-marked prefix.
void IncrementalMarking::VisitDescriptors(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
marking_state());
visitor.VisitDescriptors(descriptor_array, number_of_own_descriptors);
}
template <WorklistToProcess worklist_to_process> template <WorklistToProcess worklist_to_process>
intptr_t IncrementalMarking::ProcessMarkingWorklist( intptr_t IncrementalMarking::ProcessMarkingWorklist(
intptr_t bytes_to_process, ForceCompletionAction completion) { intptr_t bytes_to_process, ForceCompletionAction completion) {
......
...@@ -200,6 +200,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking { ...@@ -200,6 +200,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObjectSlot slot, V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObjectSlot slot,
MaybeObject value); MaybeObject value);
void RevisitObject(HeapObject* obj); void RevisitObject(HeapObject* obj);
// Ensures that all descriptors in range [0, number_of_own_descriptors)
// are visited.
void VisitDescriptors(DescriptorArray array, int number_of_own_descriptors);
void RecordWriteSlow(HeapObject* obj, HeapObjectSlot slot, Object* value); void RecordWriteSlow(HeapObject* obj, HeapObjectSlot slot, Object* value);
void RecordWriteIntoCode(Code host, RelocInfo* rinfo, HeapObject* value); void RecordWriteIntoCode(Code host, RelocInfo* rinfo, HeapObject* value);
......
...@@ -46,7 +46,8 @@ MarkingVisitor<fixed_array_mode, retaining_path_mode, ...@@ -46,7 +46,8 @@ MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState* marking_state) MarkingState* marking_state)
: heap_(collector->heap()), : heap_(collector->heap()),
collector_(collector), collector_(collector),
marking_state_(marking_state) {} marking_state_(marking_state),
mark_compact_epoch_(collector->epoch()) {}
template <FixedArrayVisitationMode fixed_array_mode, template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState> TraceRetainingPathMode retaining_path_mode, typename MarkingState>
...@@ -59,6 +60,18 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, ...@@ -59,6 +60,18 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
return size; return size;
} }
// Main-thread marking counterpart of the concurrent visitor's
// VisitDescriptorArray: visits the strong header fields up to the first
// descriptor slot, then lets VisitDescriptors mark only the descriptors
// not yet visited in this epoch. Returns the object size in bytes.
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitDescriptorArray(Map map,
DescriptorArray array) {
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
VisitPointers(array, array->GetFirstPointerSlot(),
array->GetDescriptorSlot(0));
VisitDescriptors(array, array->number_of_descriptors());
return size;
}
template <FixedArrayVisitationMode fixed_array_mode, template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState> TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>:: int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
...@@ -177,6 +190,8 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, ...@@ -177,6 +190,8 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
// and back pointers in a special way to make these links weak. // and back pointers in a special way to make these links weak.
int size = Map::BodyDescriptor::SizeOf(map, object); int size = Map::BodyDescriptor::SizeOf(map, object);
if (object->CanTransition()) { if (object->CanTransition()) {
// Maps that can transition share their descriptor arrays and require
// special visiting logic to avoid memory leaks.
MarkMapContents(object); MarkMapContents(object);
} else { } else {
Map::BodyDescriptor::IterateBody(map, object, size, this); Map::BodyDescriptor::IterateBody(map, object, size, this);
...@@ -395,21 +410,15 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode, ...@@ -395,21 +410,15 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
// descriptors that belong to this map are marked. The first time a non-empty // descriptors that belong to this map are marked. The first time a non-empty
// descriptor array is marked, its header is also visited. The slot holding // descriptor array is marked, its header is also visited. The slot holding
// the descriptor array will be implicitly recorded when the pointer fields of // the descriptor array will be implicitly recorded when the pointer fields of
// this map are visited. Prototype maps don't keep track of transitions, so // this map are visited.
// just mark the entire descriptor array. DescriptorArray descriptors = map->instance_descriptors();
if (!map->is_prototype_map()) { if (MarkObjectWithoutPush(map, descriptors)) {
DescriptorArray descriptors = map->instance_descriptors(); VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
if (MarkObjectWithoutPush(map, descriptors)) { descriptors->GetDescriptorSlot(0));
VisitPointers(descriptors, descriptors->GetFirstPointerSlot(), }
descriptors->GetDescriptorSlot(0)); int number_of_own_descriptors = map->NumberOfOwnDescriptors();
} if (number_of_own_descriptors) {
int start = 0; VisitDescriptors(descriptors, number_of_own_descriptors);
int end = map->NumberOfOwnDescriptors();
if (start < end) {
VisitPointers(descriptors,
MaybeObjectSlot(descriptors->GetDescriptorSlot(start)),
MaybeObjectSlot(descriptors->GetDescriptorSlot(end)));
}
} }
// Mark the pointer fields of the Map. Since the transitions array has // Mark the pointer fields of the Map. Since the transitions array has
...@@ -419,6 +428,22 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode, ...@@ -419,6 +428,22 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
map->map(), map, Map::BodyDescriptor::SizeOf(map->map(), map), this); map->map(), map, Map::BodyDescriptor::SizeOf(map->map(), map), this);
} }
// Marks the descriptors in [old_marked, number_of_own_descriptors) that
// were not yet visited during the current epoch. The atomic counter
// update makes the work incremental: an array shared by M maps with N
// descriptors is scanned O(N) total instead of O(M*N).
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
VisitDescriptors(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
mark_compact_epoch_, new_marked);
if (old_marked < new_marked) {
// Only the not-yet-marked suffix [old, new) needs visiting.
VisitPointers(
descriptor_array,
MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
}
}
void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) { void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
if (marking_state()->WhiteToGrey(obj)) { if (marking_state()->WhiteToGrey(obj)) {
marking_worklist()->Push(obj); marking_worklist()->Push(obj);
......
...@@ -849,6 +849,8 @@ void MarkCompactCollector::VerifyMarking() { ...@@ -849,6 +849,8 @@ void MarkCompactCollector::VerifyMarking() {
void MarkCompactCollector::Finish() { void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH); TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
epoch_++;
#ifdef DEBUG #ifdef DEBUG
heap()->VerifyCountersBeforeConcurrentSweeping(); heap()->VerifyCountersBeforeConcurrentSweeping();
#endif #endif
......
...@@ -728,6 +728,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase { ...@@ -728,6 +728,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VerifyMarkbitsAreClean(LargeObjectSpace* space); void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif #endif
unsigned epoch() const { return epoch_; }
private: private:
explicit MarkCompactCollector(Heap* heap); explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override; ~MarkCompactCollector() override;
...@@ -923,6 +925,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase { ...@@ -923,6 +925,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MarkingState marking_state_; MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_; NonAtomicMarkingState non_atomic_marking_state_;
// Counts the number of mark-compact collections. This is used for marking
// descriptor arrays. See NumberOfMarkedDescriptors. Only lower two bits are
// used, so it is okay if this counter overflows and wraps around.
unsigned epoch_ = 0;
friend class EphemeronHashTableMarkingTask; friend class EphemeronHashTableMarkingTask;
friend class FullEvacuator; friend class FullEvacuator;
friend class Heap; friend class Heap;
...@@ -946,6 +953,7 @@ class MarkingVisitor final ...@@ -946,6 +953,7 @@ class MarkingVisitor final
V8_INLINE bool ShouldVisitMapPointer() { return false; } V8_INLINE bool ShouldVisitMapPointer() { return false; }
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object); V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object); V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
V8_INLINE int VisitFixedArray(Map map, FixedArray object); V8_INLINE int VisitFixedArray(Map map, FixedArray object);
V8_INLINE int VisitJSApiObject(Map map, JSObject object); V8_INLINE int VisitJSApiObject(Map map, JSObject object);
...@@ -981,6 +989,9 @@ class MarkingVisitor final ...@@ -981,6 +989,9 @@ class MarkingVisitor final
void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start, void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
ObjectSlot end) final {} ObjectSlot end) final {}
V8_INLINE void VisitDescriptors(DescriptorArray descriptor_array,
int number_of_own_descriptors);
private: private:
// Granularity in which FixedArrays are scanned if |fixed_array_mode| // Granularity in which FixedArrays are scanned if |fixed_array_mode|
// is true. // is true.
...@@ -1015,6 +1026,7 @@ class MarkingVisitor final ...@@ -1015,6 +1026,7 @@ class MarkingVisitor final
Heap* const heap_; Heap* const heap_;
MarkCompactCollector* const collector_; MarkCompactCollector* const collector_;
MarkingState* const marking_state_; MarkingState* const marking_state_;
const unsigned mark_compact_epoch_;
}; };
class EvacuationScope { class EvacuationScope {
......
...@@ -10783,7 +10783,7 @@ void DescriptorArray::Initialize(EnumCache enum_cache, ...@@ -10783,7 +10783,7 @@ void DescriptorArray::Initialize(EnumCache enum_cache,
DCHECK_LE(nof_descriptors + slack, kMaxNumberOfDescriptors); DCHECK_LE(nof_descriptors + slack, kMaxNumberOfDescriptors);
set_number_of_all_descriptors(nof_descriptors + slack); set_number_of_all_descriptors(nof_descriptors + slack);
set_number_of_descriptors(nof_descriptors); set_number_of_descriptors(nof_descriptors);
set_number_of_marked_descriptors(0); set_raw_number_of_marked_descriptors(0);
set_filler16bits(0); set_filler16bits(0);
set_enum_cache(enum_cache); set_enum_cache(enum_cache);
MemsetTagged(GetDescriptorSlot(0), undefined_value, MemsetTagged(GetDescriptorSlot(0), undefined_value,
...@@ -10872,6 +10872,28 @@ void DescriptorArray::Sort() { ...@@ -10872,6 +10872,28 @@ void DescriptorArray::Sort() {
DCHECK(IsSortedNoDuplicates()); DCHECK(IsSortedNoDuplicates());
} }
// Atomically raises the number-of-marked-descriptors counter to
// new_marked for the given mark-compact epoch and returns the previous
// (epoch-decoded) value. Uses a CAS loop so concurrent markers racing on
// the same array converge on the maximum: if another thread already
// recorded >= new_marked, the loop exits without writing.
int16_t DescriptorArray::UpdateNumberOfMarkedDescriptors(
unsigned mark_compact_epoch, int16_t new_marked) {
STATIC_ASSERT(kMaxNumberOfDescriptors <=
NumberOfMarkedDescriptors::kMaxNumberOfMarkedDescriptors);
int16_t old_raw_marked = raw_number_of_marked_descriptors();
// decode() yields 0 when the stored epoch bits are stale.
int16_t old_marked =
NumberOfMarkedDescriptors::decode(mark_compact_epoch, old_raw_marked);
int16_t new_raw_marked =
NumberOfMarkedDescriptors::encode(mark_compact_epoch, new_marked);
while (old_marked < new_marked) {
int16_t actual_raw_marked = CompareAndSwapRawNumberOfMarkedDescriptors(
old_raw_marked, new_raw_marked);
if (actual_raw_marked == old_raw_marked) {
// CAS succeeded; our value is now stored.
break;
}
// CAS lost a race: re-decode the competitor's value and retry.
old_raw_marked = actual_raw_marked;
old_marked =
NumberOfMarkedDescriptors::decode(mark_compact_epoch, old_raw_marked);
}
return old_marked;
}
Handle<AccessorPair> AccessorPair::Copy(Isolate* isolate, Handle<AccessorPair> AccessorPair::Copy(Isolate* isolate,
Handle<AccessorPair> pair) { Handle<AccessorPair> pair) {
Handle<AccessorPair> copy = isolate->factory()->NewAccessorPair(); Handle<AccessorPair> copy = isolate->factory()->NewAccessorPair();
...@@ -12792,7 +12814,8 @@ void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors, ...@@ -12792,7 +12814,8 @@ void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) { int number_of_own_descriptors) {
set_raw_instance_descriptors(descriptors); set_raw_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(number_of_own_descriptors); SetNumberOfOwnDescriptors(number_of_own_descriptors);
// TODO(ulan): Add marking write barrier. MarkingBarrierForDescriptorArray(isolate->heap(), descriptors,
number_of_own_descriptors);
} }
static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) { static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
......
...@@ -34,8 +34,8 @@ RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_all_descriptors, ...@@ -34,8 +34,8 @@ RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_all_descriptors,
kNumberOfAllDescriptorsOffset) kNumberOfAllDescriptorsOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_descriptors, RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_descriptors,
kNumberOfDescriptorsOffset) kNumberOfDescriptorsOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_marked_descriptors, RELAXED_INT16_ACCESSORS(DescriptorArray, raw_number_of_marked_descriptors,
kNumberOfMarkedDescriptorsOffset) kRawNumberOfMarkedDescriptorsOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, filler16bits, kFiller16BitsOffset) RELAXED_INT16_ACCESSORS(DescriptorArray, filler16bits, kFiller16BitsOffset)
inline int16_t DescriptorArray::number_of_slack_descriptors() const { inline int16_t DescriptorArray::number_of_slack_descriptors() const {
...@@ -46,6 +46,14 @@ inline int DescriptorArray::number_of_entries() const { ...@@ -46,6 +46,14 @@ inline int DescriptorArray::number_of_entries() const {
return number_of_descriptors(); return number_of_descriptors();
} }
// Relaxed 16-bit compare-and-swap on the raw marked-descriptors field.
// Returns the value actually observed in the field: equal to |expected|
// on success, the competing value otherwise (callers loop on this).
inline int16_t DescriptorArray::CompareAndSwapRawNumberOfMarkedDescriptors(
int16_t expected, int16_t value) {
return base::Relaxed_CompareAndSwap(
reinterpret_cast<base::Atomic16*>(
FIELD_ADDR(this, kRawNumberOfMarkedDescriptorsOffset)),
expected, value);
}
void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) { void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) {
set_enum_cache(array->enum_cache()); set_enum_cache(array->enum_cache());
} }
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include "src/objects.h" #include "src/objects.h"
#include "src/objects/fixed-array.h" #include "src/objects/fixed-array.h"
#include "src/objects/struct.h" #include "src/objects/struct.h"
#include "src/utils.h"
// Has to be the last include (doesn't have include guards): // Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h" #include "src/objects/object-macros.h"
...@@ -40,7 +41,7 @@ class EnumCache : public Tuple2 { ...@@ -40,7 +41,7 @@ class EnumCache : public Tuple2 {
// Header: // Header:
// [16:0 bits]: number_of_all_descriptors (including slack) // [16:0 bits]: number_of_all_descriptors (including slack)
// [32:16 bits]: number_of_descriptors // [32:16 bits]: number_of_descriptors
// [48:32 bits]: number_of_marked_descriptors (used by GC) // [48:32 bits]: raw_number_of_marked_descriptors (used by GC)
// [64:48 bits]: alignment filler // [64:48 bits]: alignment filler
// [kEnumCacheOffset]: enum cache // [kEnumCacheOffset]: enum cache
// Elements: // Elements:
...@@ -135,13 +136,13 @@ class DescriptorArray : public HeapObjectPtr { ...@@ -135,13 +136,13 @@ class DescriptorArray : public HeapObjectPtr {
static const int kNotFound = -1; static const int kNotFound = -1;
// Layout description. // Layout description.
#define DESCRIPTOR_ARRAY_FIELDS(V) \ #define DESCRIPTOR_ARRAY_FIELDS(V) \
V(kNumberOfAllDescriptorsOffset, kUInt16Size) \ V(kNumberOfAllDescriptorsOffset, kUInt16Size) \
V(kNumberOfDescriptorsOffset, kUInt16Size) \ V(kNumberOfDescriptorsOffset, kUInt16Size) \
V(kNumberOfMarkedDescriptorsOffset, kUInt16Size) \ V(kRawNumberOfMarkedDescriptorsOffset, kUInt16Size) \
V(kFiller16BitsOffset, kUInt16Size) \ V(kFiller16BitsOffset, kUInt16Size) \
V(kPointersStartOffset, 0) \ V(kPointersStartOffset, 0) \
V(kEnumCacheOffset, kTaggedSize) \ V(kEnumCacheOffset, kTaggedSize) \
V(kHeaderSize, 0) V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
...@@ -152,7 +153,13 @@ class DescriptorArray : public HeapObjectPtr { ...@@ -152,7 +153,13 @@ class DescriptorArray : public HeapObjectPtr {
STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize)); STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
// Garbage collection support. // Garbage collection support.
DECL_INT16_ACCESSORS(number_of_marked_descriptors) DECL_INT16_ACCESSORS(raw_number_of_marked_descriptors)
// Atomic compare-and-swap operation on the raw_number_of_marked_descriptors.
int16_t CompareAndSwapRawNumberOfMarkedDescriptors(int16_t expected,
int16_t value);
int16_t UpdateNumberOfMarkedDescriptors(unsigned mark_compact_epoch,
int16_t number_of_marked_descriptors);
static constexpr int SizeFor(int number_of_all_descriptors) { static constexpr int SizeFor(int number_of_all_descriptors) {
return offset(number_of_all_descriptors * kEntrySize); return offset(number_of_all_descriptors * kEntrySize);
} }
...@@ -222,6 +229,43 @@ class DescriptorArray : public HeapObjectPtr { ...@@ -222,6 +229,43 @@ class DescriptorArray : public HeapObjectPtr {
OBJECT_CONSTRUCTORS(DescriptorArray, HeapObjectPtr); OBJECT_CONSTRUCTORS(DescriptorArray, HeapObjectPtr);
}; };
// Encodes/decodes the 16-bit raw_number_of_marked_descriptors field of a
// DescriptorArray: the low bits hold 2 epoch bits and a 14-bit marked
// count. Embedding the epoch lets the GC skip clearing the counter after
// each mark-compact cycle — a stale epoch simply decodes as 0.
class NumberOfMarkedDescriptors {
public:
// Bit positions for |bit_field|.
#define BIT_FIELD_FIELDS(V, _) \
V(Epoch, unsigned, 2, _) \
V(Marked, int16_t, 14, _)
DEFINE_BIT_FIELDS(BIT_FIELD_FIELDS)
#undef BIT_FIELD_FIELDS
static const int kMaxNumberOfMarkedDescriptors = Marked::kMax;
// Decodes the raw value of the number of marked descriptors for the
// given mark compact garbage collection epoch.
static inline int16_t decode(unsigned mark_compact_epoch, int16_t raw_value) {
unsigned epoch_from_value = Epoch::decode(static_cast<uint16_t>(raw_value));
int16_t marked_from_value =
Marked::decode(static_cast<uint16_t>(raw_value));
unsigned actual_epoch = mark_compact_epoch & Epoch::kMask;
if (actual_epoch == epoch_from_value) return marked_from_value;
// If the epochs do not match, then either the raw_value is zero (freshly
// allocated descriptor array) or the epoch from value lags by 1.
DCHECK_IMPLIES(raw_value != 0,
Epoch::decode(epoch_from_value + 1) == actual_epoch);
// Not matching epochs means that no descriptors were marked in the
// current epoch.
return 0;
}
// Encodes the number of marked descriptors for the given mark compact
// garbage collection epoch.
static inline int16_t encode(unsigned mark_compact_epoch, int16_t value) {
// TODO(ulan): avoid casting to int16_t by adding support for uint16_t
// atomics.
return static_cast<int16_t>(
Epoch::encode(mark_compact_epoch & Epoch::kMask) |
Marked::encode(value));
}
};
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -641,9 +641,14 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) { ...@@ -641,9 +641,14 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
DescriptorArray descriptors = instance_descriptors(); DescriptorArray descriptors = instance_descriptors();
int number_of_own_descriptors = NumberOfOwnDescriptors(); int number_of_own_descriptors = NumberOfOwnDescriptors();
DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors); DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
descriptors->Append(desc); {
SetNumberOfOwnDescriptors(number_of_own_descriptors + 1); // The following two operations need to happen before the marking write
// barrier.
descriptors->Append(desc);
SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
MarkingBarrierForDescriptorArray(isolate->heap(), descriptors,
number_of_own_descriptors + 1);
}
// Properly mark the map if the {desc} is an "interesting symbol". // Properly mark the map if the {desc} is an "interesting symbol".
if (desc->GetKey()->IsInterestingSymbol()) { if (desc->GetKey()->IsInterestingSymbol()) {
set_may_have_interesting_symbols(true); set_may_have_interesting_symbols(true);
......
...@@ -566,6 +566,10 @@ void Serializer::ObjectSerializer::Serialize() { ...@@ -566,6 +566,10 @@ void Serializer::ObjectSerializer::Serialize() {
SeqOneByteString::cast(object_)->clear_padding(); SeqOneByteString::cast(object_)->clear_padding();
} else if (object_->IsSeqTwoByteString()) { } else if (object_->IsSeqTwoByteString()) {
SeqTwoByteString::cast(object_)->clear_padding(); SeqTwoByteString::cast(object_)->clear_padding();
} else if (object_->IsDescriptorArray()) {
// Reset the marking state of the descriptor array.
DescriptorArray descriptor_array = DescriptorArray::cast(object_);
descriptor_array->set_raw_number_of_marked_descriptors(0);
} }
} }
if (object_->IsJSTypedArray()) { if (object_->IsJSTypedArray()) {
......
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
V(TestNewSpaceRefsInCopiedCode) \ V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \ V(GCFlags) \
V(MarkCompactCollector) \ V(MarkCompactCollector) \
V(MarkCompactEpochCounter) \
V(NoPromotion) \ V(NoPromotion) \
V(NumberStringCacheSize) \ V(NumberStringCacheSize) \
V(ObjectGroups) \ V(ObjectGroups) \
......
...@@ -6186,6 +6186,24 @@ HEAP_TEST(RegressMissingWriteBarrierInAllocate) { ...@@ -6186,6 +6186,24 @@ HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
CHECK(object->map()->IsMap()); CHECK(object->map()->IsMap());
} }
// Verifies the mark-compact epoch counter semantics: it increments once
// per full (old-space) GC — both atomic and incremental — and is left
// unchanged by a young-generation (scavenge) GC.
HEAP_TEST(MarkCompactEpochCounter) {
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
unsigned epoch0 = heap->mark_compact_collector()->epoch();
// Full GC bumps the epoch by one.
CcTest::CollectGarbage(OLD_SPACE);
unsigned epoch1 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch0 + 1, epoch1);
// An incrementally-marked full GC also bumps it exactly once.
heap::SimulateIncrementalMarking(heap, true);
CcTest::CollectGarbage(OLD_SPACE);
unsigned epoch2 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch1 + 1, epoch2);
// Scavenge (new space) must not change the mark-compact epoch.
CcTest::CollectGarbage(NEW_SPACE);
unsigned epoch3 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch2, epoch3);
}
UNINITIALIZED_TEST(ReinitializeStringHashSeed) { UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
// Enable rehashing and create an isolate and context. // Enable rehashing and create an isolate and context.
i::FLAG_rehash_snapshot = true; i::FLAG_rehash_snapshot = true;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment