Commit d7b08b6d authored by Ulan Degenbaev's avatar Ulan Degenbaev Committed by Commit Bot

[heap] Support concurrent marking of maps

The concurrent marker can now visit maps and mark the subset of
descriptors that a map requires.

Bug: v8:8486
Change-Id: I184442ec90a8f60c73cfe16d6e9d66f4efa92a01
Reviewed-on: https://chromium-review.googlesource.com/c/1384320
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58466}
parent 9a0fcfd8
...@@ -356,20 +356,36 @@ class ConcurrentMarkingVisitor final ...@@ -356,20 +356,36 @@ class ConcurrentMarkingVisitor final
} }
int VisitMap(Map meta_map, Map map) { int VisitMap(Map meta_map, Map map) {
if (marking_state_.IsGrey(map)) { if (!ShouldVisit(map)) return 0;
// Maps have ad-hoc weakness for descriptor arrays. They also clear the int size = Map::BodyDescriptor::SizeOf(meta_map, map);
// code-cache. Conservatively visit strong fields skipping the if (map->CanTransition()) {
// descriptor array field and the code cache field. // Maps that can transition share their descriptor arrays and require
VisitMapPointer(map, map->map_slot()); // special visiting logic to avoid memory leaks.
VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset)); // Since descriptor arrays are potentially shared, ensure that only the
VisitPointer( // descriptors that belong to this map are marked. The first time a
map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset)); // non-empty descriptor array is marked, its header is also visited. The
VisitPointer(map, HeapObject::RawMaybeWeakField( // slot holding the descriptor array will be implicitly recorded when the
map, Map::kTransitionsOrPrototypeInfoOffset)); // pointer fields of this map are visited.
VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset)); DescriptorArray descriptors = map->synchronized_instance_descriptors();
bailout_.Push(map); MarkDescriptorArrayBlack(descriptors);
} int number_of_own_descriptors = map->NumberOfOwnDescriptors();
return 0; if (number_of_own_descriptors) {
// It is possible that the concurrent marker observes the
// number_of_own_descriptors out of sync with the descriptors. In that
// case the marking write barrier for the descriptor array will ensure
// that all required descriptors are marked. The concurrent marker
// just should avoid crashing in that case. That's why we need the
// std::min<int>() below.
VisitDescriptors(descriptors,
std::min<int>(number_of_own_descriptors,
descriptors->number_of_descriptors()));
}
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
}
Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
return size;
} }
void VisitDescriptors(DescriptorArray descriptor_array, void VisitDescriptors(DescriptorArray descriptor_array,
...@@ -473,6 +489,14 @@ class ConcurrentMarkingVisitor final ...@@ -473,6 +489,14 @@ class ConcurrentMarkingVisitor final
} }
} }
// Marks |descriptors| black and, on the first successful grey-to-black
// transition, visits only the header pointer slots (everything before the
// first descriptor entry). The descriptor entries themselves are visited
// separately per owning map, so that shared descriptor arrays only get the
// subset of descriptors marked that live maps actually own.
void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
  // Move the array out of white first; GreyToBlack below only succeeds on a
  // grey object, and exactly one marker wins that transition.
  marking_state_.WhiteToGrey(descriptors);
  if (marking_state_.GreyToBlack(descriptors)) {
    // Only the winning (first) marker visits the header slots.
    VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
                  descriptors->GetDescriptorSlot(0));
  }
}
private: private:
// Helper class for collecting in-object slot addresses and values. // Helper class for collecting in-object slot addresses and values.
class SlotSnapshottingVisitor final : public ObjectVisitor { class SlotSnapshottingVisitor final : public ObjectVisitor {
......
...@@ -184,17 +184,29 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>:: ...@@ -184,17 +184,29 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
template <FixedArrayVisitationMode fixed_array_mode, template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState> TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode, int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitMap(Map map, Map object) { MarkingState>::VisitMap(Map meta_map, Map map) {
// When map collection is enabled we have to mark through map's transitions int size = Map::BodyDescriptor::SizeOf(meta_map, map);
// and back pointers in a special way to make these links weak. if (map->CanTransition()) {
int size = Map::BodyDescriptor::SizeOf(map, object);
if (object->CanTransition()) {
// Maps that can transition share their descriptor arrays and require // Maps that can transition share their descriptor arrays and require
// special visiting logic to avoid memory leaks. // special visiting logic to avoid memory leaks.
MarkMapContents(object); // Since descriptor arrays are potentially shared, ensure that only the
} else { // descriptors that belong to this map are marked. The first time a
Map::BodyDescriptor::IterateBody(map, object, size, this); // non-empty descriptor array is marked, its header is also visited. The
// slot holding the descriptor array will be implicitly recorded when the
// pointer fields of this map are visited.
DescriptorArray descriptors = map->instance_descriptors();
MarkDescriptorArrayBlack(map, descriptors);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors) {
DCHECK_LE(number_of_own_descriptors,
descriptors->number_of_descriptors());
VisitDescriptors(descriptors, number_of_own_descriptors);
}
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
} }
Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
return size; return size;
} }
...@@ -406,30 +418,6 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>:: ...@@ -406,30 +418,6 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
return object_size; return object_size;
} }
template <FixedArrayVisitationMode fixed_array_mode,
          TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
                    MarkingState>::MarkMapContents(Map map) {
  // Since descriptor arrays are potentially shared, ensure that only the
  // descriptors that belong to this map are marked. The first time a non-empty
  // descriptor array is marked, its header is also visited. The slot holding
  // the descriptor array will be implicitly recorded when the pointer fields of
  // this map are visited.
  DescriptorArray descriptors = map->instance_descriptors();
  MarkDescriptorArrayBlack(map, descriptors);
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  if (number_of_own_descriptors) {
    // Visit only the subset of descriptors this map owns; the rest of a
    // shared array is marked when the maps owning those entries are visited.
    VisitDescriptors(descriptors, number_of_own_descriptors);
  }
  // Mark the pointer fields of the Map. Since the transitions array has
  // been marked already, it is fine that one of these fields contains a
  // pointer to it.
  Map::BodyDescriptor::IterateBody(
      map->map(), map, Map::BodyDescriptor::SizeOf(map->map(), map), this);
}
template <FixedArrayVisitationMode fixed_array_mode, template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState> TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>:: void MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
......
...@@ -1013,8 +1013,6 @@ class MarkingVisitor final ...@@ -1013,8 +1013,6 @@ class MarkingVisitor final
template <typename T> template <typename T>
V8_INLINE int VisitEmbedderTracingSubclass(Map map, T object); V8_INLINE int VisitEmbedderTracingSubclass(Map map, T object);
V8_INLINE void MarkMapContents(Map map);
// Marks the object grey and pushes it on the marking work list. // Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj); V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
......
...@@ -12826,7 +12826,7 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) { ...@@ -12826,7 +12826,7 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors, void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) { int number_of_own_descriptors) {
set_raw_instance_descriptors(descriptors); set_synchronized_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(number_of_own_descriptors); SetNumberOfOwnDescriptors(number_of_own_descriptors);
MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors, MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
number_of_own_descriptors); number_of_own_descriptors);
......
...@@ -30,7 +30,20 @@ namespace internal { ...@@ -30,7 +30,20 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject) OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR2(Map) CAST_ACCESSOR2(Map)
ACCESSORS2(Map, raw_instance_descriptors, DescriptorArray, kDescriptorsOffset) DescriptorArray Map::instance_descriptors() const {
return DescriptorArray::cast(READ_FIELD(this, kDescriptorsOffset));
}
// Acquire-load accessor for the descriptors field. Pairs with the
// release-store in set_synchronized_instance_descriptors so a reader on
// another thread that sees the new array also sees its initializing stores.
DescriptorArray Map::synchronized_instance_descriptors() const {
  return DescriptorArray::cast(ACQUIRE_READ_FIELD(this, kDescriptorsOffset));
}
// Release-store accessor for the descriptors field; readers that must
// observe fully initialized descriptor arrays use the matching acquire load.
void Map::set_synchronized_instance_descriptors(DescriptorArray value,
                                                WriteBarrierMode mode) {
  RELEASE_WRITE_FIELD(this, kDescriptorsOffset, value);
  // Conditional barrier: callers may pass SKIP_WRITE_BARRIER when the value
  // is known not to need one.
  CONDITIONAL_WRITE_BARRIER(this, kDescriptorsOffset, value, mode);
}
// A freshly allocated layout descriptor can be set on an existing map. // A freshly allocated layout descriptor can be set on an existing map.
// We need to use release-store and acquire-load accessor pairs to ensure // We need to use release-store and acquire-load accessor pairs to ensure
// that the concurrent marking thread observes initializing stores of the // that the concurrent marking thread observes initializing stores of the
...@@ -77,10 +90,6 @@ BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols, ...@@ -77,10 +90,6 @@ BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter, BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
Map::ConstructionCounterBits) Map::ConstructionCounterBits)
DescriptorArray Map::instance_descriptors() const {
return raw_instance_descriptors();
}
InterceptorInfo Map::GetNamedInterceptor() { InterceptorInfo Map::GetNamedInterceptor() {
DCHECK(has_named_interceptor()); DCHECK(has_named_interceptor());
FunctionTemplateInfo info = GetFunctionTemplateInfo(); FunctionTemplateInfo info = GetFunctionTemplateInfo();
...@@ -623,13 +632,13 @@ void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors, ...@@ -623,13 +632,13 @@ void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
void Map::set_bit_field3(uint32_t bits) { void Map::set_bit_field3(uint32_t bits) {
if (kInt32Size != kTaggedSize) { if (kInt32Size != kTaggedSize) {
WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0); RELAXED_WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
} }
WRITE_UINT32_FIELD(this, kBitField3Offset, bits); RELAXED_WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
} }
uint32_t Map::bit_field3() const { uint32_t Map::bit_field3() const {
return READ_UINT32_FIELD(this, kBitField3Offset); return RELAXED_READ_UINT32_FIELD(this, kBitField3Offset);
} }
LayoutDescriptor Map::GetLayoutDescriptor() const { LayoutDescriptor Map::GetLayoutDescriptor() const {
......
...@@ -575,6 +575,7 @@ class Map : public HeapObject { ...@@ -575,6 +575,7 @@ class Map : public HeapObject {
// [instance descriptors]: describes the object. // [instance descriptors]: describes the object.
inline DescriptorArray instance_descriptors() const; inline DescriptorArray instance_descriptors() const;
inline DescriptorArray synchronized_instance_descriptors() const;
void SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors, void SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors); int number_of_own_descriptors);
...@@ -971,7 +972,8 @@ class Map : public HeapObject { ...@@ -971,7 +972,8 @@ class Map : public HeapObject {
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value); MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
// Use the high-level instance_descriptors/SetInstanceDescriptors instead. // Use the high-level instance_descriptors/SetInstanceDescriptors instead.
DECL_ACCESSORS2(raw_instance_descriptors, DescriptorArray) inline void set_synchronized_instance_descriptors(
DescriptorArray array, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
static const int kFastPropertiesSoftLimit = 12; static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128; static const int kMaxFastProperties = 128;
......
...@@ -455,9 +455,18 @@ ...@@ -455,9 +455,18 @@
#define READ_UINT32_FIELD(p, offset) \ #define READ_UINT32_FIELD(p, offset) \
(*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset))) (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))
// Relaxed (non-synchronizing, tear-free) atomic load of a 32-bit unsigned
// field at |offset| in heap object |p|.
#define RELAXED_READ_UINT32_FIELD(p, offset) \
  static_cast<uint32_t>(base::Relaxed_Load( \
      reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
#define WRITE_UINT32_FIELD(p, offset, value) \ #define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value) (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
// Relaxed (non-synchronizing, tear-free) atomic store of a 32-bit unsigned
// field at |offset| in heap object |p|. Deliberately no trailing semicolon
// inside the macro: the call site supplies it, so the macro behaves as a
// single statement (e.g. in unbraced if/else) — consistent with
// RELEASE_WRITE_INT32_FIELD.
#define RELAXED_WRITE_UINT32_FIELD(p, offset, value)          \
  base::Relaxed_Store(                                        \
      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
      static_cast<base::Atomic32>(value))
#define READ_INT32_FIELD(p, offset) \ #define READ_INT32_FIELD(p, offset) \
(*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset))) (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
...@@ -468,13 +477,13 @@ ...@@ -468,13 +477,13 @@
#define WRITE_INT32_FIELD(p, offset, value) \ #define WRITE_INT32_FIELD(p, offset, value) \
(*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value) (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
// Relaxed (non-synchronizing, tear-free) atomic store of a 32-bit signed
// field at |offset| in heap object |p|. Deliberately no trailing semicolon
// inside the macro: the call site supplies it, so the macro behaves as a
// single statement (e.g. in unbraced if/else) — consistent with
// RELEASE_WRITE_INT32_FIELD.
#define RELAXED_WRITE_INT32_FIELD(p, offset, value)           \
  base::Relaxed_Store(                                        \
      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
      static_cast<base::Atomic32>(value))
#define RELEASE_WRITE_INT32_FIELD(p, offset, value) \ #define RELEASE_WRITE_INT32_FIELD(p, offset, value) \
base::Release_Store( \ base::Release_Store( \
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic32>(value))
#define RELAXED_WRITE_INT32_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \ reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic32>(value)); static_cast<base::Atomic32>(value));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment