Commit d7b08b6d authored by Ulan Degenbaev, committed by Commit Bot

[heap] Support concurrent marking of maps

The concurrent marker can now visit maps and mark the subset of
descriptors that a map requires.

Bug: v8:8486
Change-Id: I184442ec90a8f60c73cfe16d6e9d66f4efa92a01
Reviewed-on: https://chromium-review.googlesource.com/c/1384320
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58466}
parent 9a0fcfd8
@@ -356,20 +356,36 @@ class ConcurrentMarkingVisitor final
   }
 
   int VisitMap(Map meta_map, Map map) {
-    if (marking_state_.IsGrey(map)) {
-      // Maps have ad-hoc weakness for descriptor arrays. They also clear the
-      // code-cache. Conservatively visit strong fields skipping the
-      // descriptor array field and the code cache field.
-      VisitMapPointer(map, map->map_slot());
-      VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
-      VisitPointer(
-          map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
-      VisitPointer(map, HeapObject::RawMaybeWeakField(
-                            map, Map::kTransitionsOrPrototypeInfoOffset));
-      VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
-      bailout_.Push(map);
-    }
-    return 0;
+    if (!ShouldVisit(map)) return 0;
+    int size = Map::BodyDescriptor::SizeOf(meta_map, map);
+    if (map->CanTransition()) {
+      // Maps that can transition share their descriptor arrays and require
+      // special visiting logic to avoid memory leaks.
+      // Since descriptor arrays are potentially shared, ensure that only the
+      // descriptors that belong to this map are marked. The first time a
+      // non-empty descriptor array is marked, its header is also visited. The
+      // slot holding the descriptor array will be implicitly recorded when the
+      // pointer fields of this map are visited.
+      DescriptorArray descriptors = map->synchronized_instance_descriptors();
+      MarkDescriptorArrayBlack(descriptors);
+      int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+      if (number_of_own_descriptors) {
+        // It is possible that the concurrent marker observes
+        // number_of_own_descriptors out of sync with the descriptors. In that
+        // case the marking write barrier for the descriptor array will ensure
+        // that all required descriptors are marked. The concurrent marker
+        // should just avoid crashing in that case; that is why we need the
+        // std::min<int>() below.
+        VisitDescriptors(descriptors,
+                         std::min<int>(number_of_own_descriptors,
+                                       descriptors->number_of_descriptors()));
+      }
+      // Mark the pointer fields of the Map. Since the transitions array has
+      // been marked already, it is fine that one of these fields contains a
+      // pointer to it.
+    }
+    Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
+    return size;
   }
 
   void VisitDescriptors(DescriptorArray descriptor_array,
@@ -473,6 +489,14 @@ class ConcurrentMarkingVisitor final
     }
   }
 
+  void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
+    marking_state_.WhiteToGrey(descriptors);
+    if (marking_state_.GreyToBlack(descriptors)) {
+      VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
+                    descriptors->GetDescriptorSlot(0));
+    }
+  }
+
  private:
   // Helper class for collecting in-object slot addresses and values.
   class SlotSnapshottingVisitor final : public ObjectVisitor {
......
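The MarkDescriptorArrayBlack helper added above performs two color transitions: WhiteToGrey ensures the array is at least grey, and GreyToBlack decides which thread visits the array's header (the slots before the first descriptor). A rough stand-alone model of those transitions, assuming CAS-based marking bits (hypothetical types, not V8's actual MarkingState API):

    #include <atomic>

    // Hypothetical model of the WhiteToGrey/GreyToBlack transitions used by
    // MarkDescriptorArrayBlack. Each transition is a compare-and-swap, so
    // exactly one racing thread wins GreyToBlack and takes responsibility
    // for visiting the descriptor array's header slots.
    enum Color : int { kWhite, kGrey, kBlack };

    struct MarkBits {
      std::atomic<int> color{kWhite};
      bool WhiteToGrey() {
        int expected = kWhite;
        return color.compare_exchange_strong(expected, kGrey);
      }
      bool GreyToBlack() {
        int expected = kGrey;
        return color.compare_exchange_strong(expected, kBlack);
      }
    };

    void MarkHeaderBlackOnce(MarkBits& bits) {
      bits.WhiteToGrey();  // No-op if another thread already greyed it.
      if (bits.GreyToBlack()) {
        // Winner: visit [GetFirstPointerSlot(), GetDescriptorSlot(0)) here.
        // Losers skip; the winner (or the write barrier) covers the header.
      }
    }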
@@ -184,17 +184,29 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
 template <FixedArrayVisitationMode fixed_array_mode,
           TraceRetainingPathMode retaining_path_mode, typename MarkingState>
 int MarkingVisitor<fixed_array_mode, retaining_path_mode,
-                   MarkingState>::VisitMap(Map map, Map object) {
-  // When map collection is enabled we have to mark through map's transitions
-  // and back pointers in a special way to make these links weak.
-  int size = Map::BodyDescriptor::SizeOf(map, object);
-  if (object->CanTransition()) {
+                   MarkingState>::VisitMap(Map meta_map, Map map) {
+  int size = Map::BodyDescriptor::SizeOf(meta_map, map);
+  if (map->CanTransition()) {
     // Maps that can transition share their descriptor arrays and require
     // special visiting logic to avoid memory leaks.
-    MarkMapContents(object);
-  } else {
-    Map::BodyDescriptor::IterateBody(map, object, size, this);
+    // Since descriptor arrays are potentially shared, ensure that only the
+    // descriptors that belong to this map are marked. The first time a
+    // non-empty descriptor array is marked, its header is also visited. The
+    // slot holding the descriptor array will be implicitly recorded when the
+    // pointer fields of this map are visited.
+    DescriptorArray descriptors = map->instance_descriptors();
+    MarkDescriptorArrayBlack(map, descriptors);
+    int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+    if (number_of_own_descriptors) {
+      DCHECK_LE(number_of_own_descriptors,
+                descriptors->number_of_descriptors());
+      VisitDescriptors(descriptors, number_of_own_descriptors);
+    }
+    // Mark the pointer fields of the Map. Since the transitions array has
+    // been marked already, it is fine that one of these fields contains a
+    // pointer to it.
   }
+  Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
   return size;
 }
@@ -406,30 +418,6 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
   return object_size;
 }
 
-template <FixedArrayVisitationMode fixed_array_mode,
-          TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-void MarkingVisitor<fixed_array_mode, retaining_path_mode,
-                    MarkingState>::MarkMapContents(Map map) {
-  // Since descriptor arrays are potentially shared, ensure that only the
-  // descriptors that belong to this map are marked. The first time a non-empty
-  // descriptor array is marked, its header is also visited. The slot holding
-  // the descriptor array will be implicitly recorded when the pointer fields of
-  // this map are visited.
-  DescriptorArray descriptors = map->instance_descriptors();
-  MarkDescriptorArrayBlack(map, descriptors);
-  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
-  if (number_of_own_descriptors) {
-    VisitDescriptors(descriptors, number_of_own_descriptors);
-  }
-  // Mark the pointer fields of the Map. Since the transitions array has
-  // been marked already, it is fine that one of these fields contains a
-  // pointer to it.
-  Map::BodyDescriptor::IterateBody(
-      map->map(), map, Map::BodyDescriptor::SizeOf(map->map(), map), this);
-}
-
 template <FixedArrayVisitationMode fixed_array_mode,
           TraceRetainingPathMode retaining_path_mode, typename MarkingState>
 void MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
......
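Both visitors now implement the same scheme inline: maps along a transition chain share one DescriptorArray, each map owns only a prefix of it, and the marker visits exactly NumberOfOwnDescriptors entries. A simplified sketch of that prefix-ownership idea (placeholder types, not V8's object layout):

    #include <algorithm>
    #include <vector>

    // Placeholder model: several maps share one descriptor array; each map
    // owns only the first number_of_own_descriptors entries. Marking visits
    // that prefix and never the tail that belongs to a transitioned child.
    struct Descriptor { /* key, details, value */ };

    struct SharedDescriptorArray {
      std::vector<Descriptor> descriptors;
      int number_of_descriptors() const {
        return static_cast<int>(descriptors.size());
      }
    };

    struct MapModel {
      SharedDescriptorArray* instance_descriptors;
      int number_of_own_descriptors;
    };

    template <typename Visitor>
    void VisitOwnDescriptors(const MapModel& map, Visitor&& visit) {
      // The clamp mirrors the std::min<int>() in the concurrent visitor:
      // the count and the array are read by separate loads and can be
      // momentarily out of sync on the marker thread.
      int end = std::min(map.number_of_own_descriptors,
                         map.instance_descriptors->number_of_descriptors());
      for (int i = 0; i < end; ++i) {
        visit(map.instance_descriptors->descriptors[i]);
      }
    }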
@@ -1013,8 +1013,6 @@ class MarkingVisitor final
   template <typename T>
   V8_INLINE int VisitEmbedderTracingSubclass(Map map, T object);
 
-  V8_INLINE void MarkMapContents(Map map);
-
   // Marks the object grey and pushes it on the marking work list.
   V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
......
@@ -12826,7 +12826,7 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
 
 void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
                                  int number_of_own_descriptors) {
-  set_raw_instance_descriptors(descriptors);
+  set_synchronized_instance_descriptors(descriptors);
   SetNumberOfOwnDescriptors(number_of_own_descriptors);
   MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
                                    number_of_own_descriptors);
......
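The one-line change above is load-bearing: SetInstanceDescriptors now publishes the descriptor array with a release store, pairing with the acquire load in synchronized_instance_descriptors() that the concurrent marker uses. In standard C++ terms the pattern looks roughly like this (a sketch with placeholder types, not V8's field-accessor machinery):

    #include <atomic>

    struct DescriptorArrayModel {
      int header_state = 0;  // Stands in for the array's initializing stores.
    };

    std::atomic<DescriptorArrayModel*> descriptors_slot{nullptr};

    // Mutator: initialize the object fully, then publish with release
    // semantics, as RELEASE_WRITE_FIELD does in
    // set_synchronized_instance_descriptors().
    void Publish(DescriptorArrayModel* d) {
      d->header_state = 1;
      descriptors_slot.store(d, std::memory_order_release);
    }

    // Concurrent marker: acquire load, as ACQUIRE_READ_FIELD does in
    // synchronized_instance_descriptors(). If the marker observes the
    // pointer, it also observes the initializing store to header_state.
    DescriptorArrayModel* ReadForMarking() {
      return descriptors_slot.load(std::memory_order_acquire);
    }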
@@ -30,7 +30,20 @@ namespace internal {
 OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
 CAST_ACCESSOR2(Map)
 ACCESSORS2(Map, raw_instance_descriptors, DescriptorArray, kDescriptorsOffset)
 
+DescriptorArray Map::instance_descriptors() const {
+  return DescriptorArray::cast(READ_FIELD(this, kDescriptorsOffset));
+}
+
+DescriptorArray Map::synchronized_instance_descriptors() const {
+  return DescriptorArray::cast(ACQUIRE_READ_FIELD(this, kDescriptorsOffset));
+}
+
+void Map::set_synchronized_instance_descriptors(DescriptorArray value,
+                                                WriteBarrierMode mode) {
+  RELEASE_WRITE_FIELD(this, kDescriptorsOffset, value);
+  CONDITIONAL_WRITE_BARRIER(this, kDescriptorsOffset, value, mode);
+}
+
 // A freshly allocated layout descriptor can be set on an existing map.
 // We need to use release-store and acquire-load accessor pairs to ensure
 // that the concurrent marking thread observes initializing stores of the
@@ -77,10 +90,6 @@ BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
 BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
                     Map::ConstructionCounterBits)
 
-DescriptorArray Map::instance_descriptors() const {
-  return raw_instance_descriptors();
-}
-
 InterceptorInfo Map::GetNamedInterceptor() {
   DCHECK(has_named_interceptor());
   FunctionTemplateInfo info = GetFunctionTemplateInfo();
@@ -623,13 +632,13 @@ void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
 
 void Map::set_bit_field3(uint32_t bits) {
   if (kInt32Size != kTaggedSize) {
-    WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
+    RELAXED_WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
   }
-  WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
+  RELAXED_WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
 }
 
 uint32_t Map::bit_field3() const {
-  return READ_UINT32_FIELD(this, kBitField3Offset);
+  return RELAXED_READ_UINT32_FIELD(this, kBitField3Offset);
 }
 
 LayoutDescriptor Map::GetLayoutDescriptor() const {
......
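bit_field3 encodes NumberOfOwnDescriptors among other bit fields and is now also read from the marker thread, so its accessors become relaxed atomics: the races are well-defined and tear-free, but deliberately unordered relative to the descriptors slot, which is exactly the staleness the std::min<int>() clamp in the concurrent visitor tolerates. A minimal analogue:

    #include <atomic>
    #include <cstdint>

    // Analogue of the relaxed accessors on Map::bit_field3: racing reads
    // and writes are data-race-free, but carry no ordering guarantees
    // relative to other fields such as the descriptors slot.
    std::atomic<uint32_t> bit_field3_word{0};

    void SetBitField3(uint32_t bits) {
      bit_field3_word.store(bits, std::memory_order_relaxed);
    }

    uint32_t BitField3ForMarking() {
      // May lag behind the descriptors slot; callers clamp, as VisitMap
      // does with std::min<int>(number_of_own_descriptors, ...).
      return bit_field3_word.load(std::memory_order_relaxed);
    }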
@@ -575,6 +575,7 @@ class Map : public HeapObject {
   // [instance descriptors]: describes the object.
   inline DescriptorArray instance_descriptors() const;
+  inline DescriptorArray synchronized_instance_descriptors() const;
   void SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
                               int number_of_own_descriptors);
@@ -971,7 +972,8 @@ class Map : public HeapObject {
       MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
 
   // Use the high-level instance_descriptors/SetInstanceDescriptors instead.
   DECL_ACCESSORS2(raw_instance_descriptors, DescriptorArray)
+  inline void set_synchronized_instance_descriptors(
+      DescriptorArray array, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
 
   static const int kFastPropertiesSoftLimit = 12;
   static const int kMaxFastProperties = 128;
......
@@ -455,9 +455,18 @@
 #define READ_UINT32_FIELD(p, offset) \
   (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))
 
+#define RELAXED_READ_UINT32_FIELD(p, offset) \
+  static_cast<uint32_t>(base::Relaxed_Load( \
+      reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
+
 #define WRITE_UINT32_FIELD(p, offset, value) \
   (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
 
+#define RELAXED_WRITE_UINT32_FIELD(p, offset, value) \
+  base::Relaxed_Store( \
+      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
+      static_cast<base::Atomic32>(value));
+
 #define READ_INT32_FIELD(p, offset) \
   (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
@@ -468,13 +477,13 @@
 #define WRITE_INT32_FIELD(p, offset, value) \
   (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
 
-#define RELAXED_WRITE_INT32_FIELD(p, offset, value) \
-  base::Relaxed_Store( \
-      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
-      static_cast<base::Atomic32>(value));
-
 #define RELEASE_WRITE_INT32_FIELD(p, offset, value) \
   base::Release_Store( \
       reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
       static_cast<base::Atomic32>(value))
 
+#define RELAXED_WRITE_INT32_FIELD(p, offset, value) \
+  base::Relaxed_Store( \
+      reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
+      static_cast<base::Atomic32>(value));
......
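The new RELAXED_*_UINT32_FIELD macros follow the same shape as the existing INT32 variants: reinterpret the field address as base::Atomic32 and go through base::Relaxed_Load/Relaxed_Store. For reference, a roughly equivalent stand-alone formulation using C++20 std::atomic_ref (an illustration only; V8 uses its own base atomics, not std::atomic_ref):

    #include <atomic>
    #include <cstdint>

    // Rough stand-alone equivalents of RELAXED_READ_UINT32_FIELD and
    // RELAXED_WRITE_UINT32_FIELD, assuming addr points at a suitably
    // aligned 32-bit field inside a heap object.
    inline uint32_t RelaxedReadUint32(void* addr) {
      return std::atomic_ref<uint32_t>(*static_cast<uint32_t*>(addr))
          .load(std::memory_order_relaxed);
    }

    inline void RelaxedWriteUint32(void* addr, uint32_t value) {
      std::atomic_ref<uint32_t>(*static_cast<uint32_t*>(addr))
          .store(value, std::memory_order_relaxed);
    }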