Commit 9d6b5456 authored by Santiago Aboy Solanes, committed by V8 LUCI CQ

[compiler] Move ShrinkInstanceSize to MapUpdater and acquire its lock

The instance_size of a live map can change via ShrinkInstanceSize. This
change was outside of the scope of the MapUpdater. In order to have a
consistent view of the data, the concurrent reader will access the map
updater lock if needed.

Also refactor MapUpdaterMutexDepthScope (now named
`MapUpdaterGuardIfNeeded`) so that A) it is not possible to forget to
lock it, and B) the class is marked V8_NODISCARD.

As a second refactor use std::function in TraverseCallback.

Bug: v8:7790
Change-Id: I57dd00699ccb1c9f132a950db93704b07ca115ac
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2862765
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74592}
parent 0a661a9a
......@@ -719,9 +719,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
Handle<Map> map, Handle<Name> name, AccessMode access_mode) const {
CHECK(name->IsUniqueName());
JSHeapBroker::MapUpdaterMutexDepthScope mumd_scope(broker());
base::SharedMutexGuardIf<base::kShared> mutex_guard(
isolate()->map_updater_access(), mumd_scope.should_lock());
JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(
broker(), isolate()->map_updater_access());
if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) {
return Invalid();
......
......@@ -1365,9 +1365,8 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
// while the lock is held the Map object may not be modified (except in
// benign ways).
// TODO(jgruber): Consider removing this lock by being smrt.
JSHeapBroker::MapUpdaterMutexDepthScope mumd_scope(broker);
base::SharedMutexGuardIf<base::kShared> mutex_guard(
broker->isolate()->map_updater_access(), mumd_scope.should_lock());
JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(
broker, broker->isolate()->map_updater_access());
instance_type_ = object->instance_type();
instance_size_ = object->instance_size();
......
......@@ -7,6 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/compiler/access-info.h"
#include "src/compiler/feedback-source.h"
......@@ -351,15 +352,19 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
RootIndexMap const& root_index_map() { return root_index_map_; }
class MapUpdaterMutexDepthScope final {
// Locks {mutex} through the duration of this scope iff it is the first
// occurrence. This is done to have a recursive shared lock on {mutex}.
class V8_NODISCARD MapUpdaterGuardIfNeeded final {
public:
explicit MapUpdaterMutexDepthScope(JSHeapBroker* ptr)
explicit MapUpdaterGuardIfNeeded(JSHeapBroker* ptr,
base::SharedMutex* mutex)
: ptr_(ptr),
initial_map_updater_mutex_depth_(ptr->map_updater_mutex_depth_) {
initial_map_updater_mutex_depth_(ptr->map_updater_mutex_depth_),
shared_mutex(mutex, should_lock()) {
ptr_->map_updater_mutex_depth_++;
}
~MapUpdaterMutexDepthScope() {
~MapUpdaterGuardIfNeeded() {
ptr_->map_updater_mutex_depth_--;
DCHECK_EQ(initial_map_updater_mutex_depth_,
ptr_->map_updater_mutex_depth_);
......@@ -372,6 +377,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
private:
JSHeapBroker* const ptr_;
const int initial_map_updater_mutex_depth_;
base::SharedMutexGuardIf<base::kShared> shared_mutex;
};
private:
......
......@@ -6,6 +6,7 @@
#include <queue>
#include "src/base/platform/mutex.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
......@@ -300,6 +301,23 @@ MapUpdater::State MapUpdater::Normalize(const char* reason) {
return state_; // Done.
}
// Shrinks {map}'s instance size by {slack} words and stops slack tracking
// for it. The write to the instance size is performed while holding
// {map_updater_access} exclusively, so that concurrent readers taking the
// same mutex in shared mode (e.g. the compiler's heap broker) observe a
// consistent instance size.
void MapUpdater::ShrinkInstanceSize(base::SharedMutex* map_updater_access,
                                    Map map, int slack) {
  DCHECK_GE(slack, 0);
#ifdef DEBUG
  // Capture state before shrinking: the visitor id must not change, and
  // exactly {slack} unused property fields must be consumed (checked below).
  int old_visitor_id = Map::GetVisitorId(map);
  int new_unused = map.UnusedPropertyFields() - slack;
#endif
  {
    // Keep the critical section minimal: only the instance-size store needs
    // the exclusive lock.
    base::SharedMutexGuard<base::kExclusive> mutex_guard(map_updater_access);
    map.set_instance_size(map.InstanceSizeFromSlack(slack));
  }
  map.set_construction_counter(Map::kNoSlackTracking);
  DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
  DCHECK_EQ(new_unused, map.UnusedPropertyFields());
}
MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
// Updating deprecated maps in-place doesn't make sense.
if (old_map_->is_deprecated()) return state_;
......
......@@ -80,6 +80,9 @@ class V8_EXPORT_PRIVATE MapUpdater {
Representation new_representation,
Handle<FieldType> new_field_type);
static void ShrinkInstanceSize(base::SharedMutex* map_updater_access, Map map,
int slack);
private:
enum State {
kInitialized,
......
......@@ -2204,13 +2204,6 @@ bool Map::EquivalentToForNormalization(const Map other,
JSObject::GetEmbedderFieldCount(other);
}
// Transition-tree traversal callback: folds the minimum number of unused
// property fields seen so far into the int pointed to by {data}.
static void GetMinInobjectSlack(Map map, void* data) {
  int* min_slack = reinterpret_cast<int*>(data);
  const int unused = map.UnusedPropertyFields();
  if (unused < *min_slack) {
    *min_slack = unused;
  }
}
int Map::ComputeMinObjectSlack(Isolate* isolate) {
DisallowGarbageCollection no_gc;
// Has to be an initial map.
......@@ -2218,27 +2211,13 @@ int Map::ComputeMinObjectSlack(Isolate* isolate) {
int slack = UnusedPropertyFields();
TransitionsAccessor transitions(isolate, *this, &no_gc);
transitions.TraverseTransitionTree(&GetMinInobjectSlack, &slack);
TransitionsAccessor::TraverseCallback callback = [&](Map map) {
slack = std::min(slack, map.UnusedPropertyFields());
};
transitions.TraverseTransitionTree(callback);
return slack;
}
// Transition-tree traversal callback: shrinks {map}'s instance size by the
// slack value passed via {data} and marks slack tracking as finished for it.
static void ShrinkInstanceSize(Map map, void* data) {
  int slack = *reinterpret_cast<int*>(data);
  DCHECK_GE(slack, 0);
#ifdef DEBUG
  // Capture state before shrinking: the visitor id must not change, and
  // exactly {slack} unused property fields must be consumed (checked below).
  int old_visitor_id = Map::GetVisitorId(map);
  int new_unused = map.UnusedPropertyFields() - slack;
#endif
  map.set_instance_size(map.InstanceSizeFromSlack(slack));
  map.set_construction_counter(Map::kNoSlackTracking);
  DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
  DCHECK_EQ(new_unused, map.UnusedPropertyFields());
}
// Transition-tree traversal callback: marks slack tracking as complete for
// {map}. The {data} argument is unused.
static void StopSlackTracking(Map map, void* data) {
  map.set_construction_counter(Map::kNoSlackTracking);
}
void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
DisallowGarbageCollection no_gc;
// Has to be an initial map.
......@@ -2246,12 +2225,19 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
int slack = ComputeMinObjectSlack(isolate);
TransitionsAccessor transitions(isolate, *this, &no_gc);
TransitionsAccessor::TraverseCallback callback;
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
transitions.TraverseTransitionTree(&ShrinkInstanceSize, &slack);
callback = [&](Map map) {
MapUpdater::ShrinkInstanceSize(isolate->map_updater_access(), map, slack);
};
} else {
transitions.TraverseTransitionTree(&StopSlackTracking, nullptr);
callback = [](Map map) {
// Stop slack tracking for this map.
map.set_construction_counter(Map::kNoSlackTracking);
};
}
transitions.TraverseTransitionTree(callback);
}
void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
......
......@@ -510,7 +510,7 @@ void TransitionsAccessor::EnsureHasFullTransitionArray() {
}
void TransitionsAccessor::TraverseTransitionTreeInternal(
TraverseCallback callback, void* data, DisallowGarbageCollection* no_gc) {
TraverseCallback callback, DisallowGarbageCollection* no_gc) {
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
......@@ -520,7 +520,7 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
Map simple_target =
Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
TransitionsAccessor(isolate_, simple_target, no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
.TraverseTransitionTreeInternal(callback, no_gc);
break;
}
case kFullTransitionArray: {
......@@ -533,7 +533,7 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
HeapObject heap_object;
if (target->GetHeapObjectIfWeak(&heap_object)) {
TransitionsAccessor(isolate_, Map::cast(heap_object), no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
.TraverseTransitionTreeInternal(callback, no_gc);
} else {
DCHECK(target->IsCleared());
}
......@@ -541,12 +541,12 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
}
for (int i = 0; i < transitions().number_of_transitions(); ++i) {
TransitionsAccessor(isolate_, transitions().GetTarget(i), no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
.TraverseTransitionTreeInternal(callback, no_gc);
}
break;
}
}
callback(map_, data);
callback(map_);
}
#ifdef DEBUG
......
......@@ -100,13 +100,13 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
PropertyAttributes* out_integrity_level = nullptr);
// ===== ITERATION =====
using TraverseCallback = void (*)(Map map, void* data);
using TraverseCallback = std::function<void(Map)>;
// Traverse the transition tree in postorder.
void TraverseTransitionTree(TraverseCallback callback, void* data) {
void TraverseTransitionTree(TraverseCallback callback) {
// Make sure that we do not allocate in the callback.
DisallowGarbageCollection no_gc;
TraverseTransitionTreeInternal(callback, data, &no_gc);
TraverseTransitionTreeInternal(callback, &no_gc);
}
// ===== PROTOTYPE TRANSITIONS =====
......@@ -192,7 +192,7 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
void SetPrototypeTransitions(Handle<WeakFixedArray> proto_transitions);
WeakFixedArray GetPrototypeTransitions();
void TraverseTransitionTreeInternal(TraverseCallback callback, void* data,
void TraverseTransitionTreeInternal(TraverseCallback callback,
DisallowGarbageCollection* no_gc);
Isolate* isolate_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment