Commit 14fda804 authored by Zhi An Ng, committed by V8 LUCI CQ

Revert "[heap] Introduce ParkedSharedMutexGuardIf and use it in compiler"

This reverts commit 4cd856ee.

Reason for revert: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20UBSan/16843/overview

Original change's description:
> [heap] Introduce ParkedSharedMutexGuardIf and use it in compiler
>
> In some cases it could happen that the concurrent compiler tries to get
> a shared lock on a mutex that is already exclusively held by the main
> thread. The background thread will then block itself until the
> main thread leaves the critical section. If the main thread then also
> starts a GC while holding the lock, this will result in a deadlock.
>
> A GC can't start until the background thread reaches a safepoint, and
> the main thread can't leave the critical section until the GC has run.
>
> This CL introduces a new version of SharedMutexGuard named
> ParkedSharedMutexGuardIf. This class parks the thread when blocking
> is needed and unparks it again as soon as the lock has been acquired.
> This resolves the deadlock on safepointing.
>
> Turbofan can then simply use that class internally for
> MapUpdaterGuardIfNeeded.
>
> Bug: v8:10315, chromium:1218318
> Change-Id: Ice04b222cc979e4905791118caede26e71fca6de
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2953288
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Georg Neis <neis@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75107}

Bug: v8:10315
Bug: chromium:1218318
Change-Id: Ied5d8d8f3e4c7e036a5a42a25c43e8ca1ecc1218
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2956698
Auto-Submit: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#75108}
parent 4cd856ee
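
The deadlock described in the reverted change arises when a background thread blocks on a shared lock while the main thread holds it exclusively and then waits for a safepoint. The reverted guard avoids this by parking the thread before blocking. Below is a minimal standalone sketch of that try-lock-then-park pattern, not V8's actual code: std::shared_mutex stands in for base::SharedMutex, and Park()/Unpark() are hypothetical stand-ins for LocalHeap's parking machinery.

// Standalone sketch of the try-lock-then-park pattern described above. This
// is not V8's code: std::shared_mutex replaces base::SharedMutex, and
// Park()/Unpark() are hypothetical stand-ins for LocalHeap's parking, which
// lets the GC treat the thread as already safepointed while it blocks.
#include <shared_mutex>

struct LocalHeapLike {
  void Park() {}    // hypothetical: thread is safe to ignore at a safepoint
  void Unpark() {}  // hypothetical: thread resumes normal heap access
};

class ParkedSharedLockSketch {
 public:
  ParkedSharedLockSketch(LocalHeapLike* heap, std::shared_mutex* mutex,
                         bool enable)
      : mutex_(enable ? mutex : nullptr) {
    if (!mutex_) return;
    // Fast path: an uncontended lock needs no parking.
    if (mutex_->try_lock_shared()) return;
    // Slow path: the main thread may hold the lock exclusively and start a
    // GC before releasing it. Park so the safepoint can proceed, block on
    // the lock, then unpark once the lock has been acquired.
    heap->Park();
    mutex_->lock_shared();
    heap->Unpark();
  }
  ~ParkedSharedLockSketch() {
    if (mutex_) mutex_->unlock_shared();
  }

 private:
  std::shared_mutex* mutex_;
};

The try-lock fast path keeps the common uncontended case cheap; the thread is only parked when it would actually block, which is exactly the window in which the main thread could request a safepoint.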
@@ -356,12 +356,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   // occurrence. This is done to have a recursive shared lock on {mutex}.
   class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
    protected:
-    RecursiveSharedMutexGuardIfNeeded(LocalIsolate* local_isolate,
-                                      base::SharedMutex* mutex,
+    RecursiveSharedMutexGuardIfNeeded(base::SharedMutex* mutex,
                                       int* mutex_depth_address)
         : mutex_depth_address_(mutex_depth_address),
           initial_mutex_depth_(*mutex_depth_address_),
-          shared_mutex_guard_(local_isolate, mutex, initial_mutex_depth_ == 0) {
+          shared_mutex_guard_(mutex, initial_mutex_depth_ == 0) {
       (*mutex_depth_address_)++;
     }
@@ -374,7 +373,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
    private:
     int* const mutex_depth_address_;
     const int initial_mutex_depth_;
-    ParkedSharedMutexGuardIf<base::kShared> shared_mutex_guard_;
+    base::SharedMutexGuardIf<base::kShared> shared_mutex_guard_;
   };

   class MapUpdaterGuardIfNeeded final
@@ -382,7 +381,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
    public:
     explicit MapUpdaterGuardIfNeeded(JSHeapBroker* broker)
         : RecursiveSharedMutexGuardIfNeeded(
-              broker->local_isolate(), broker->isolate()->map_updater_access(),
+              broker->isolate()->map_updater_access(),
               &broker->map_updater_mutex_depth_) {}
   };
@@ -391,7 +390,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
    public:
     explicit BoilerplateMigrationGuardIfNeeded(JSHeapBroker* broker)
         : RecursiveSharedMutexGuardIfNeeded(
-              broker->local_isolate(),
               broker->isolate()->boilerplate_migration_access(),
               &broker->boilerplate_migration_mutex_depth_) {}
   };
@@ -65,49 +65,6 @@ class V8_NODISCARD ParkedMutexGuard {
   base::Mutex* mutex_;
 };

-template <base::MutexSharedType kIsShared,
-          base::NullBehavior Behavior = base::NullBehavior::kRequireNotNull>
-class V8_NODISCARD ParkedSharedMutexGuardIf final {
- public:
-  ParkedSharedMutexGuardIf(LocalIsolate* local_isolate,
-                           base::SharedMutex* mutex, bool enable_mutex)
-      : ParkedSharedMutexGuardIf(local_isolate->heap(), mutex, enable_mutex) {}
-  ParkedSharedMutexGuardIf(LocalHeap* local_heap, base::SharedMutex* mutex,
-                           bool enable_mutex) {
-    DCHECK_IMPLIES(Behavior == base::NullBehavior::kRequireNotNull,
-                   mutex != nullptr);
-    if (!enable_mutex) return;
-    mutex_ = mutex;
-    if (kIsShared) {
-      if (!mutex_->TryLockShared()) {
-        ParkedScope scope(local_heap);
-        mutex_->LockShared();
-      }
-    } else {
-      if (!mutex_->TryLockExclusive()) {
-        ParkedScope scope(local_heap);
-        mutex_->LockExclusive();
-      }
-    }
-  }
-
-  ParkedSharedMutexGuardIf(const ParkedSharedMutexGuardIf&) = delete;
-  ParkedSharedMutexGuardIf& operator=(const ParkedSharedMutexGuardIf&) = delete;
-
-  ~ParkedSharedMutexGuardIf() {
-    if (!mutex_) return;
-    if (kIsShared) {
-      mutex_->UnlockShared();
-    } else {
-      mutex_->UnlockExclusive();
-    }
-  }
-
- private:
-  base::SharedMutex* mutex_ = nullptr;
-};
-
 }  // namespace internal
 }  // namespace v8
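
For completeness, the mutex-depth counter kept by RecursiveSharedMutexGuardIfNeeded (see the comment in the first hunk about a recursive shared lock) can be illustrated in isolation. This is a sketch under assumed names (JSHeapBrokerLike and MapUpdaterGuardSketch are illustrative, not V8's types): only the outermost guard actually locks, and nested guards merely bump the counter.

// Sketch of the recursive-lock-by-depth-counter scheme. JSHeapBrokerLike and
// MapUpdaterGuardSketch are illustrative stand-ins, not V8's real types.
#include <cassert>
#include <shared_mutex>

struct JSHeapBrokerLike {
  std::shared_mutex map_updater_access;
  int map_updater_mutex_depth = 0;
};

class MapUpdaterGuardSketch {
 public:
  explicit MapUpdaterGuardSketch(JSHeapBrokerLike* broker)
      : depth_(&broker->map_updater_mutex_depth),
        mutex_(&broker->map_updater_access),
        locked_(*depth_ == 0) {  // only the outermost guard locks
    if (locked_) mutex_->lock_shared();
    ++*depth_;
  }
  ~MapUpdaterGuardSketch() {
    --*depth_;
    if (locked_) mutex_->unlock_shared();
  }

 private:
  int* const depth_;
  std::shared_mutex* const mutex_;
  const bool locked_;
};

void NestedUse(JSHeapBrokerLike* broker) {
  MapUpdaterGuardSketch outer(broker);  // takes the shared lock
  MapUpdaterGuardSketch inner(broker);  // depth > 0: no second acquisition
  assert(broker->map_updater_mutex_depth == 2);
}

Because base::SharedMutex (like std::shared_mutex) is not recursive, re-acquiring it on the same thread could deadlock; the depth counter is what makes nested guards on the same broker safe.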