Commit 4b5ac613 authored by Camillo Bruni, committed by V8 LUCI CQ

[profiler] Guard all current_profiles_ access by Mutex

Refactor the code to use RecursiveMutexGuard to make it more readable
and less error-prone.

This is a tentative fix for a rare deadlock that appears in
test-cpu-profiler/CrossScriptInliningCallerLineNumbers.
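
A minimal sketch of the RAII pattern this change adopts (illustrative
only; the real base::RecursiveMutexGuard is provided by V8's
src/base/platform/mutex.h, and may differ in detail):

    // RAII guard: locks in the constructor, unlocks in the destructor,
    // so every return path releases the lock automatically.
    class RecursiveMutexGuard {
     public:
      explicit RecursiveMutexGuard(base::RecursiveMutex* mutex)
          : mutex_(mutex) {
        mutex_->Lock();
      }
      ~RecursiveMutexGuard() { mutex_->Unlock(); }
      RecursiveMutexGuard(const RecursiveMutexGuard&) = delete;
      RecursiveMutexGuard& operator=(const RecursiveMutexGuard&) = delete;

     private:
      base::RecursiveMutex* mutex_;
    };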

Bug: v8:11191
Change-Id: Ia32e7f61167f95e0fce142992c83ddff11959222
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3779690
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Patrick Thier <pthier@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81991}
parent e01e3a38
@@ -568,7 +568,7 @@ void CpuProfiler::DisableLogging() {
   code_observer_->ClearCodeMap();
 }
 
-base::TimeDelta CpuProfiler::ComputeSamplingInterval() const {
+base::TimeDelta CpuProfiler::ComputeSamplingInterval() {
   return profiles_->GetCommonSamplingInterval();
 }
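
Note: ComputeSamplingInterval() loses its const qualifier because it now
(transitively) locks current_profiles_mutex_, and locking a plain member
mutex mutates it. A hypothetical alternative, not taken in this CL, would
keep the accessors const by marking the mutex mutable:

    // Illustrative sketch only, not part of this change.
    class CpuProfilesCollection {
     public:
      base::TimeDelta GetCommonSamplingInterval() const;  // could stay const
     private:
      // A mutable member may be locked from const member functions.
      mutable base::RecursiveMutex current_profiles_mutex_;
    };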
@@ -386,7 +386,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
   void DisableLogging();
 
   // Computes a sampling interval sufficient to accomodate attached profiles.
-  base::TimeDelta ComputeSamplingInterval() const;
+  base::TimeDelta ComputeSamplingInterval();
   // Dynamically updates the sampler to use a sampling interval sufficient for
   // child profiles.
   void AdjustSamplingInterval();
@@ -887,7 +887,7 @@ size_t CodeMap::GetEstimatedMemoryUsage() const {
 }
 
 CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
-    : profiler_(nullptr), current_profiles_semaphore_(1), isolate_(isolate) {
+    : profiler_(nullptr), current_profiles_mutex_(), isolate_(isolate) {
   USE(isolate_);
 }
@@ -906,10 +906,8 @@ CpuProfilingResult CpuProfilesCollection::StartProfiling(
 CpuProfilingResult CpuProfilesCollection::StartProfiling(
     ProfilerId id, const char* title, CpuProfilingOptions options,
     std::unique_ptr<DiscardedSamplesDelegate> delegate) {
-  current_profiles_semaphore_.Wait();
+  base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
   if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
-    current_profiles_semaphore_.Signal();
     return {
         0,
         CpuProfilingStatus::kErrorTooManyProfilers,
@@ -921,7 +919,6 @@ CpuProfilingResult CpuProfilesCollection::StartProfiling(
          strcmp(profile->title(), title) == 0) ||
         profile->id() == id) {
       // Ignore attempts to start profile with the same title or id
-      current_profiles_semaphore_.Signal();
       // ... though return kAlreadyStarted to force it collect a sample.
       return {
           profile->id(),
@@ -933,7 +930,6 @@ CpuProfilingResult CpuProfilesCollection::StartProfiling(
   CpuProfile* profile = new CpuProfile(profiler_, id, title, std::move(options),
                                        std::move(delegate));
   current_profiles_.emplace_back(profile);
-  current_profiles_semaphore_.Signal();
   return {
       profile->id(),
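
Note how every early-return path in StartProfiling previously needed its
own current_profiles_semaphore_.Signal() call. A sketch (hypothetical
code, not from V8) of the bug class the scoped guard eliminates:

    // Manual pairing: one forgotten Signal() and the next Wait() blocks
    // forever.
    void Risky() {
      sem_.Wait();
      if (SomeErrorCondition()) return;  // bug: Signal() never runs
      DoWork();
      sem_.Signal();
    }

    // Scoped guard: the destructor unlocks on every path, including
    // early returns.
    void Safe() {
      base::RecursiveMutexGuard guard{&mutex_};
      if (SomeErrorCondition()) return;  // unlocked automatically
      DoWork();
    }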
@@ -942,7 +938,7 @@ CpuProfilingResult CpuProfilesCollection::StartProfiling(
 }
 
 CpuProfile* CpuProfilesCollection::StopProfiling(ProfilerId id) {
-  current_profiles_semaphore_.Wait();
+  base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
   CpuProfile* profile = nullptr;
   auto it = std::find_if(
@@ -956,37 +952,27 @@ CpuProfile* CpuProfilesCollection::StopProfiling(ProfilerId id) {
     // Convert reverse iterator to matching forward iterator.
     current_profiles_.erase(--(it.base()));
   }
-  current_profiles_semaphore_.Signal();
   return profile;
 }
 
 CpuProfile* CpuProfilesCollection::Lookup(const char* title) {
-  // Called from VM thread, and only it can mutate the list,
-  // so no locking is needed here.
   DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
-  if (title == nullptr) {
-    return nullptr;
-  }
+  if (title == nullptr) return nullptr;
   // http://crbug/51594, edge case console.profile may provide an empty title
   // and must not crash
   const bool empty_title = title[0] == '\0';
+  base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
   auto it = std::find_if(
       current_profiles_.rbegin(), current_profiles_.rend(),
       [&](const std::unique_ptr<CpuProfile>& p) {
         return (empty_title ||
                 (p->title() != nullptr && strcmp(p->title(), title) == 0));
       });
-  if (it != current_profiles_.rend()) {
-    return it->get();
-  }
+  if (it != current_profiles_.rend()) return it->get();
   return nullptr;
 }
 
 bool CpuProfilesCollection::IsLastProfileLeft(ProfilerId id) {
-  // Called from VM thread, and only it can mutate the list,
-  // so no locking is needed here.
   DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
+  base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
   if (current_profiles_.size() != 1) return false;
   return id == current_profiles_[0]->id();
 }
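
Note: Lookup() and IsLastProfileLeft() previously relied on a comment
("only the VM thread mutates the list") instead of a lock; they now lock
unconditionally. Choosing a recursive mutex means the same thread can
safely re-acquire the lock from a nested call. Illustrative sketch (the
names below are hypothetical, not the actual call chain):

    void Outer() {
      base::RecursiveMutexGuard guard{&current_profiles_mutex_};
      Inner();  // would self-deadlock on a plain, non-recursive mutex
    }

    void Inner() {
      // Same thread re-locks: fine on a RecursiveMutex, which tracks
      // recursive acquisitions by the owning thread.
      base::RecursiveMutexGuard guard{&current_profiles_mutex_};
      // ... read current_profiles_ ...
    }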
@@ -1011,7 +997,7 @@ int64_t GreatestCommonDivisor(int64_t a, int64_t b) {
 }  // namespace
 
-base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
+base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() {
   DCHECK(profiler_);
 
   int64_t base_sampling_interval_us =
@@ -1019,16 +1005,19 @@ base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
   if (base_sampling_interval_us == 0) return base::TimeDelta();
 
   int64_t interval_us = 0;
-  for (const auto& profile : current_profiles_) {
-    // Snap the profile's requested sampling interval to the next multiple of
-    // the base sampling interval.
-    int64_t profile_interval_us =
-        std::max<int64_t>(
-            (profile->sampling_interval_us() + base_sampling_interval_us - 1) /
-                base_sampling_interval_us,
-            1) *
-        base_sampling_interval_us;
-    interval_us = GreatestCommonDivisor(interval_us, profile_interval_us);
+  {
+    base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
+    for (const auto& profile : current_profiles_) {
+      // Snap the profile's requested sampling interval to the next multiple
+      // of the base sampling interval.
+      int64_t profile_interval_us =
+          std::max<int64_t>((profile->sampling_interval_us() +
+                             base_sampling_interval_us - 1) /
+                                base_sampling_interval_us,
+                            1) *
+          base_sampling_interval_us;
+      interval_us = GreatestCommonDivisor(interval_us, profile_interval_us);
+    }
   }
   return base::TimeDelta::FromMicroseconds(interval_us);
 }
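
A worked example of the snapping logic above, with illustrative numbers
(assume a base interval of 100us and two profiles requesting 250us and
1000us):

    // Self-contained recomputation of the snap-then-GCD scheme (C++17).
    #include <algorithm>
    #include <cstdint>
    #include <numeric>

    int64_t Snap(int64_t requested_us, int64_t base_us) {
      // Round the requested interval up to the next multiple of base_us,
      // with a minimum of one base interval.
      return std::max<int64_t>((requested_us + base_us - 1) / base_us, 1) *
             base_us;
    }

    // Snap(250, 100) == 300, Snap(1000, 100) == 1000, and
    // std::gcd(300, 1000) == 100: the sampler runs at 100us, which
    // evenly divides both snapped profile intervals.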
@@ -1041,8 +1030,8 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
   // As starting / stopping profiles is rare relatively to this
   // method, we don't bother minimizing the duration of lock holding,
   // e.g. copying contents of the list to a local vector.
-  current_profiles_semaphore_.Wait();
   const ProfileStackTrace empty_path;
+  base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
   for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
     ContextFilter& context_filter = profile->context_filter();
     // If the context filter check failed, omit the contents of the stack.
@@ -1061,16 +1050,14 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
                            accepts_embedder_context ? embedder_state_tag
                                                     : EmbedderStateTag::EMPTY);
   }
-  current_profiles_semaphore_.Signal();
 }
 
 void CpuProfilesCollection::UpdateNativeContextAddressForCurrentProfiles(
     Address from, Address to) {
-  current_profiles_semaphore_.Wait();
+  base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
   for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
     profile->context_filter().OnMoveEvent(from, to);
   }
-  current_profiles_semaphore_.Signal();
 }
 
 }  // namespace internal
@@ -564,7 +564,7 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
   // Finds a common sampling interval dividing each CpuProfile's interval,
   // rounded up to the nearest multiple of the CpuProfiler's sampling interval.
   // Returns 0 if no profiles are attached.
-  base::TimeDelta GetCommonSamplingInterval() const;
+  base::TimeDelta GetCommonSamplingInterval();
 
   // Called from profile generator thread.
   void AddPathToCurrentProfiles(
@@ -591,7 +591,7 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
   // Accessed by VM thread and profile generator thread.
   std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
-  base::Semaphore current_profiles_semaphore_;
+  base::RecursiveMutex current_profiles_mutex_;
   static std::atomic<ProfilerId> last_id_;
   Isolate* isolate_;
 };
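
Note: the replaced base::Semaphore, initialized with a count of 1, acted
as a binary lock, but a semaphore has no notion of an owning thread. A
sketch (hypothetical, not the confirmed deadlock path from the bug) of
why that matters for the now-locking accessors above:

    base::Semaphore sem(1);
    sem.Wait();  // acquires the single permit
    sem.Wait();  // same thread again: blocks forever; semaphores are
                 // not re-entrant, unlike base::RecursiveMutex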