Commit 4a5e2302 authored by Igor Sheludko, committed by V8 LUCI CQ

[heap] Make some mutexes recursive

Namely the ones that might be locked for a second time by the sampling
profiler while iterating the call stack.

Bug: v8:12966
Change-Id: I081de804143e5ca4da4e2296919428b2c1bff1b0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3707105
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Auto-Submit: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81192}
parent 635f12b3
......@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
base::MutexGuard guard(&code_object_registry_mutex_);
base::RecursiveMutexGuard guard(&code_object_registry_mutex_);
if (is_sorted_) {
is_sorted_ =
(code_object_registry_.empty() || code_object_registry_.back() < code);
......@@ -21,7 +21,7 @@ void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
}
void CodeObjectRegistry::ReinitializeFrom(std::vector<Address>&& code_objects) {
base::MutexGuard guard(&code_object_registry_mutex_);
base::RecursiveMutexGuard guard(&code_object_registry_mutex_);
#if DEBUG
Address last_start = kNullAddress;
......@@ -36,7 +36,7 @@ void CodeObjectRegistry::ReinitializeFrom(std::vector<Address>&& code_objects) {
}
bool CodeObjectRegistry::Contains(Address object) const {
base::MutexGuard guard(&code_object_registry_mutex_);
base::RecursiveMutexGuard guard(&code_object_registry_mutex_);
if (!is_sorted_) {
std::sort(code_object_registry_.begin(), code_object_registry_.end());
is_sorted_ = true;
......@@ -47,7 +47,7 @@ bool CodeObjectRegistry::Contains(Address object) const {
Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
Address address) const {
base::MutexGuard guard(&code_object_registry_mutex_);
base::RecursiveMutexGuard guard(&code_object_registry_mutex_);
if (!is_sorted_) {
std::sort(code_object_registry_.begin(), code_object_registry_.end());
is_sorted_ = true;
......
......@@ -30,7 +30,11 @@ class V8_EXPORT_PRIVATE CodeObjectRegistry {
// that it can be lazily sorted during GetCodeObjectStartFromInnerAddress.
mutable std::vector<Address> code_object_registry_;
mutable bool is_sorted_ = true;
mutable base::Mutex code_object_registry_mutex_;
// The mutex has to be recursive because profiler tick might happen while
// holding this lock, then the profiler will try to iterate the call stack
// which might end up calling GetCodeObjectStartFromInnerAddress() and thus
// trying to lock the mutex for a second time.
mutable base::RecursiveMutex code_object_registry_mutex_;
};
} // namespace internal
......
......@@ -202,7 +202,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
{
base::MutexGuard guard(&allocation_mutex_);
base::RecursiveMutexGuard guard(&allocation_mutex_);
AddPage(page, object_size);
}
......@@ -220,7 +220,7 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() const {
}
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
base::MutexGuard guard(&allocation_mutex_);
base::RecursiveMutexGuard guard(&allocation_mutex_);
const Address key = BasicMemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key);
if (it != chunk_map_.end()) {
......
......@@ -151,7 +151,11 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
std::atomic<size_t> size_; // allocated bytes
int page_count_; // number of chunks
std::atomic<size_t> objects_size_; // size of objects
base::Mutex allocation_mutex_;
// The mutex has to be recursive because profiler tick might happen while
// holding this lock, then the profiler will try to iterate the call stack
// which might end up calling CodeLargeObjectSpace::FindPage() and thus
// trying to lock the mutex for a second time.
base::RecursiveMutex allocation_mutex_;
// Current potentially uninitialized object. Protected by
// pending_allocation_mutex_.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment