Commit f2fd431a authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Use base::SharedMutex in Heap::IsPendingAllocation

Use a read-write lock to protect original_top, original_limit and
pending_object for all spaces. This way Heap::IsPendingAllocation is
always guaranteed to read a consistent top/limit pair as well as the
most recent values of those fields.

The main thread will acquire an exclusive lock to update those fields.
Concurrent Turbofan threads will use shared locks to read them.
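
(Illustration, not part of the CL: the scheme reduces to the sketch
below, with std::shared_mutex standing in for V8's base::SharedMutex;
the class and function names are hypothetical.)

#include <cstdint>
#include <mutex>
#include <shared_mutex>

using Address = std::uintptr_t;

class SpaceSketch {
 public:
  // Main thread (writer): exclusive lock, so top and limit always
  // change together.
  void SetTopAndLimit(Address top, Address limit) {
    std::unique_lock<std::shared_mutex> guard(pending_allocation_mutex_);
    original_limit_ = limit;
    original_top_ = top;
  }

  // Concurrent Turbofan thread (reader): shared lock, so it always
  // observes a consistent, up-to-date top/limit pair.
  bool IsPendingAllocation(Address addr) {
    std::shared_lock<std::shared_mutex> guard(pending_allocation_mutex_);
    return original_top_ != 0 && original_top_ <= addr &&
           addr < original_limit_;
  }

 private:
  std::shared_mutex pending_allocation_mutex_;
  Address original_top_ = 0;
  Address original_limit_ = 0;
};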

This may be quite expensive on the Turbofan side, so landing this CL
should help us figure out how big a regression this simple fix would
be. For main thread execution, performance should be okay, since the
lock is only taken on the allocation slow path.

Bug: v8:11778, chromium:1213266
Change-Id: I9464f53fd50057ec2540ab5b79f74ee52a5d7500
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2903143
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74814}
parent 2542ce25
@@ -11,6 +11,7 @@
 // Avoid including anything but `heap.h` from `src/heap` where possible.
 #include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
+#include "src/base/platform/mutex.h"
 #include "src/base/platform/platform.h"
 #include "src/base/sanitizer/msan.h"
 #include "src/common/assert-scope.h"
@@ -24,6 +25,7 @@
 #include "src/heap/memory-chunk.h"
 #include "src/heap/new-spaces-inl.h"
 #include "src/heap/paged-spaces-inl.h"
+#include "src/heap/read-only-heap.h"
 #include "src/heap/read-only-spaces.h"
 #include "src/heap/spaces-inl.h"
 #include "src/heap/third-party/heap-api.h"
@@ -583,6 +585,11 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
 }
 
 bool Heap::IsPendingAllocation(HeapObject object) {
+  if (ReadOnlyHeap::Contains(object)) return false;
+
+  // Prevents concurrent modification by main thread
+  base::SharedMutexGuard<base::kShared> guard(&pending_allocation_mutex_);
+
   // TODO(ulan): Optimize this function to perform 3 loads at most.
   Address addr = object.address();
   Address top, limit;
@@ -590,14 +597,16 @@ bool Heap::IsPendingAllocation(HeapObject object) {
   if (new_space_) {
     top = new_space_->original_top_acquire();
     limit = new_space_->original_limit_relaxed();
+    DCHECK_LE(top, limit);
     if (top && top <= addr && addr < limit) return true;
   }
 
   PagedSpaceIterator spaces(this);
   for (PagedSpace* space = spaces.Next(); space != nullptr;
        space = spaces.Next()) {
-    top = space->original_top_acquire();
-    limit = space->original_limit_relaxed();
+    top = space->original_top();
+    limit = space->original_limit();
+    DCHECK_LE(top, limit);
     if (top && top <= addr && addr < limit) return true;
   }
 
   if (addr == lo_space_->pending_object()) return true;
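
(Aside, for context: new space keeps its atomic accessors above because
concurrent marking still reads top and limit without taking the lock.
The pre-existing ordering protocol stores the limit with relaxed order,
then the top with release order, so a lock-free reader that loads the
top with acquire order sees a limit at least as recent. A minimal
sketch of that pairing, with hypothetical free functions:)

#include <atomic>
#include <cstdint>

using Address = std::uintptr_t;

std::atomic<Address> original_top_{0};
std::atomic<Address> original_limit_{0};

// Writer: publish the limit first (relaxed), then the top (release).
void Publish(Address top, Address limit) {
  original_limit_.store(limit, std::memory_order_relaxed);
  original_top_.store(top, std::memory_order_release);
}

// Lock-free reader: the acquire load of the top synchronizes with the
// release store above, so the limit loaded afterwards is at least as
// recent as the top it was paired with.
bool MayBePending(Address addr) {
  Address top = original_top_.load(std::memory_order_acquire);
  Address limit = original_limit_.load(std::memory_order_relaxed);
  return top && top <= addr && addr < limit;
}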
@@ -20,6 +20,7 @@
 #include "src/base/atomic-utils.h"
 #include "src/base/enum-set.h"
 #include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
 #include "src/builtins/accessors.h"
 #include "src/common/assert-scope.h"
 #include "src/common/globals.h"
@@ -2398,6 +2399,9 @@ class Heap {
   HeapObject pending_layout_change_object_;
 
+  // This mutex protects original_top/limit and pending_object for all spaces.
+  base::SharedMutex pending_allocation_mutex_;
+
   base::Mutex unprotected_memory_chunks_mutex_;
   std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
   bool unprotected_memory_chunks_registry_enabled_ = false;
@@ -2441,6 +2445,7 @@ class Heap {
   friend class ScavengeTaskObserver;
   friend class IncrementalMarking;
   friend class IncrementalMarkingJob;
+  friend class LargeObjectSpace;
   friend class LocalHeap;
   friend class MarkingBarrier;
   friend class OldLargeObjectSpace;
@@ -142,7 +142,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
   if (page == nullptr) return AllocationResult::Retry(identity());
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   HeapObject object = page->GetObject();
-  pending_object_.store(object.address(), std::memory_order_release);
+  UpdatePendingObject(object);
   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
       heap()->GCFlagsForIncrementalMarking(),
       kGCCallbackScheduleIdleGarbageCollection);
@@ -437,6 +437,12 @@ void LargeObjectSpace::Print() {
 }
 #endif  // DEBUG
 
+void LargeObjectSpace::UpdatePendingObject(HeapObject object) {
+  base::SharedMutexGuard<base::kExclusive> guard(
+      &heap_->pending_allocation_mutex_);
+  pending_object_.store(object.address(), std::memory_order_release);
+}
+
 OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
     : LargeObjectSpace(heap, LO_SPACE) {}
@@ -469,7 +475,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   HeapObject result = page->GetObject();
   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   page->SetFlag(MemoryChunk::TO_PAGE);
-  pending_object_.store(result.address(), std::memory_order_release);
+  UpdatePendingObject(result);
 #ifdef ENABLE_MINOR_MC
   if (FLAG_minor_mc) {
     page->AllocateYoungGenerationBitmap();
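
(Aside: large object spaces publish a single pending object address
instead of a top/limit pair. UpdatePendingObject above takes the
exclusive lock so that Heap::IsPendingAllocation, holding the shared
lock, can compare against it race-free. A minimal sketch under the
same stand-in assumptions as before, with hypothetical names:)

#include <cstdint>
#include <mutex>
#include <shared_mutex>

using Address = std::uintptr_t;

class LargeSpaceSketch {
 public:
  explicit LargeSpaceSketch(std::shared_mutex* heap_mutex)
      : heap_mutex_(heap_mutex) {}

  // Allocation path (main thread): publish the freshly allocated
  // object under the exclusive lock.
  void UpdatePendingObject(Address object) {
    std::unique_lock<std::shared_mutex> guard(*heap_mutex_);
    pending_object_ = object;
  }

  // Reader (caller already holds the shared lock, as in
  // Heap::IsPendingAllocation): a single comparison suffices.
  bool IsPending(Address addr) const { return addr == pending_object_; }

 private:
  std::shared_mutex* heap_mutex_;  // Heap-owned, shared by all spaces.
  Address pending_object_ = 0;
};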
@@ -130,6 +130,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   LargePage* AllocateLargePage(int object_size, Executability executable);
 
+  void UpdatePendingObject(HeapObject object);
+
   std::atomic<size_t> size_;          // allocated bytes
   int page_count_;                    // number of chunks
   std::atomic<size_t> objects_size_;  // size of objects
@@ -473,8 +473,12 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
   allocation_info_.Reset(new_top, to_space_.page_high());
   // The order of the following two stores is important.
   // See the corresponding loads in ConcurrentMarking::Run.
-  original_limit_.store(limit(), std::memory_order_relaxed);
-  original_top_.store(top(), std::memory_order_release);
+  {
+    base::SharedMutexGuard<base::kExclusive> guard(
+        &heap_->pending_allocation_mutex_);
+    original_limit_.store(limit(), std::memory_order_relaxed);
+    original_top_.store(top(), std::memory_order_release);
+  }
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   UpdateInlineAllocationLimit(0);
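
(Aside: note that the stores above stay atomic even inside the
exclusive guard. The lock only covers lock-taking readers such as
Heap::IsPendingAllocation; ConcurrentMarking::Run still loads these
fields without the lock, so plain non-atomic fields would be a data
race. A hypothetical sketch of combining both mechanisms:)

#include <atomic>
#include <cstdint>
#include <mutex>
#include <shared_mutex>

using Address = std::uintptr_t;

std::shared_mutex mutex_;
std::atomic<Address> top_{0};
std::atomic<Address> limit_{0};

// Writer: the exclusive lock serves lock-taking readers; the atomic
// relaxed/release stores serve readers that take no lock at all.
void Update(Address top, Address limit) {
  std::unique_lock<std::shared_mutex> guard(mutex_);
  limit_.store(limit, std::memory_order_relaxed);
  top_.store(top, std::memory_order_release);
}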
@@ -457,6 +457,8 @@ class V8_EXPORT_PRIVATE NewSpace
   SemiSpace& to_space() { return to_space_; }
 
   void MoveOriginalTopForward() {
+    base::SharedMutexGuard<base::kExclusive> guard(
+        &heap_->pending_allocation_mutex_);
     DCHECK_GE(top(), original_top_);
     DCHECK_LE(top(), original_limit_);
     original_top_.store(top(), std::memory_order_release);
@@ -275,9 +275,11 @@ void PagedSpace::SetTopAndLimit(Address top, Address limit) {
          Page::FromAddress(top) == Page::FromAddress(limit - 1));
   BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   allocation_info_.Reset(top, limit);
+  base::SharedMutexGuard<base::kExclusive> guard(
+      &heap_->pending_allocation_mutex_);
   // The order of the following two stores is important.
-  original_limit_.store(limit, std::memory_order_relaxed);
-  original_top_.store(top, std::memory_order_release);
+  original_limit_ = limit;
+  original_top_ = top;
 }
 
 size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
@@ -300,18 +300,16 @@ class V8_EXPORT_PRIVATE PagedSpace
   void SetLinearAllocationArea(Address top, Address limit);
 
-  Address original_top_acquire() {
-    return original_top_.load(std::memory_order_acquire);
-  }
+  Address original_top() { return original_top_; }
 
-  Address original_limit_relaxed() {
-    return original_limit_.load(std::memory_order_relaxed);
-  }
+  Address original_limit() { return original_limit_; }
 
   void MoveOriginalTopForward() {
+    base::SharedMutexGuard<base::kExclusive> guard(
+        &heap_->pending_allocation_mutex_);
     DCHECK_GE(top(), original_top_);
     DCHECK_LE(top(), original_limit_);
-    original_top_.store(top(), std::memory_order_release);
+    original_top_ = top();
   }
 
  private:
@@ -420,9 +418,9 @@
   base::Mutex space_mutex_;
 
   // The top and the limit at the time of setting the linear allocation area.
-  // These values can be accessed by background tasks.
-  std::atomic<Address> original_top_;
-  std::atomic<Address> original_limit_;
+  // These values are protected by Heap::pending_allocation_mutex_.
+  Address original_top_;
+  Address original_limit_;
 
   friend class IncrementalMarking;
   friend class MarkCompactCollector;