Commit 9140d001 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Optimize Heap::IsPendingAllocation

IsPendingAllocation now loads the owning space from the object's page
header first and then checks the object only against the current LAB
(linear allocation buffer) of that particular space. Previously we
looked the object up in the LABs of all spaces.
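
The page-header load is cheap because heap pages are aligned to their size. A minimal standalone sketch of the idea (kPageSize and PageHeader are illustrative assumptions; the real code uses BasicMemoryChunk::FromHeapObject and BaseSpace::identity()):

#include <cstdint>

// Illustrative only: pages are aligned to their size, so the page header
// (which records the owning space) is found by masking the low bits of
// the object's address. One mask plus one load replaces a search over
// the LABs of all spaces.
constexpr uintptr_t kPageSize = 256 * 1024;  // assumed page size

struct PageHeader {
  int owner_identity;  // stands in for BaseSpace::identity()
};

inline PageHeader* PageHeaderOf(uintptr_t object_addr) {
  return reinterpret_cast<PageHeader*>(object_addr & ~(kPageSize - 1));
}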

This new design also makes it feasible to give each space a dedicated
mutex for its original_top/original_limit (or pending_object in the
large object spaces). This reduces contention on the mutexes.
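
A minimal sketch of the resulting per-space locking pattern, using std::shared_mutex in place of V8's base::SharedMutex (the struct and names are illustrative, not V8 code):

#include <cstdint>
#include <shared_mutex>

// Each space owns its own reader/writer mutex: background threads take a
// shared lock on the one space that owns the object, while the allocating
// thread takes an exclusive lock on the same per-space mutex. Readers of
// different spaces never contend with each other.
struct SpaceLAB {
  std::shared_mutex pending_allocation_mutex;
  uintptr_t original_top = 0;
  uintptr_t original_limit = 0;

  bool IsPending(uintptr_t addr) {  // background thread (reader)
    std::shared_lock guard(pending_allocation_mutex);
    return original_top && original_top <= addr && addr < original_limit;
  }

  void SetTopAndLimit(uintptr_t top, uintptr_t limit) {  // writer
    std::unique_lock guard(pending_allocation_mutex);
    original_top = top;
    original_limit = limit;
  }
};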

Change-Id: I8e7636410259fd03b7970084bfbbaeadb2d8ba61
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2936606
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75054}
parent 8732b2ee
@@ -136,10 +136,7 @@ void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
 }
 
 PagedSpace* Heap::paged_space(int idx) {
-  DCHECK_NE(idx, LO_SPACE);
-  DCHECK_NE(idx, NEW_SPACE);
-  DCHECK_NE(idx, CODE_LO_SPACE);
-  DCHECK_NE(idx, NEW_LO_SPACE);
+  DCHECK(idx == OLD_SPACE || idx == CODE_SPACE || idx == MAP_SPACE);
   return static_cast<PagedSpace*>(space_[idx]);
 }
@@ -586,34 +583,51 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
 }
 
 bool Heap::IsPendingAllocation(HeapObject object) {
-  if (ReadOnlyHeap::Contains(object)) return false;
-
-  // Prevents concurrent modification by main thread
-  base::SharedMutexGuard<base::kShared> guard(&pending_allocation_mutex_);
-
-  // TODO(ulan): Optimize this function to perform 3 loads at most.
-  Address addr = object.address();
-  Address top, limit;
-
-  if (new_space_) {
-    top = new_space_->original_top_acquire();
-    limit = new_space_->original_limit_relaxed();
-    DCHECK_LE(top, limit);
-    if (top && top <= addr && addr < limit) return true;
-  }
-
-  PagedSpaceIterator spaces(this);
-  for (PagedSpace* space = spaces.Next(); space != nullptr;
-       space = spaces.Next()) {
-    top = space->original_top();
-    limit = space->original_limit();
-    DCHECK_LE(top, limit);
-    if (top && top <= addr && addr < limit) return true;
-  }
-
-  if (addr == lo_space_->pending_object()) return true;
-  if (new_lo_space_ && addr == new_lo_space_->pending_object()) return true;
-  if (addr == code_lo_space_->pending_object()) return true;
-  return false;
+  DCHECK(deserialization_complete());
+
+  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
+  if (chunk->InReadOnlySpace()) return false;
+
+  BaseSpace* base_space = chunk->owner();
+  Address addr = object.address();
+
+  switch (base_space->identity()) {
+    case NEW_SPACE: {
+      base::SharedMutexGuard<base::kShared> guard(
+          new_space_->pending_allocation_mutex());
+      Address top = new_space_->original_top_acquire();
+      Address limit = new_space_->original_limit_relaxed();
+      DCHECK_LE(top, limit);
+      return top && top <= addr && addr < limit;
+    }
+
+    case OLD_SPACE:
+    case CODE_SPACE:
+    case MAP_SPACE: {
+      PagedSpace* paged_space = static_cast<PagedSpace*>(base_space);
+      base::SharedMutexGuard<base::kShared> guard(
+          paged_space->pending_allocation_mutex());
+      Address top = paged_space->original_top();
+      Address limit = paged_space->original_limit();
+      DCHECK_LE(top, limit);
+      return top && top <= addr && addr < limit;
+    }
+
+    case LO_SPACE:
+    case CODE_LO_SPACE:
+    case NEW_LO_SPACE: {
+      LargeObjectSpace* large_space =
+          static_cast<LargeObjectSpace*>(base_space);
+      base::SharedMutexGuard<base::kShared> guard(
+          large_space->pending_allocation_mutex());
+      return addr == large_space->pending_object();
+    }
+
+    case RO_SPACE:
+      UNREACHABLE();
+  }
+
+  UNREACHABLE();
 }
 
 void Heap::ExternalStringTable::AddString(String string) {
...
@@ -2429,9 +2429,6 @@ class Heap {
   HeapObject pending_layout_change_object_;
 
-  // This mutex protects original_top/limit and pending_object for all spaces.
-  base::SharedMutex pending_allocation_mutex_;
-
   base::Mutex unprotected_memory_chunks_mutex_;
   std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
   bool unprotected_memory_chunks_registry_enabled_ = false;
...
@@ -438,8 +438,7 @@ void LargeObjectSpace::Print() {
 #endif  // DEBUG
 
 void LargeObjectSpace::UpdatePendingObject(HeapObject object) {
-  base::SharedMutexGuard<base::kExclusive> guard(
-      &heap_->pending_allocation_mutex_);
+  base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
   pending_object_.store(object.address(), std::memory_order_release);
 }
...
@@ -123,6 +123,10 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
     pending_object_.store(0, std::memory_order_release);
   }
 
+  base::SharedMutex* pending_allocation_mutex() {
+    return &pending_allocation_mutex_;
+  }
+
  protected:
   LargeObjectSpace(Heap* heap, AllocationSpace id);
@@ -136,8 +140,14 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   int page_count_;                    // number of chunks
   std::atomic<size_t> objects_size_;  // size of objects
   base::Mutex allocation_mutex_;
 
+  // Current potentially uninitialized object. Protected by
+  // pending_allocation_mutex_.
   std::atomic<Address> pending_object_;
 
+  // Used to protect pending_object_.
+  base::SharedMutex pending_allocation_mutex_;
+
  private:
   friend class LargeObjectSpaceObjectIterator;
 };
...
@@ -474,8 +474,7 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
   // The order of the following two stores is important.
   // See the corresponding loads in ConcurrentMarking::Run.
   {
-    base::SharedMutexGuard<base::kExclusive> guard(
-        &heap_->pending_allocation_mutex_);
+    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
     original_limit_.store(limit(), std::memory_order_relaxed);
     original_top_.store(top(), std::memory_order_release);
   }
...
@@ -457,8 +457,7 @@ class V8_EXPORT_PRIVATE NewSpace
   SemiSpace& to_space() { return to_space_; }
 
   void MoveOriginalTopForward() {
-    base::SharedMutexGuard<base::kExclusive> guard(
-        &heap_->pending_allocation_mutex_);
+    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
     DCHECK_GE(top(), original_top_);
     DCHECK_LE(top(), original_limit_);
     original_top_.store(top(), std::memory_order_release);
@@ -466,6 +465,10 @@ class V8_EXPORT_PRIVATE NewSpace
   void MaybeFreeUnusedLab(LinearAllocationArea info);
 
+  base::SharedMutex* pending_allocation_mutex() {
+    return &pending_allocation_mutex_;
+  }
+
  private:
   static const int kAllocationBufferParkingThreshold = 4 * KB;
@@ -475,10 +478,14 @@ class V8_EXPORT_PRIVATE NewSpace
   base::Mutex mutex_;
 
   // The top and the limit at the time of setting the linear allocation area.
-  // These values can be accessed by background tasks.
+  // These values can be accessed by background tasks. Protected by
+  // pending_allocation_mutex_.
   std::atomic<Address> original_top_;
   std::atomic<Address> original_limit_;
 
+  // Protects original_top_ and original_limit_.
+  base::SharedMutex pending_allocation_mutex_;
+
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
...
@@ -278,7 +278,7 @@ void PagedSpace::SetTopAndLimit(Address top, Address limit) {
   base::Optional<base::SharedMutexGuard<base::kExclusive>> optional_guard;
   if (!is_compaction_space())
-    optional_guard.emplace(&heap_->pending_allocation_mutex_);
+    optional_guard.emplace(&pending_allocation_mutex_);
   original_limit_ = limit;
   original_top_ = top;
 }
...
@@ -302,13 +302,16 @@ class V8_EXPORT_PRIVATE PagedSpace
   Address original_limit() { return original_limit_; }
 
   void MoveOriginalTopForward() {
-    base::SharedMutexGuard<base::kExclusive> guard(
-        &heap_->pending_allocation_mutex_);
+    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
     DCHECK_GE(top(), original_top_);
     DCHECK_LE(top(), original_limit_);
     original_top_ = top();
   }
 
+  base::SharedMutex* pending_allocation_mutex() {
+    return &pending_allocation_mutex_;
+  }
+
  private:
   class ConcurrentAllocationMutex {
    public:
@@ -415,10 +418,13 @@ class V8_EXPORT_PRIVATE PagedSpace
   base::Mutex space_mutex_;
 
   // The top and the limit at the time of setting the linear allocation area.
-  // These values are protected by Heap::pending_allocation_mutex_.
+  // These values are protected by pending_allocation_mutex_.
   Address original_top_;
   Address original_limit_;
 
+  // Protects original_top_ and original_limit_.
+  base::SharedMutex pending_allocation_mutex_;
+
   friend class IncrementalMarking;
   friend class MarkCompactCollector;
...