Commit ea0496e6 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Split NewSpace to base and concrete classes

This CL is the first step towards creating a paged new space.
It creates a base class for new space that holds all fields not
specific to the semispace-based implementation, as well as the
methods that do not interact with the semispaces. Methods are moved
as-is to the new base class.

Future CLs will rename the classes, split/refactor additional methods,
and make other methods virtual.

Bug: v8:12612
Change-Id: Ibd8ec5135d66daf0fd025493bfaff537d61e049f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3584120
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80024}
parent 4441860c
@@ -38,16 +38,27 @@ bool SemiSpace::ContainsSlow(Address a) const {
 }
 
 // --------------------------------------------------------------------------
-// NewSpace
+// NewSpaceBase
 
-bool NewSpace::Contains(Object o) const {
+bool NewSpaceBase::Contains(Object o) const {
   return o.IsHeapObject() && Contains(HeapObject::cast(o));
 }
 
-bool NewSpace::Contains(HeapObject o) const {
+bool NewSpaceBase::Contains(HeapObject o) const {
   return BasicMemoryChunk::FromHeapObject(o)->InNewSpace();
 }
 
+V8_WARN_UNUSED_RESULT inline AllocationResult
+NewSpaceBase::AllocateRawSynchronized(int size_in_bytes,
+                                      AllocationAlignment alignment,
+                                      AllocationOrigin origin) {
+  base::MutexGuard guard(&mutex_);
+  return AllocateRaw(size_in_bytes, alignment, origin);
+}
+
+// -----------------------------------------------------------------------------
+// NewSpace
+
 bool NewSpace::ContainsSlow(Address a) const {
   return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
 }
@@ -82,15 +93,6 @@ HeapObject SemiSpaceObjectIterator::Next() {
   return HeapObject();
 }
 
-// -----------------------------------------------------------------------------
-// NewSpace
-
-V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
-    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
-  base::MutexGuard guard(&mutex_);
-  return AllocateRaw(size_in_bytes, alignment, origin);
-}
-
 }  // namespace internal
 }  // namespace v8
...
@@ -445,16 +445,58 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
   limit_ = end;
 }
 
-size_t NewSpace::CommittedPhysicalMemory() const {
-  if (!base::OS::HasLazyCommits()) return CommittedMemory();
-  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
-  size_t size = to_space_.CommittedPhysicalMemory();
-  if (from_space_.IsCommitted()) {
-    size += from_space_.CommittedPhysicalMemory();
-  }
-  return size;
-}
-
+// -----------------------------------------------------------------------------
+// NewSpaceBase implementation
+
+NewSpaceBase::NewSpaceBase(Heap* heap, LinearAllocationArea* allocation_info)
+    : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info) {}
+
+void NewSpaceBase::ResetParkedAllocationBuffers() {
+  parked_allocation_buffers_.clear();
+}
+
+void NewSpaceBase::MaybeFreeUnusedLab(LinearAllocationArea info) {
+  if (allocation_info_->MergeIfAdjacent(info)) {
+    original_top_.store(allocation_info_->top(), std::memory_order_release);
+  }
+
+#if DEBUG
+  VerifyTop();
+#endif
+}
+
+std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
+  return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
+}
+
+void NewSpaceBase::MakeLinearAllocationAreaIterable() {
+  Address to_top = top();
+  Page* page = Page::FromAddress(to_top - kTaggedSize);
+  if (page->Contains(to_top)) {
+    int remaining_in_page = static_cast<int>(page->area_end() - to_top);
+    heap_->CreateFillerObjectAt(to_top, remaining_in_page,
+                                ClearRecordedSlots::kNo);
+  }
+}
+
+void NewSpaceBase::FreeLinearAllocationArea() {
+  MakeLinearAllocationAreaIterable();
+  UpdateInlineAllocationLimit(0);
+}
+
+#if DEBUG
+void NewSpaceBase::VerifyTop() const {
+  SpaceWithLinearArea::VerifyTop();
+
+  // Ensure that original_top_ always >= LAB start. The delta between start_
+  // and top_ is still to be processed by allocation observers.
+  DCHECK_GE(original_top_, allocation_info_->start());
+
+  // Ensure that limit() is <= original_limit_.
+  DCHECK_LE(allocation_info_->limit(), original_limit_);
+}
+#endif  // DEBUG
+
 // -----------------------------------------------------------------------------
 // NewSpace implementation
@@ -462,7 +504,7 @@ NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
                    size_t initial_semispace_capacity,
                    size_t max_semispace_capacity,
                    LinearAllocationArea* allocation_info)
-    : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info),
+    : NewSpaceBase(heap, allocation_info),
       to_space_(heap, kToSpace),
       from_space_(heap, kFromSpace) {
   DCHECK(initial_semispace_capacity <= max_semispace_capacity);
@@ -485,10 +527,6 @@ NewSpace::~NewSpace() {
   from_space_.TearDown();
 }
 
-void NewSpace::ResetParkedAllocationBuffers() {
-  parked_allocation_buffers_.clear();
-}
-
 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
 
 void NewSpace::Grow() {
@@ -521,6 +559,16 @@ void NewSpace::Shrink() {
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
+size_t NewSpace::CommittedPhysicalMemory() const {
+  if (!base::OS::HasLazyCommits()) return CommittedMemory();
+  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
+  size_t size = to_space_.CommittedPhysicalMemory();
+  if (from_space_.IsCommitted()) {
+    size += from_space_.CommittedPhysicalMemory();
+  }
+  return size;
+}
+
 bool NewSpace::Rebalance() {
   // Order here is important to make use of the page pool.
   return to_space_.EnsureCurrentCapacity() &&
@@ -669,45 +717,15 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
   return true;
 }
 
-void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
-  if (allocation_info_->MergeIfAdjacent(info)) {
-    original_top_.store(allocation_info_->top(), std::memory_order_release);
-  }
-
-#if DEBUG
-  VerifyTop();
-#endif
-}
-
-std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
-  return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
-}
-
-void NewSpace::MakeLinearAllocationAreaIterable() {
-  Address to_top = top();
-  Page* page = Page::FromAddress(to_top - kTaggedSize);
-  if (page->Contains(to_top)) {
-    int remaining_in_page = static_cast<int>(page->area_end() - to_top);
-    heap_->CreateFillerObjectAt(to_top, remaining_in_page,
-                                ClearRecordedSlots::kNo);
-  }
-}
-
-void NewSpace::FreeLinearAllocationArea() {
-  MakeLinearAllocationAreaIterable();
-  UpdateInlineAllocationLimit(0);
-}
-
 #if DEBUG
 void NewSpace::VerifyTop() const {
-  SpaceWithLinearArea::VerifyTop();
+  NewSpaceBase::VerifyTop();
 
   // Ensure that original_top_ always >= LAB start. The delta between start_
   // and top_ is still to be processed by allocation observers.
   DCHECK_GE(original_top_, allocation_info_->start());
 
-  // Ensure that limit() is <= original_limit_, original_limit_ always needs
-  // to be end of curent to space page.
+  // original_limit_ always needs to be end of curent to space page.
   DCHECK_LE(allocation_info_->limit(), original_limit_);
   DCHECK_EQ(original_limit_, to_space_.page_high());
 }
...
@@ -233,18 +233,79 @@ class SemiSpaceObjectIterator : public ObjectIterator {
   Address limit_;
 };
 
+class NewSpaceBase : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
+ public:
+  using iterator = PageIterator;
+  using const_iterator = ConstPageIterator;
+
+  NewSpaceBase(Heap* heap, LinearAllocationArea* allocation_info);
+
+  inline bool Contains(Object o) const;
+  inline bool Contains(HeapObject o) const;
+
+  void ResetParkedAllocationBuffers();
+
+#if DEBUG
+  void VerifyTop() const override;
+#endif  // DEBUG
+
+  Address original_top_acquire() const {
+    return original_top_.load(std::memory_order_acquire);
+  }
+  Address original_limit_relaxed() const {
+    return original_limit_.load(std::memory_order_relaxed);
+  }
+
+  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
+      int size_in_bytes, AllocationAlignment alignment,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+  void MoveOriginalTopForward() {
+    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
+    DCHECK_GE(top(), original_top_);
+    DCHECK_LE(top(), original_limit_);
+    original_top_.store(top(), std::memory_order_release);
+  }
+
+  void MaybeFreeUnusedLab(LinearAllocationArea info);
+
+  base::SharedMutex* pending_allocation_mutex() {
+    return &pending_allocation_mutex_;
+  }
+
+  // Creates a filler object in the linear allocation area.
+  void MakeLinearAllocationAreaIterable();
+
+  // Creates a filler object in the linear allocation area and closes it.
+  void FreeLinearAllocationArea() override;
+
+ protected:
+  static const int kAllocationBufferParkingThreshold = 4 * KB;
+
+  base::Mutex mutex_;
+
+  // The top and the limit at the time of setting the linear allocation area.
+  // These values can be accessed by background tasks. Protected by
+  // pending_allocation_mutex_.
+  std::atomic<Address> original_top_;
+  std::atomic<Address> original_limit_;
+
+  // Protects original_top_ and original_limit_.
+  base::SharedMutex pending_allocation_mutex_;
+
+  ParkedAllocationBuffersVector parked_allocation_buffers_;
+
+  bool SupportsAllocationObserver() const override { return true; }
+};
+
 // -----------------------------------------------------------------------------
 // The young generation space.
 //
 // The new space consists of a contiguous pair of semispaces. It simply
 // forwards most functions to the appropriate semispace.
 
-class V8_EXPORT_PRIVATE NewSpace final
-    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
+class V8_EXPORT_PRIVATE NewSpace final : public NewSpaceBase {
  public:
-  using iterator = PageIterator;
-  using const_iterator = ConstPageIterator;
-
   NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
            size_t initial_semispace_capacity, size_t max_semispace_capacity,
            LinearAllocationArea* allocation_info);
@@ -252,10 +313,6 @@ class V8_EXPORT_PRIVATE NewSpace final
   ~NewSpace() override;
 
   inline bool ContainsSlow(Address a) const;
-  inline bool Contains(Object o) const;
-  inline bool Contains(HeapObject o) const;
-
-  void ResetParkedAllocationBuffers();
 
   // Flip the pair of spaces.
   void Flip();
@@ -380,16 +437,9 @@ class V8_EXPORT_PRIVATE NewSpace final
   }
 
 #if DEBUG
-  void VerifyTop() const;
+  void VerifyTop() const final;
 #endif  // DEBUG
 
-  Address original_top_acquire() const {
-    return original_top_.load(std::memory_order_acquire);
-  }
-  Address original_limit_relaxed() const {
-    return original_limit_.load(std::memory_order_relaxed);
-  }
-
   // Return the address of the first allocatable address in the active
   // semispace. This may be the address where the first object resides.
   Address first_allocatable_address() const { return to_space_.space_start(); }
@@ -399,10 +449,6 @@ class V8_EXPORT_PRIVATE NewSpace final
   // Set the age mark in the active semispace.
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
-  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
-      int size_in_bytes, AllocationAlignment alignment,
-      AllocationOrigin origin = AllocationOrigin::kRuntime);
-
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetLinearAllocationArea();
@@ -469,53 +515,18 @@ class V8_EXPORT_PRIVATE NewSpace final
   SemiSpace& from_space() { return from_space_; }
   SemiSpace& to_space() { return to_space_; }
 
-  void MoveOriginalTopForward() {
-    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
-    DCHECK_GE(top(), original_top_);
-    DCHECK_LE(top(), original_limit_);
-    original_top_.store(top(), std::memory_order_release);
-  }
-
-  void MaybeFreeUnusedLab(LinearAllocationArea info);
-
-  base::SharedMutex* pending_allocation_mutex() {
-    return &pending_allocation_mutex_;
-  }
-
-  // Creates a filler object in the linear allocation area.
-  void MakeLinearAllocationAreaIterable();
-
-  // Creates a filler object in the linear allocation area and closes it.
-  void FreeLinearAllocationArea() override;
-
  private:
-  static const int kAllocationBufferParkingThreshold = 4 * KB;
-
   // Update linear allocation area to match the current to-space page.
   void UpdateLinearAllocationArea(Address known_top = 0);
 
-  base::Mutex mutex_;
-
-  // The top and the limit at the time of setting the linear allocation area.
-  // These values can be accessed by background tasks. Protected by
-  // pending_allocation_mutex_.
-  std::atomic<Address> original_top_;
-  std::atomic<Address> original_limit_;
-
-  // Protects original_top_ and original_limit_.
-  base::SharedMutex pending_allocation_mutex_;
-
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
   VirtualMemory reservation_;
 
-  ParkedAllocationBuffersVector parked_allocation_buffers_;
-
   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
                         AllocationOrigin origin,
                         int* out_max_aligned_size) final;
-
-  bool SupportsAllocationObserver() const override { return true; }
-
   friend class SemiSpaceObjectIterator;
 };
...
@@ -560,7 +560,7 @@ class SpaceWithLinearArea : public Space {
                                 int* out_max_aligned_size) = 0;
 
 #if DEBUG
-  V8_EXPORT_PRIVATE void VerifyTop() const;
+  V8_EXPORT_PRIVATE virtual void VerifyTop() const;
 #endif  // DEBUG
 
   LinearAllocationArea* const allocation_info_;
...