Commit a8656e55 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Merge original_top/limit from NewSpace and PagedSpace

SpaceWithLinearArea now holds a reference to a struct containing
original_top_ and original_limit_, as well as the lock used to
synchronize them when querying IsPendingAllocation.

PagedSpace is split into PagedSpaceBase (which holds all functionality)
and PagedSpace. The actual fields are owned by PagedSpace and NewSpace.

This is done in preparation for PagedNewSpace, to allow PagedSpaceBase
and NewSpace to share the same original_top_ and original_limit_ fields.

Bug: v8:12612
Change-Id: Iefbbd5209c5553db4ee16cb261734e6479e0f23f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3644795
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80549}
parent 48a41953
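In outline, the new ownership model looks like the condensed sketch below. This is a summary of the diff that follows, not the exact V8 declarations; member lists and unrelated details are trimmed.

class LinearAreaOriginalData {
 public:
  Address get_original_top_acquire() const;
  Address get_original_limit_relaxed() const;
  void set_original_top_release(Address top);
  void set_original_limit_relaxed(Address limit);
  base::SharedMutex* linear_area_lock();

 private:
  std::atomic<Address> original_top_{0};
  std::atomic<Address> original_limit_{0};
  base::SharedMutex linear_area_lock_;
};

class SpaceWithLinearArea : public Space {
  // The base class only references the data; the storage is owned by the
  // concrete space that passes it into the constructor.
  LinearAreaOriginalData& linear_area_original_data_;
};

class NewSpace : public SpaceWithLinearArea {
  LinearAreaOriginalData linear_area_original_data_;  // owned here
};

class PagedSpace : public PagedSpaceBase /* a SpaceWithLinearArea */ {
  LinearAreaOriginalData linear_area_original_data_;  // owned here
};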
......@@ -431,7 +431,7 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
switch (base_space->identity()) {
case NEW_SPACE: {
base::SharedMutexGuard<base::kShared> guard(
new_space_->pending_allocation_mutex());
new_space_->linear_area_lock());
Address top = new_space_->original_top_acquire();
Address limit = new_space_->original_limit_relaxed();
DCHECK_LE(top, limit);
......@@ -443,9 +443,9 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
case MAP_SPACE: {
PagedSpace* paged_space = static_cast<PagedSpace*>(base_space);
base::SharedMutexGuard<base::kShared> guard(
paged_space->pending_allocation_mutex());
Address top = paged_space->original_top();
Address limit = paged_space->original_limit();
paged_space->linear_area_lock());
Address top = paged_space->original_top_acquire();
Address limit = paged_space->original_limit_relaxed();
DCHECK_LE(top, limit);
return top && top <= addr && addr < limit;
}
......
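The synchronization pattern in the hunk above is: writers take the exclusive side of the per-space SharedMutex and publish original_limit_ with a relaxed store before original_top_ with a release store (see the comment referring to ConcurrentMarking::Run in new-spaces.cc below), while readers take the shared side and acquire-load the top. A minimal sketch of the two sides follows; MoveOriginalTopForward mirrors the diff but omits its DCHECKs, and the reader is written as a standalone helper purely for illustration (the real caller is Heap::IsPendingAllocationInternal above).

// Writer side: exclusive lock, then publish the new top with a release store.
void SpaceWithLinearArea::MoveOriginalTopForward() {
  base::SharedMutexGuard<base::kExclusive> guard(linear_area_lock());
  linear_area_original_data_.set_original_top_release(top());
}

// Reader side: shared lock, acquire-load the top, then check whether the
// address falls inside the pending [top, limit) range.
bool IsPendingAllocationIn(SpaceWithLinearArea* space, Address addr) {
  base::SharedMutexGuard<base::kShared> guard(space->linear_area_lock());
  Address top = space->original_top_acquire();
  Address limit = space->original_limit_relaxed();
  DCHECK_LE(top, limit);
  return top != kNullAddress && top <= addr && addr < limit;
}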
......@@ -2493,7 +2493,7 @@ class Heap {
friend class NewSpace;
friend class ObjectStatsCollector;
friend class Page;
friend class PagedSpace;
friend class PagedSpaceBase;
friend class PromoteYoungGenerationGC;
friend class ReadOnlyRoots;
friend class Scavenger;
......
......@@ -448,7 +448,8 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
// NewSpace implementation
NewSpace::NewSpace(Heap* heap, LinearAllocationArea* allocation_info)
: SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info) {}
: SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info,
linear_area_original_data_) {}
void NewSpace::ResetParkedAllocationBuffers() {
parked_allocation_buffers_.clear();
......@@ -456,7 +457,8 @@ void NewSpace::ResetParkedAllocationBuffers() {
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
if (allocation_info_->MergeIfAdjacent(info)) {
original_top_.store(allocation_info_->top(), std::memory_order_release);
linear_area_original_data_.set_original_top_release(
allocation_info_->top());
}
#if DEBUG
......@@ -484,10 +486,12 @@ void NewSpace::VerifyTop() const {
// Ensure that original_top_ always >= LAB start. The delta between start_
// and top_ is still to be processed by allocation observers.
DCHECK_GE(original_top_, allocation_info_->start());
DCHECK_GE(linear_area_original_data_.get_original_top_acquire(),
allocation_info_->start());
// Ensure that limit() is <= original_limit_.
DCHECK_LE(allocation_info_->limit(), original_limit_);
DCHECK_LE(allocation_info_->limit(),
linear_area_original_data_.get_original_limit_relaxed());
}
#endif // DEBUG
......@@ -654,9 +658,9 @@ void SemiSpaceNewSpace::UpdateLinearAllocationArea(Address known_top) {
// The order of the following two stores is important.
// See the corresponding loads in ConcurrentMarking::Run.
{
base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
base::SharedMutexGuard<base::kExclusive> guard(linear_area_lock());
linear_area_original_data_.set_original_limit_relaxed(limit());
linear_area_original_data_.set_original_top_release(top());
}
to_space_.AddRangeToActiveSystemPages(top(), limit());
......@@ -744,13 +748,9 @@ bool SemiSpaceNewSpace::AddParkedAllocationBuffer(
void SemiSpaceNewSpace::VerifyTop() const {
NewSpace::VerifyTop();
// Ensure that original_top_ always >= LAB start. The delta between start_
// and top_ is still to be processed by allocation observers.
DCHECK_GE(original_top_, allocation_info_->start());
// original_limit_ always needs to be end of curent to space page.
DCHECK_LE(allocation_info_->limit(), original_limit_);
DCHECK_EQ(original_limit_, to_space_.page_high());
DCHECK_EQ(linear_area_original_data_.get_original_limit_relaxed(),
to_space_.page_high());
}
#endif // DEBUG
......
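One detail worth noting in the NewSpace constructor above (and in the new PagedSpace constructor later in this diff): the derived class passes a reference to its own linear_area_original_data_ member into the base-class constructor before that member has been constructed. This is well-defined as long as the base constructor only binds the reference and does not use it, which is all SpaceWithLinearArea does. A minimal standalone illustration of the pattern (not V8 code):

struct Data {
  int value = 0;
};

struct Base {
  explicit Base(Data& data) : data_(data) {}  // only stores the reference
  Data& data_;
};

struct Derived : Base {
  // data_ is constructed after Base, but binding the reference in the Base
  // mem-initializer is fine because it is not dereferenced until after
  // construction of Derived completes.
  Derived() : Base(data_) {}
  Data data_;
};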
......@@ -251,30 +251,12 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
void VerifyTop() const override;
#endif // DEBUG
Address original_top_acquire() const {
return original_top_.load(std::memory_order_acquire);
}
Address original_limit_relaxed() const {
return original_limit_.load(std::memory_order_relaxed);
}
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
void MoveOriginalTopForward() {
base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
DCHECK_GE(top(), original_top_);
DCHECK_LE(top(), original_limit_);
original_top_.store(top(), std::memory_order_release);
}
void MaybeFreeUnusedLab(LinearAllocationArea info);
base::SharedMutex* pending_allocation_mutex() {
return &pending_allocation_mutex_;
}
// Creates a filler object in the linear allocation area.
void MakeLinearAllocationAreaIterable();
......@@ -333,14 +315,7 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
base::Mutex mutex_;
// The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks. Protected by
// pending_allocation_mutex_.
std::atomic<Address> original_top_;
std::atomic<Address> original_limit_;
// Protects original_top_ and original_limit_.
base::SharedMutex pending_allocation_mutex_;
LinearAreaOriginalData linear_area_original_data_;
ParkedAllocationBuffersVector parked_allocation_buffers_;
......
......@@ -45,26 +45,26 @@ HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
return HeapObject();
}
bool PagedSpace::Contains(Address addr) const {
bool PagedSpaceBase::Contains(Address addr) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
return Page::FromAddress(addr)->owner() == this;
}
bool PagedSpace::Contains(Object o) const {
bool PagedSpaceBase::Contains(Object o) const {
if (!o.IsHeapObject()) return false;
return Page::FromAddress(o.ptr())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
void PagedSpaceBase::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
free_list()->RemoveCategory(category);
});
}
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
size_t PagedSpaceBase::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
......@@ -78,7 +78,7 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
return added;
}
bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
bool PagedSpaceBase::TryFreeLast(Address object_address, int object_size) {
if (allocation_info_->top() != kNullAddress) {
return allocation_info_->DecrementTopIfAdjacent(object_address,
object_size);
......@@ -86,10 +86,10 @@ bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
return false;
}
V8_INLINE bool PagedSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
if (!is_compaction_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
......
......@@ -41,8 +41,8 @@ class ObjectVisitor;
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space);
PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space,
PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space);
PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space,
const Page* page);
PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space,
const Page* page, Address start_address);
......@@ -72,7 +72,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
const PagedSpace* const space_;
const PagedSpaceBase* const space_;
ConstPageRange page_range_;
ConstPageRange::iterator current_page_;
#if V8_COMPRESS_POINTERS
......@@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
#endif // V8_COMPRESS_POINTERS
};
class V8_EXPORT_PRIVATE PagedSpace
class V8_EXPORT_PRIVATE PagedSpaceBase
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
......@@ -89,12 +89,13 @@ class V8_EXPORT_PRIVATE PagedSpace
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
PagedSpace(
PagedSpaceBase(
Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list, LinearAllocationArea* allocation_info_,
FreeList* free_list, LinearAllocationArea* allocation_info,
LinearAreaOriginalData& linear_area_original_data,
CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
~PagedSpaceBase() override { TearDown(); }
// Checks whether an object/address is in this space.
inline bool Contains(Address a) const;
......@@ -308,21 +309,6 @@ class V8_EXPORT_PRIVATE PagedSpace
void SetLinearAllocationArea(Address top, Address limit);
Address original_top() const { return original_top_; }
Address original_limit() const { return original_limit_; }
void MoveOriginalTopForward() {
base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
DCHECK_GE(top(), original_top_);
DCHECK_LE(top(), original_limit_);
original_top_ = top();
}
base::SharedMutex* pending_allocation_mutex() {
return &pending_allocation_mutex_;
}
void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
void ReduceActiveSystemPages(Page* page,
ActiveSystemPages active_system_pages);
......@@ -330,7 +316,7 @@ class V8_EXPORT_PRIVATE PagedSpace
private:
class ConcurrentAllocationMutex {
public:
explicit ConcurrentAllocationMutex(const PagedSpace* space) {
explicit ConcurrentAllocationMutex(const PagedSpaceBase* space) {
if (space->SupportsConcurrentAllocation()) {
guard_.emplace(&space->space_mutex_);
}
......@@ -423,14 +409,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// Mutex guarding any concurrent access to the space.
mutable base::Mutex space_mutex_;
// The top and the limit at the time of setting the linear allocation area.
// These values are protected by pending_allocation_mutex_.
Address original_top_;
Address original_limit_;
// Protects original_top_ and original_limit_.
base::SharedMutex pending_allocation_mutex_;
std::atomic<size_t> committed_physical_memory_{0};
friend class IncrementalMarking;
......@@ -440,6 +418,20 @@ class V8_EXPORT_PRIVATE PagedSpace
friend class heap::HeapTester;
};
class V8_EXPORT_PRIVATE PagedSpace : public PagedSpaceBase {
public:
// Creates a space with an id.
PagedSpace(
Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list, LinearAllocationArea* allocation_info,
CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone)
: PagedSpaceBase(heap, id, executable, free_list, allocation_info,
linear_area_original_data_, compaction_space_kind) {}
private:
LinearAreaOriginalData linear_area_original_data_;
};
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
......
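The commit message states that this split is preparation for PagedNewSpace. A hypothetical follow-up (not part of this change; the class name and constructor arguments below are assumptions made purely for illustration) could back a new space with pages while sharing the LinearAreaOriginalData owned by the NewSpace front end, simply by passing the same reference into PagedSpaceBase:

// Hypothetical sketch only: PagedSpaceForNewSpace and its exact signature
// are assumptions; this change merely makes such sharing possible.
class PagedSpaceForNewSpace : public PagedSpaceBase {
 public:
  PagedSpaceForNewSpace(Heap* heap, FreeList* free_list,
                        LinearAllocationArea* allocation_info,
                        LinearAreaOriginalData& shared_original_data)
      : PagedSpaceBase(heap, NEW_SPACE, NOT_EXECUTABLE, free_list,
                       allocation_info, shared_original_data) {}
};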
......@@ -41,7 +41,7 @@ class Isolate;
class LargeObjectSpace;
class LargePage;
class Page;
class PagedSpace;
class PagedSpaceBase;
class SemiSpace;
// -----------------------------------------------------------------------------
......@@ -477,11 +477,43 @@ class LocalAllocationBuffer {
LinearAllocationArea allocation_info_;
};
class LinearAreaOriginalData {
public:
Address get_original_top_acquire() const {
return original_top_.load(std::memory_order_acquire);
}
Address get_original_limit_relaxed() const {
return original_limit_.load(std::memory_order_relaxed);
}
void set_original_top_release(Address top) {
original_top_.store(top, std::memory_order_release);
}
void set_original_limit_relaxed(Address limit) {
original_limit_.store(limit, std::memory_order_relaxed);
}
base::SharedMutex* linear_area_lock() { return &linear_area_lock_; }
private:
// The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks. Protected by
// pending_allocation_mutex_.
std::atomic<Address> original_top_ = 0;
std::atomic<Address> original_limit_ = 0;
// Protects original_top_ and original_limit_.
base::SharedMutex linear_area_lock_;
};
class SpaceWithLinearArea : public Space {
public:
SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list,
LinearAllocationArea* allocation_info)
: Space(heap, id, free_list), allocation_info_(allocation_info) {}
LinearAllocationArea* allocation_info,
LinearAreaOriginalData& linear_area_original_data)
: Space(heap, id, free_list),
allocation_info_(allocation_info),
linear_area_original_data_(linear_area_original_data) {}
virtual bool SupportsAllocationObserver() const = 0;
......@@ -546,6 +578,24 @@ class SpaceWithLinearArea : public Space {
AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
base::SharedMutex* linear_area_lock() {
return linear_area_original_data_.linear_area_lock();
}
Address original_top_acquire() const {
return linear_area_original_data_.get_original_top_acquire();
}
Address original_limit_relaxed() const {
return linear_area_original_data_.get_original_limit_relaxed();
}
void MoveOriginalTopForward() {
base::SharedMutexGuard<base::kExclusive> guard(linear_area_lock());
DCHECK_GE(top(), linear_area_original_data_.get_original_top_acquire());
DCHECK_LE(top(), linear_area_original_data_.get_original_limit_relaxed());
linear_area_original_data_.set_original_top_release(top());
}
protected:
V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
......@@ -581,6 +631,8 @@ class SpaceWithLinearArea : public Space {
#endif // DEBUG
LinearAllocationArea* const allocation_info_;
LinearAreaOriginalData& linear_area_original_data_;
bool use_lab_ = true;
size_t allocations_origins_[static_cast<int>(
......
......@@ -175,7 +175,7 @@ void Sweeper::StartSweeperTasks() {
}
}
Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
Page* Sweeper::GetSweptPageSafe(PagedSpaceBase* space) {
base::MutexGuard guard(&mutex_);
SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
if (!list.empty()) {
......
......@@ -20,7 +20,7 @@ namespace internal {
class InvalidatedSlotsCleanup;
class MajorNonAtomicMarkingState;
class Page;
class PagedSpace;
class PagedSpaceBase;
class Space;
enum class FreeSpaceTreatmentMode { kIgnoreFreeSpace, kZapFreeSpace };
......@@ -106,7 +106,7 @@ class Sweeper {
// Support concurrent sweepers from main thread
void SupportConcurrentSweeping();
Page* GetSweptPageSafe(PagedSpace* space);
Page* GetSweptPageSafe(PagedSpaceBase* space);
private:
class IncrementalSweeperTask;
......