Commit a8656e55 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Merge original_top/limit from NewSpace and PagedSpace

SpaceWithLinearArea now holds a reference to a struct containing
original_top_ and original_limit_, as well as the lock used to
synchronize them when querying IsPendingAllocation.

PagedSpace is split into PagedSpaceBase (which holds all functionality)
and PagedSpace. The actual fields are owned by PagedSpace and NewSpace.

This is done in preparation for PagedNewSpace, to allow PagedSpaceBase
and NewSpace to share the same original_top_ and original_limit_ fields.
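
The intended ownership, as a minimal compilable sketch (an
illustration only, not the exact V8 declarations: here uintptr_t and
std::shared_mutex stand in for Address and base::SharedMutex):

  #include <atomic>
  #include <cstdint>
  #include <shared_mutex>

  using Address = uintptr_t;

  class LinearAreaOriginalData {
   public:
    std::shared_mutex* linear_area_lock() { return &linear_area_lock_; }

   private:
    std::atomic<Address> original_top_{0};
    std::atomic<Address> original_limit_{0};
    std::shared_mutex linear_area_lock_;  // guards the pair above
  };

  // The shared base stores only a reference; it does not own the data.
  class SpaceWithLinearArea {
   public:
    explicit SpaceWithLinearArea(LinearAreaOriginalData& data)
        : linear_area_original_data_(data) {}

   protected:
    LinearAreaOriginalData& linear_area_original_data_;
  };

  class PagedSpaceBase : public SpaceWithLinearArea {
    using SpaceWithLinearArea::SpaceWithLinearArea;
  };

  // The concrete space owns the storage and passes it up. Binding the
  // reference before the member is constructed is safe because the
  // base only stores it. A future PagedNewSpace can hand one instance
  // to both of its parts.
  class PagedSpace : public PagedSpaceBase {
   public:
    PagedSpace() : PagedSpaceBase(linear_area_original_data_) {}

   private:
    LinearAreaOriginalData linear_area_original_data_;
  };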

Bug: v8:12612
Change-Id: Iefbbd5209c5553db4ee16cb261734e6479e0f23f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3644795
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80549}
parent 48a41953
@@ -431,7 +431,7 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
   switch (base_space->identity()) {
     case NEW_SPACE: {
       base::SharedMutexGuard<base::kShared> guard(
-          new_space_->pending_allocation_mutex());
+          new_space_->linear_area_lock());
       Address top = new_space_->original_top_acquire();
       Address limit = new_space_->original_limit_relaxed();
       DCHECK_LE(top, limit);
@@ -443,9 +443,9 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
     case MAP_SPACE: {
       PagedSpace* paged_space = static_cast<PagedSpace*>(base_space);
       base::SharedMutexGuard<base::kShared> guard(
-          paged_space->pending_allocation_mutex());
-      Address top = paged_space->original_top();
-      Address limit = paged_space->original_limit();
+          paged_space->linear_area_lock());
+      Address top = paged_space->original_top_acquire();
+      Address limit = paged_space->original_limit_relaxed();
       DCHECK_LE(top, limit);
       return top && top <= addr && addr < limit;
     }
......
@@ -2493,7 +2493,7 @@ class Heap {
   friend class NewSpace;
   friend class ObjectStatsCollector;
   friend class Page;
-  friend class PagedSpace;
+  friend class PagedSpaceBase;
   friend class PromoteYoungGenerationGC;
   friend class ReadOnlyRoots;
   friend class Scavenger;
......
@@ -448,7 +448,8 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
 // NewSpace implementation

 NewSpace::NewSpace(Heap* heap, LinearAllocationArea* allocation_info)
-    : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info) {}
+    : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info,
+                          linear_area_original_data_) {}

 void NewSpace::ResetParkedAllocationBuffers() {
   parked_allocation_buffers_.clear();
@@ -456,7 +457,8 @@ void NewSpace::ResetParkedAllocationBuffers() {
 void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
   if (allocation_info_->MergeIfAdjacent(info)) {
-    original_top_.store(allocation_info_->top(), std::memory_order_release);
+    linear_area_original_data_.set_original_top_release(
+        allocation_info_->top());
   }

 #if DEBUG
@@ -484,10 +486,12 @@ void NewSpace::VerifyTop() const {
   // Ensure that original_top_ always >= LAB start. The delta between start_
   // and top_ is still to be processed by allocation observers.
-  DCHECK_GE(original_top_, allocation_info_->start());
+  DCHECK_GE(linear_area_original_data_.get_original_top_acquire(),
+            allocation_info_->start());

   // Ensure that limit() is <= original_limit_.
-  DCHECK_LE(allocation_info_->limit(), original_limit_);
+  DCHECK_LE(allocation_info_->limit(),
+            linear_area_original_data_.get_original_limit_relaxed());
 }
 #endif  // DEBUG
@@ -654,9 +658,9 @@ void SemiSpaceNewSpace::UpdateLinearAllocationArea(Address known_top) {
   // The order of the following two stores is important.
   // See the corresponding loads in ConcurrentMarking::Run.
   {
-    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
-    original_limit_.store(limit(), std::memory_order_relaxed);
-    original_top_.store(top(), std::memory_order_release);
+    base::SharedMutexGuard<base::kExclusive> guard(linear_area_lock());
+    linear_area_original_data_.set_original_limit_relaxed(limit());
+    linear_area_original_data_.set_original_top_release(top());
   }
   to_space_.AddRangeToActiveSystemPages(top(), limit());
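
The two stores above pair with the loads on the IsPendingAllocation
path: limit is published with a relaxed store before top's release
store, so a reader that acquire-loads top also observes a limit at
least as recent. A standalone sketch of the pairing (standard-library
stand-ins for the V8 types; Publish and IsPending are illustrative
names, not V8 functions):

  #include <atomic>
  #include <cstdint>
  #include <mutex>
  #include <shared_mutex>

  using Address = uintptr_t;

  std::atomic<Address> original_top{0};
  std::atomic<Address> original_limit{0};
  std::shared_mutex linear_area_lock;

  // Writer side (cf. UpdateLinearAllocationArea): limit first, relaxed,
  // then top with a release store, under the exclusive lock.
  void Publish(Address top, Address limit) {
    std::unique_lock<std::shared_mutex> guard(linear_area_lock);
    original_limit.store(limit, std::memory_order_relaxed);
    original_top.store(top, std::memory_order_release);
  }

  // Reader side (cf. Heap::IsPendingAllocationInternal): the acquire
  // load of top synchronizes with the release store above, so the
  // relaxed load of limit cannot be older than the observed top.
  bool IsPending(Address addr) {
    std::shared_lock<std::shared_mutex> guard(linear_area_lock);
    Address top = original_top.load(std::memory_order_acquire);
    Address limit = original_limit.load(std::memory_order_relaxed);
    return top != 0 && top <= addr && addr < limit;
  }
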
@@ -744,13 +748,9 @@ bool SemiSpaceNewSpace::AddParkedAllocationBuffer(
 void SemiSpaceNewSpace::VerifyTop() const {
   NewSpace::VerifyTop();

-  // Ensure that original_top_ always >= LAB start. The delta between start_
-  // and top_ is still to be processed by allocation observers.
-  DCHECK_GE(original_top_, allocation_info_->start());
-
   // original_limit_ always needs to be end of curent to space page.
-  DCHECK_LE(allocation_info_->limit(), original_limit_);
-  DCHECK_EQ(original_limit_, to_space_.page_high());
+  DCHECK_EQ(linear_area_original_data_.get_original_limit_relaxed(),
+            to_space_.page_high());
 }

 #endif  // DEBUG
......
@@ -251,30 +251,12 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
   void VerifyTop() const override;
 #endif  // DEBUG

-  Address original_top_acquire() const {
-    return original_top_.load(std::memory_order_acquire);
-  }
-  Address original_limit_relaxed() const {
-    return original_limit_.load(std::memory_order_relaxed);
-  }
-
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
       int size_in_bytes, AllocationAlignment alignment,
       AllocationOrigin origin = AllocationOrigin::kRuntime);

-  void MoveOriginalTopForward() {
-    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
-    DCHECK_GE(top(), original_top_);
-    DCHECK_LE(top(), original_limit_);
-    original_top_.store(top(), std::memory_order_release);
-  }
-
   void MaybeFreeUnusedLab(LinearAllocationArea info);

-  base::SharedMutex* pending_allocation_mutex() {
-    return &pending_allocation_mutex_;
-  }
-
   // Creates a filler object in the linear allocation area.
   void MakeLinearAllocationAreaIterable();
@@ -333,14 +315,7 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
   base::Mutex mutex_;

-  // The top and the limit at the time of setting the linear allocation area.
-  // These values can be accessed by background tasks. Protected by
-  // pending_allocation_mutex_.
-  std::atomic<Address> original_top_;
-  std::atomic<Address> original_limit_;
-
-  // Protects original_top_ and original_limit_.
-  base::SharedMutex pending_allocation_mutex_;
+  LinearAreaOriginalData linear_area_original_data_;

   ParkedAllocationBuffersVector parked_allocation_buffers_;
......
@@ -45,26 +45,26 @@ HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
   return HeapObject();
 }

-bool PagedSpace::Contains(Address addr) const {
+bool PagedSpaceBase::Contains(Address addr) const {
   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     return true;
   }
   return Page::FromAddress(addr)->owner() == this;
 }

-bool PagedSpace::Contains(Object o) const {
+bool PagedSpaceBase::Contains(Object o) const {
   if (!o.IsHeapObject()) return false;
   return Page::FromAddress(o.ptr())->owner() == this;
 }

-void PagedSpace::UnlinkFreeListCategories(Page* page) {
+void PagedSpaceBase::UnlinkFreeListCategories(Page* page) {
   DCHECK_EQ(this, page->owner());
   page->ForAllFreeListCategories([this](FreeListCategory* category) {
     free_list()->RemoveCategory(category);
   });
 }

-size_t PagedSpace::RelinkFreeListCategories(Page* page) {
+size_t PagedSpaceBase::RelinkFreeListCategories(Page* page) {
   DCHECK_EQ(this, page->owner());
   size_t added = 0;
   page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
@@ -78,7 +78,7 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
   return added;
 }

-bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
+bool PagedSpaceBase::TryFreeLast(Address object_address, int object_size) {
   if (allocation_info_->top() != kNullAddress) {
     return allocation_info_->DecrementTopIfAdjacent(object_address,
                                                     object_size);
@@ -86,10 +86,10 @@ bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
   return false;
 }

-V8_INLINE bool PagedSpace::EnsureAllocation(int size_in_bytes,
-                                            AllocationAlignment alignment,
-                                            AllocationOrigin origin,
-                                            int* out_max_aligned_size) {
+V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
+                                                AllocationAlignment alignment,
+                                                AllocationOrigin origin,
+                                                int* out_max_aligned_size) {
   if (!is_compaction_space()) {
     // Start incremental marking before the actual allocation, this allows the
     // allocation function to mark the object black when incremental marking is
......
This diff is collapsed.
@@ -41,8 +41,8 @@ class ObjectVisitor;
 class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
  public:
   // Creates a new object iterator in a given space.
-  PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space);
-  PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space,
+  PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space);
+  PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space,
                            const Page* page);
   PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space,
                            const Page* page, Address start_address);
@@ -72,7 +72,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
   Address cur_addr_;  // Current iteration point.
   Address cur_end_;   // End iteration point.
-  const PagedSpace* const space_;
+  const PagedSpaceBase* const space_;
   ConstPageRange page_range_;
   ConstPageRange::iterator current_page_;
 #if V8_COMPRESS_POINTERS
@@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
 #endif  // V8_COMPRESS_POINTERS
 };

-class V8_EXPORT_PRIVATE PagedSpace
+class V8_EXPORT_PRIVATE PagedSpaceBase
     : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
  public:
   using iterator = PageIterator;
@@ -89,12 +89,13 @@ class V8_EXPORT_PRIVATE PagedSpace
   static const size_t kCompactionMemoryWanted = 500 * KB;

   // Creates a space with an id.
-  PagedSpace(
+  PagedSpaceBase(
       Heap* heap, AllocationSpace id, Executability executable,
-      FreeList* free_list, LinearAllocationArea* allocation_info_,
+      FreeList* free_list, LinearAllocationArea* allocation_info,
+      LinearAreaOriginalData& linear_area_original_data,
       CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);

-  ~PagedSpace() override { TearDown(); }
+  ~PagedSpaceBase() override { TearDown(); }

   // Checks whether an object/address is in this space.
   inline bool Contains(Address a) const;
@@ -308,21 +309,6 @@ class V8_EXPORT_PRIVATE PagedSpace
   void SetLinearAllocationArea(Address top, Address limit);

-  Address original_top() const { return original_top_; }
-  Address original_limit() const { return original_limit_; }
-
-  void MoveOriginalTopForward() {
-    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
-    DCHECK_GE(top(), original_top_);
-    DCHECK_LE(top(), original_limit_);
-    original_top_ = top();
-  }
-
-  base::SharedMutex* pending_allocation_mutex() {
-    return &pending_allocation_mutex_;
-  }
-
   void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
   void ReduceActiveSystemPages(Page* page,
                                ActiveSystemPages active_system_pages);
@@ -330,7 +316,7 @@ class V8_EXPORT_PRIVATE PagedSpace
  private:
   class ConcurrentAllocationMutex {
    public:
-    explicit ConcurrentAllocationMutex(const PagedSpace* space) {
+    explicit ConcurrentAllocationMutex(const PagedSpaceBase* space) {
       if (space->SupportsConcurrentAllocation()) {
         guard_.emplace(&space->space_mutex_);
       }
@@ -423,14 +409,6 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Mutex guarding any concurrent access to the space.
   mutable base::Mutex space_mutex_;

-  // The top and the limit at the time of setting the linear allocation area.
-  // These values are protected by pending_allocation_mutex_.
-  Address original_top_;
-  Address original_limit_;
-
-  // Protects original_top_ and original_limit_.
-  base::SharedMutex pending_allocation_mutex_;
-
   std::atomic<size_t> committed_physical_memory_{0};

   friend class IncrementalMarking;
@@ -440,6 +418,20 @@ class V8_EXPORT_PRIVATE PagedSpace
   friend class heap::HeapTester;
 };

+class V8_EXPORT_PRIVATE PagedSpace : public PagedSpaceBase {
+ public:
+  // Creates a space with an id.
+  PagedSpace(
+      Heap* heap, AllocationSpace id, Executability executable,
+      FreeList* free_list, LinearAllocationArea* allocation_info,
+      CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone)
+      : PagedSpaceBase(heap, id, executable, free_list, allocation_info,
+                       linear_area_original_data_, compaction_space_kind) {}
+
+ private:
+  LinearAreaOriginalData linear_area_original_data_;
+};
+
 // -----------------------------------------------------------------------------
 // Compaction space that is used temporarily during compaction.
......
@@ -41,7 +41,7 @@ class Isolate;
 class LargeObjectSpace;
 class LargePage;
 class Page;
-class PagedSpace;
+class PagedSpaceBase;
 class SemiSpace;

 // -----------------------------------------------------------------------------
@@ -477,11 +477,43 @@ class LocalAllocationBuffer {
   LinearAllocationArea allocation_info_;
 };

+class LinearAreaOriginalData {
+ public:
+  Address get_original_top_acquire() const {
+    return original_top_.load(std::memory_order_acquire);
+  }
+  Address get_original_limit_relaxed() const {
+    return original_limit_.load(std::memory_order_relaxed);
+  }
+
+  void set_original_top_release(Address top) {
+    original_top_.store(top, std::memory_order_release);
+  }
+  void set_original_limit_relaxed(Address limit) {
+    original_limit_.store(limit, std::memory_order_relaxed);
+  }
+
+  base::SharedMutex* linear_area_lock() { return &linear_area_lock_; }
+
+ private:
+  // The top and the limit at the time of setting the linear allocation area.
+  // These values can be accessed by background tasks. Protected by
+  // pending_allocation_mutex_.
+  std::atomic<Address> original_top_ = 0;
+  std::atomic<Address> original_limit_ = 0;
+
+  // Protects original_top_ and original_limit_.
+  base::SharedMutex linear_area_lock_;
+};
+
 class SpaceWithLinearArea : public Space {
  public:
-  SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list,
-                      LinearAllocationArea* allocation_info)
-      : Space(heap, id, free_list), allocation_info_(allocation_info) {}
+  SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list,
+                      LinearAllocationArea* allocation_info,
+                      LinearAreaOriginalData& linear_area_original_data)
+      : Space(heap, id, free_list),
+        allocation_info_(allocation_info),
+        linear_area_original_data_(linear_area_original_data) {}

   virtual bool SupportsAllocationObserver() const = 0;
@@ -546,6 +578,24 @@ class SpaceWithLinearArea : public Space {
   AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
                      AllocationOrigin origin = AllocationOrigin::kRuntime);

+  base::SharedMutex* linear_area_lock() {
+    return linear_area_original_data_.linear_area_lock();
+  }
+
+  Address original_top_acquire() const {
+    return linear_area_original_data_.get_original_top_acquire();
+  }
+  Address original_limit_relaxed() const {
+    return linear_area_original_data_.get_original_limit_relaxed();
+  }
+
+  void MoveOriginalTopForward() {
+    base::SharedMutexGuard<base::kExclusive> guard(linear_area_lock());
+    DCHECK_GE(top(), linear_area_original_data_.get_original_top_acquire());
+    DCHECK_LE(top(), linear_area_original_data_.get_original_limit_relaxed());
+    linear_area_original_data_.set_original_top_release(top());
+  }
+
  protected:
   V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
@@ -581,6 +631,8 @@ class SpaceWithLinearArea : public Space {
 #endif  // DEBUG

   LinearAllocationArea* const allocation_info_;
+  LinearAreaOriginalData& linear_area_original_data_;
+
   bool use_lab_ = true;

   size_t allocations_origins_[static_cast<int>(
......
@@ -175,7 +175,7 @@ void Sweeper::StartSweeperTasks() {
   }
 }

-Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
+Page* Sweeper::GetSweptPageSafe(PagedSpaceBase* space) {
   base::MutexGuard guard(&mutex_);
   SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
   if (!list.empty()) {
......
@@ -20,7 +20,7 @@ namespace internal {
 class InvalidatedSlotsCleanup;
 class MajorNonAtomicMarkingState;
 class Page;
-class PagedSpace;
+class PagedSpaceBase;
 class Space;

 enum class FreeSpaceTreatmentMode { kIgnoreFreeSpace, kZapFreeSpace };
@@ -106,7 +106,7 @@ class Sweeper {
   // Support concurrent sweepers from main thread
   void SupportConcurrentSweeping();

-  Page* GetSweptPageSafe(PagedSpace* space);
+  Page* GetSweptPageSafe(PagedSpaceBase* space);

  private:
   class IncrementalSweeperTask;
......