Commit 0dc4d88c authored by Omer Katz, committed by V8 LUCI CQ

heap: Mark space methods as const

A lot of the space/chunk methods can be trivially marked as const.

There are more methods that can be made const but these will require
creating new const object iterators, so those are left out for now.

Bug: v8:12612
Change-Id: I753b8b3f7a200ecf255596c7825917e4eb600b81
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3571815
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79824}
parent ea1b92ce
...@@ -29,12 +29,12 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced { ...@@ -29,12 +29,12 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
return heap_; return heap_;
} }
AllocationSpace identity() { return id_; } AllocationSpace identity() const { return id_; }
// Returns name of the space. // Returns name of the space.
static const char* GetSpaceName(AllocationSpace space); static const char* GetSpaceName(AllocationSpace space);
const char* name() { return GetSpaceName(id_); } const char* name() const { return GetSpaceName(id_); }
void AccountCommitted(size_t bytes) { void AccountCommitted(size_t bytes) {
DCHECK_GE(committed_ + bytes, committed_); DCHECK_GE(committed_ + bytes, committed_);
...@@ -51,15 +51,15 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced { ...@@ -51,15 +51,15 @@ class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
// Return the total amount committed memory for this space, i.e., allocatable // Return the total amount committed memory for this space, i.e., allocatable
// memory and page headers. // memory and page headers.
virtual size_t CommittedMemory() { return committed_; } virtual size_t CommittedMemory() const { return committed_; }
virtual size_t MaximumCommittedMemory() { return max_committed_; } virtual size_t MaximumCommittedMemory() const { return max_committed_; }
// Approximate amount of physical memory committed for this space. // Approximate amount of physical memory committed for this space.
virtual size_t CommittedPhysicalMemory() = 0; virtual size_t CommittedPhysicalMemory() const = 0;
// Returns allocated size. // Returns allocated size.
virtual size_t Size() = 0; virtual size_t Size() const = 0;
protected: protected:
BaseSpace(Heap* heap, AllocationSpace id) BaseSpace(Heap* heap, AllocationSpace id)
......
...@@ -138,7 +138,9 @@ class BasicMemoryChunk { ...@@ -138,7 +138,9 @@ class BasicMemoryChunk {
Address address() const { return reinterpret_cast<Address>(this); } Address address() const { return reinterpret_cast<Address>(this); }
// Returns the offset of a given address to this page. // Returns the offset of a given address to this page.
inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); } inline size_t Offset(Address a) const {
return static_cast<size_t>(a - address());
}
// Some callers rely on the fact that this can operate on both // Some callers rely on the fact that this can operate on both
// tagged and aligned object addresses. // tagged and aligned object addresses.
...@@ -200,11 +202,11 @@ class BasicMemoryChunk { ...@@ -200,11 +202,11 @@ class BasicMemoryChunk {
return IsFlagSet(READ_ONLY_HEAP); return IsFlagSet(READ_ONLY_HEAP);
} }
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); } bool NeverEvacuate() const { return IsFlagSet(NEVER_EVACUATE); }
void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); } void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
bool CanAllocate() { bool CanAllocate() const {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
} }
...@@ -219,7 +221,7 @@ class BasicMemoryChunk { ...@@ -219,7 +221,7 @@ class BasicMemoryChunk {
((flags & COMPACTION_WAS_ABORTED) == 0); ((flags & COMPACTION_WAS_ABORTED) == 0);
} }
Executability executable() { Executability executable() const {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
} }
...@@ -288,7 +290,7 @@ class BasicMemoryChunk { ...@@ -288,7 +290,7 @@ class BasicMemoryChunk {
Bitmap::FromAddress(address() + kMarkingBitmapOffset)); Bitmap::FromAddress(address() + kMarkingBitmapOffset));
} }
Address HighWaterMark() { return address() + high_water_mark_; } Address HighWaterMark() const { return address() + high_water_mark_; }
static inline void UpdateHighWaterMark(Address mark) { static inline void UpdateHighWaterMark(Address mark) {
if (mark == kNullAddress) return; if (mark == kNullAddress) return;
......
...@@ -60,7 +60,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk, ...@@ -60,7 +60,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
return page; return page;
} }
size_t LargeObjectSpace::Available() { size_t LargeObjectSpace::Available() const {
// We return zero here since we cannot take advantage of already allocated // We return zero here since we cannot take advantage of already allocated
// large object memory. // large object memory.
return 0; return 0;
...@@ -226,7 +226,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size, ...@@ -226,7 +226,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
return page; return page;
} }
size_t LargeObjectSpace::CommittedPhysicalMemory() { size_t LargeObjectSpace::CommittedPhysicalMemory() const {
// On a platform that provides lazy committing of memory, we over-account // On a platform that provides lazy committing of memory, we over-account
// the actually committed memory. There is no easy way right now to support // the actually committed memory. There is no easy way right now to support
// precise accounting of committed memory in large object space. // precise accounting of committed memory in large object space.
...@@ -347,7 +347,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() { ...@@ -347,7 +347,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
objects_size_ = surviving_object_size; objects_size_ = surviving_object_size;
} }
bool LargeObjectSpace::Contains(HeapObject object) { bool LargeObjectSpace::Contains(HeapObject object) const {
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object); BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
bool owned = (chunk->owner() == this); bool owned = (chunk->owner() == this);
...@@ -357,8 +357,8 @@ bool LargeObjectSpace::Contains(HeapObject object) { ...@@ -357,8 +357,8 @@ bool LargeObjectSpace::Contains(HeapObject object) {
return owned; return owned;
} }
bool LargeObjectSpace::ContainsSlow(Address addr) { bool LargeObjectSpace::ContainsSlow(Address addr) const {
for (LargePage* page : *this) { for (const LargePage* page : *this) {
if (page->Contains(addr)) return true; if (page->Contains(addr)) return true;
} }
return false; return false;
...@@ -536,7 +536,9 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) { ...@@ -536,7 +536,9 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
return AllocationResult::FromObject(result); return AllocationResult::FromObject(result);
} }
size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); } size_t NewLargeObjectSpace::Available() const {
return capacity_ - SizeOfObjects();
}
void NewLargeObjectSpace::Flip() { void NewLargeObjectSpace::Flip() {
for (LargePage* chunk = first_page(); chunk != nullptr; for (LargePage* chunk = first_page(); chunk != nullptr;
......
...@@ -43,6 +43,9 @@ class LargePage : public MemoryChunk { ...@@ -43,6 +43,9 @@ class LargePage : public MemoryChunk {
HeapObject GetObject() { return HeapObject::FromAddress(area_start()); } HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }
LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); } LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }
const LargePage* next_page() const {
return static_cast<const LargePage*>(list_node_.next());
}
// Uncommit memory that is not in use anymore by the object. If the object // Uncommit memory that is not in use anymore by the object. If the object
// cannot be shrunk 0 is returned. // cannot be shrunk 0 is returned.
...@@ -66,6 +69,7 @@ STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); ...@@ -66,6 +69,7 @@ STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
class V8_EXPORT_PRIVATE LargeObjectSpace : public Space { class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
public: public:
using iterator = LargePageIterator; using iterator = LargePageIterator;
using const_iterator = ConstLargePageIterator;
~LargeObjectSpace() override { TearDown(); } ~LargeObjectSpace() override { TearDown(); }
...@@ -73,27 +77,27 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space { ...@@ -73,27 +77,27 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
void TearDown(); void TearDown();
// Available bytes for objects in this space. // Available bytes for objects in this space.
size_t Available() override; size_t Available() const override;
size_t Size() override { return size_; } size_t Size() const override { return size_; }
size_t SizeOfObjects() override { return objects_size_; } size_t SizeOfObjects() const override { return objects_size_; }
// Approximate amount of physical memory committed for this space. // Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override; size_t CommittedPhysicalMemory() const override;
int PageCount() { return page_count_; } int PageCount() const { return page_count_; }
// Frees unmarked objects. // Frees unmarked objects.
virtual void FreeUnmarkedObjects(); virtual void FreeUnmarkedObjects();
// Checks whether a heap object is in this space; O(1). // Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject obj); bool Contains(HeapObject obj) const;
// Checks whether an address is in the object area in this space. Iterates all // Checks whether an address is in the object area in this space. Iterates all
// objects in the space. May be slow. // objects in the space. May be slow.
bool ContainsSlow(Address addr); bool ContainsSlow(Address addr) const;
// Checks whether the space is empty. // Checks whether the space is empty.
bool IsEmpty() { return first_page() == nullptr; } bool IsEmpty() const { return first_page() == nullptr; }
virtual void AddPage(LargePage* page, size_t object_size); virtual void AddPage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page, size_t object_size); virtual void RemovePage(LargePage* page, size_t object_size);
...@@ -101,10 +105,16 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space { ...@@ -101,10 +105,16 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
LargePage* first_page() override { LargePage* first_page() override {
return reinterpret_cast<LargePage*>(memory_chunk_list_.front()); return reinterpret_cast<LargePage*>(memory_chunk_list_.front());
} }
const LargePage* first_page() const override {
return reinterpret_cast<const LargePage*>(memory_chunk_list_.front());
}
iterator begin() { return iterator(first_page()); } iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); } iterator end() { return iterator(nullptr); }
const_iterator begin() const { return const_iterator(first_page()); }
const_iterator end() const { return const_iterator(nullptr); }
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override; std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
virtual bool is_off_thread() const { return false; } virtual bool is_off_thread() const { return false; }
...@@ -119,7 +129,7 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space { ...@@ -119,7 +129,7 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
// The last allocated object that is not guaranteed to be initialized when the // The last allocated object that is not guaranteed to be initialized when the
// concurrent marker visits it. // concurrent marker visits it.
Address pending_object() { Address pending_object() const {
return pending_object_.load(std::memory_order_acquire); return pending_object_.load(std::memory_order_acquire);
} }
...@@ -187,7 +197,7 @@ class NewLargeObjectSpace : public LargeObjectSpace { ...@@ -187,7 +197,7 @@ class NewLargeObjectSpace : public LargeObjectSpace {
AllocateRaw(int object_size); AllocateRaw(int object_size);
// Available bytes for objects in this space. // Available bytes for objects in this space.
size_t Available() override; size_t Available() const override;
void Flip(); void Flip();
......
...@@ -197,7 +197,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size, ...@@ -197,7 +197,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
#endif #endif
} }
size_t MemoryChunk::CommittedPhysicalMemory() { size_t MemoryChunk::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits() || IsLargePage()) return size(); if (!base::OS::HasLazyCommits() || IsLargePage()) return size();
return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits()); return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits());
} }
......
...@@ -98,7 +98,7 @@ class MemoryChunk : public BasicMemoryChunk { ...@@ -98,7 +98,7 @@ class MemoryChunk : public BasicMemoryChunk {
return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load()); return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
} }
bool SweepingDone() { bool SweepingDone() const {
return concurrent_sweeping_ == ConcurrentSweepingState::kDone; return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
} }
...@@ -158,7 +158,7 @@ class MemoryChunk : public BasicMemoryChunk { ...@@ -158,7 +158,7 @@ class MemoryChunk : public BasicMemoryChunk {
int FreeListsLength(); int FreeListsLength();
// Approximate amount of physical memory committed for this chunk. // Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory(); V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() const;
class ProgressBar& ProgressBar() { class ProgressBar& ProgressBar() {
return progress_bar_; return progress_bar_;
...@@ -171,7 +171,7 @@ class MemoryChunk : public BasicMemoryChunk { ...@@ -171,7 +171,7 @@ class MemoryChunk : public BasicMemoryChunk {
inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type, inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount); size_t amount);
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) { size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const {
return external_backing_store_bytes_[type]; return external_backing_store_bytes_[type];
} }
......
...@@ -167,7 +167,7 @@ bool SemiSpace::Uncommit() { ...@@ -167,7 +167,7 @@ bool SemiSpace::Uncommit() {
return true; return true;
} }
size_t SemiSpace::CommittedPhysicalMemory() { size_t SemiSpace::CommittedPhysicalMemory() const {
if (!IsCommitted()) return 0; if (!IsCommitted()) return 0;
if (!base::OS::HasLazyCommits()) return CommittedMemory(); if (!base::OS::HasLazyCommits()) return CommittedMemory();
return committed_physical_memory_; return committed_physical_memory_;
...@@ -361,7 +361,7 @@ void SemiSpace::Print() {} ...@@ -361,7 +361,7 @@ void SemiSpace::Print() {}
#endif #endif
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
void SemiSpace::Verify() { void SemiSpace::Verify() const {
bool is_from_space = (id_ == kFromSpace); bool is_from_space = (id_ == kFromSpace);
size_t external_backing_store_bytes[kNumTypes]; size_t external_backing_store_bytes[kNumTypes];
...@@ -372,7 +372,7 @@ void SemiSpace::Verify() { ...@@ -372,7 +372,7 @@ void SemiSpace::Verify() {
int actual_pages = 0; int actual_pages = 0;
size_t computed_committed_physical_memory = 0; size_t computed_committed_physical_memory = 0;
for (Page* page : *this) { for (const Page* page : *this) {
CHECK_EQ(page->owner(), this); CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace()); CHECK(page->InNewSpace());
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
...@@ -435,7 +435,7 @@ void SemiSpace::AssertValidRange(Address start, Address end) { ...@@ -435,7 +435,7 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// SemiSpaceObjectIterator implementation. // SemiSpaceObjectIterator implementation.
SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) { SemiSpaceObjectIterator::SemiSpaceObjectIterator(const NewSpace* space) {
Initialize(space->first_allocatable_address(), space->top()); Initialize(space->first_allocatable_address(), space->top());
} }
...@@ -445,7 +445,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) { ...@@ -445,7 +445,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
limit_ = end; limit_ = end;
} }
size_t NewSpace::CommittedPhysicalMemory() { size_t NewSpace::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits()) return CommittedMemory(); if (!base::OS::HasLazyCommits()) return CommittedMemory();
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top()); BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
size_t size = to_space_.CommittedPhysicalMemory(); size_t size = to_space_.CommittedPhysicalMemory();
...@@ -736,7 +736,7 @@ void NewSpace::FreeLinearAllocationArea() { ...@@ -736,7 +736,7 @@ void NewSpace::FreeLinearAllocationArea() {
UpdateInlineAllocationLimit(0); UpdateInlineAllocationLimit(0);
} }
void NewSpace::VerifyTop() { void NewSpace::VerifyTop() const {
// Ensure validity of LAB: start <= top <= limit // Ensure validity of LAB: start <= top <= limit
DCHECK_LE(allocation_info_->start(), allocation_info_->top()); DCHECK_LE(allocation_info_->start(), allocation_info_->top());
DCHECK_LE(allocation_info_->top(), allocation_info_->limit()); DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
...@@ -754,7 +754,7 @@ void NewSpace::VerifyTop() { ...@@ -754,7 +754,7 @@ void NewSpace::VerifyTop() {
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume // We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking). // that it works (it depends on the invariants we are checking).
void NewSpace::Verify(Isolate* isolate) { void NewSpace::Verify(Isolate* isolate) const {
// The allocation pointer should be in the space or at the very end. // The allocation pointer should be in the space or at the very end.
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
......
...@@ -59,7 +59,7 @@ class SemiSpace : public Space { ...@@ -59,7 +59,7 @@ class SemiSpace : public Space {
bool Commit(); bool Commit();
bool Uncommit(); bool Uncommit();
bool IsCommitted() { return !memory_chunk_list_.Empty(); } bool IsCommitted() const { return !memory_chunk_list_.Empty(); }
// Grow the semispace to the new capacity. The new capacity requested must // Grow the semispace to the new capacity. The new capacity requested must
// be larger than the current capacity and less than the maximum capacity. // be larger than the current capacity and less than the maximum capacity.
...@@ -73,7 +73,7 @@ class SemiSpace : public Space { ...@@ -73,7 +73,7 @@ class SemiSpace : public Space {
bool EnsureCurrentCapacity(); bool EnsureCurrentCapacity();
// Returns the start address of the first page of the space. // Returns the start address of the first page of the space.
Address space_start() { Address space_start() const {
DCHECK_NE(memory_chunk_list_.front(), nullptr); DCHECK_NE(memory_chunk_list_.front(), nullptr);
return memory_chunk_list_.front()->area_start(); return memory_chunk_list_.front()->area_start();
} }
...@@ -81,10 +81,10 @@ class SemiSpace : public Space { ...@@ -81,10 +81,10 @@ class SemiSpace : public Space {
Page* current_page() { return current_page_; } Page* current_page() { return current_page_; }
// Returns the start address of the current page of the space. // Returns the start address of the current page of the space.
Address page_low() { return current_page_->area_start(); } Address page_low() const { return current_page_->area_start(); }
// Returns one past the end address of the current page of the space. // Returns one past the end address of the current page of the space.
Address page_high() { return current_page_->area_end(); } Address page_high() const { return current_page_->area_end(); }
bool AdvancePage() { bool AdvancePage() {
Page* next_page = current_page_->next_page(); Page* next_page = current_page_->next_page();
...@@ -109,34 +109,34 @@ class SemiSpace : public Space { ...@@ -109,34 +109,34 @@ class SemiSpace : public Space {
Page* InitializePage(MemoryChunk* chunk) override; Page* InitializePage(MemoryChunk* chunk) override;
// Age mark accessors. // Age mark accessors.
Address age_mark() { return age_mark_; } Address age_mark() const { return age_mark_; }
void set_age_mark(Address mark); void set_age_mark(Address mark);
// Returns the current capacity of the semispace. // Returns the current capacity of the semispace.
size_t current_capacity() { return current_capacity_; } size_t current_capacity() const { return current_capacity_; }
// Returns the target capacity of the semispace. // Returns the target capacity of the semispace.
size_t target_capacity() { return target_capacity_; } size_t target_capacity() const { return target_capacity_; }
// Returns the maximum capacity of the semispace. // Returns the maximum capacity of the semispace.
size_t maximum_capacity() { return maximum_capacity_; } size_t maximum_capacity() const { return maximum_capacity_; }
// Returns the initial capacity of the semispace. // Returns the initial capacity of the semispace.
size_t minimum_capacity() { return minimum_capacity_; } size_t minimum_capacity() const { return minimum_capacity_; }
SemiSpaceId id() { return id_; } SemiSpaceId id() const { return id_; }
// Approximate amount of physical memory committed for this space. // Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override; size_t CommittedPhysicalMemory() const override;
// If we don't have these here then SemiSpace will be abstract. However // If we don't have these here then SemiSpace will be abstract. However
// they should never be called: // they should never be called:
size_t Size() override { UNREACHABLE(); } size_t Size() const override { UNREACHABLE(); }
size_t SizeOfObjects() override { return Size(); } size_t SizeOfObjects() const override { return Size(); }
size_t Available() override { UNREACHABLE(); } size_t Available() const override { UNREACHABLE(); }
Page* first_page() override { Page* first_page() override {
return reinterpret_cast<Page*>(memory_chunk_list_.front()); return reinterpret_cast<Page*>(memory_chunk_list_.front());
...@@ -172,7 +172,7 @@ class SemiSpace : public Space { ...@@ -172,7 +172,7 @@ class SemiSpace : public Space {
#endif #endif
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
virtual void Verify(); virtual void Verify() const;
#endif #endif
void AddRangeToActiveSystemPages(Address start, Address end); void AddRangeToActiveSystemPages(Address start, Address end);
...@@ -220,7 +220,7 @@ class SemiSpace : public Space { ...@@ -220,7 +220,7 @@ class SemiSpace : public Space {
class SemiSpaceObjectIterator : public ObjectIterator { class SemiSpaceObjectIterator : public ObjectIterator {
public: public:
// Create an iterator over the allocated objects in the given to-space. // Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceObjectIterator(NewSpace* space); explicit SemiSpaceObjectIterator(const NewSpace* space);
inline HeapObject Next() override; inline HeapObject Next() override;
...@@ -239,7 +239,7 @@ class SemiSpaceObjectIterator : public ObjectIterator { ...@@ -239,7 +239,7 @@ class SemiSpaceObjectIterator : public ObjectIterator {
// The new space consists of a contiguous pair of semispaces. It simply // The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace. // forwards most functions to the appropriate semispace.
class V8_EXPORT_PRIVATE NewSpace class V8_EXPORT_PRIVATE NewSpace final
: NON_EXPORTED_BASE(public SpaceWithLinearArea) { : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public: public:
using iterator = PageIterator; using iterator = PageIterator;
...@@ -268,17 +268,17 @@ class V8_EXPORT_PRIVATE NewSpace ...@@ -268,17 +268,17 @@ class V8_EXPORT_PRIVATE NewSpace
void Shrink(); void Shrink();
// Return the allocated bytes in the active semispace. // Return the allocated bytes in the active semispace.
size_t Size() final { size_t Size() const final {
DCHECK_GE(top(), to_space_.page_low()); DCHECK_GE(top(), to_space_.page_low());
return (to_space_.current_capacity() - Page::kPageSize) / Page::kPageSize * return (to_space_.current_capacity() - Page::kPageSize) / Page::kPageSize *
MemoryChunkLayout::AllocatableMemoryInDataPage() + MemoryChunkLayout::AllocatableMemoryInDataPage() +
static_cast<size_t>(top() - to_space_.page_low()); static_cast<size_t>(top() - to_space_.page_low());
} }
size_t SizeOfObjects() final { return Size(); } size_t SizeOfObjects() const final { return Size(); }
// Return the allocatable capacity of a semispace. // Return the allocatable capacity of a semispace.
size_t Capacity() { size_t Capacity() const {
SLOW_DCHECK(to_space_.target_capacity() == from_space_.target_capacity()); SLOW_DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
return (to_space_.target_capacity() / Page::kPageSize) * return (to_space_.target_capacity() / Page::kPageSize) *
MemoryChunkLayout::AllocatableMemoryInDataPage(); MemoryChunkLayout::AllocatableMemoryInDataPage();
...@@ -286,27 +286,27 @@ class V8_EXPORT_PRIVATE NewSpace ...@@ -286,27 +286,27 @@ class V8_EXPORT_PRIVATE NewSpace
// Return the current size of a semispace, allocatable and non-allocatable // Return the current size of a semispace, allocatable and non-allocatable
// memory. // memory.
size_t TotalCapacity() { size_t TotalCapacity() const {
DCHECK(to_space_.target_capacity() == from_space_.target_capacity()); DCHECK(to_space_.target_capacity() == from_space_.target_capacity());
return to_space_.target_capacity(); return to_space_.target_capacity();
} }
// Committed memory for NewSpace is the committed memory of both semi-spaces // Committed memory for NewSpace is the committed memory of both semi-spaces
// combined. // combined.
size_t CommittedMemory() final { size_t CommittedMemory() const final {
return from_space_.CommittedMemory() + to_space_.CommittedMemory(); return from_space_.CommittedMemory() + to_space_.CommittedMemory();
} }
size_t MaximumCommittedMemory() final { size_t MaximumCommittedMemory() const final {
return from_space_.MaximumCommittedMemory() + return from_space_.MaximumCommittedMemory() +
to_space_.MaximumCommittedMemory(); to_space_.MaximumCommittedMemory();
} }
// Approximate amount of physical memory committed for this space. // Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() final; size_t CommittedPhysicalMemory() const final;
// Return the available bytes without growing. // Return the available bytes without growing.
size_t Available() final { size_t Available() const final {
DCHECK_GE(Capacity(), Size()); DCHECK_GE(Capacity(), Size());
return Capacity() - Size(); return Capacity() - Size();
} }
...@@ -318,7 +318,7 @@ class V8_EXPORT_PRIVATE NewSpace ...@@ -318,7 +318,7 @@ class V8_EXPORT_PRIVATE NewSpace
return to_space_.ExternalBackingStoreBytes(type); return to_space_.ExternalBackingStoreBytes(type);
} }
size_t ExternalBackingStoreBytes() { size_t ExternalBackingStoreBytes() const {
size_t result = 0; size_t result = 0;
for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) { for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
result += result +=
...@@ -327,7 +327,7 @@ class V8_EXPORT_PRIVATE NewSpace ...@@ -327,7 +327,7 @@ class V8_EXPORT_PRIVATE NewSpace
return result; return result;
} }
size_t AllocatedSinceLastGC() { size_t AllocatedSinceLastGC() const {
const Address age_mark = to_space_.age_mark(); const Address age_mark = to_space_.age_mark();
DCHECK_NE(age_mark, kNullAddress); DCHECK_NE(age_mark, kNullAddress);
DCHECK_NE(top(), kNullAddress); DCHECK_NE(top(), kNullAddress);
...@@ -364,34 +364,36 @@ class V8_EXPORT_PRIVATE NewSpace ...@@ -364,34 +364,36 @@ class V8_EXPORT_PRIVATE NewSpace
bool Rebalance(); bool Rebalance();
// Return the maximum capacity of a semispace. // Return the maximum capacity of a semispace.
size_t MaximumCapacity() { size_t MaximumCapacity() const {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity()); DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
return to_space_.maximum_capacity(); return to_space_.maximum_capacity();
} }
bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); } bool IsAtMaximumCapacity() const {
return TotalCapacity() == MaximumCapacity();
}
// Returns the initial capacity of a semispace. // Returns the initial capacity of a semispace.
size_t InitialTotalCapacity() { size_t InitialTotalCapacity() const {
DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity()); DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
return to_space_.minimum_capacity(); return to_space_.minimum_capacity();
} }
void VerifyTop(); void VerifyTop() const;
Address original_top_acquire() { Address original_top_acquire() const {
return original_top_.load(std::memory_order_acquire); return original_top_.load(std::memory_order_acquire);
} }
Address original_limit_relaxed() { Address original_limit_relaxed() const {
return original_limit_.load(std::memory_order_relaxed); return original_limit_.load(std::memory_order_relaxed);
} }
// Return the address of the first allocatable address in the active // Return the address of the first allocatable address in the active
// semispace. This may be the address where the first object resides. // semispace. This may be the address where the first object resides.
Address first_allocatable_address() { return to_space_.space_start(); } Address first_allocatable_address() const { return to_space_.space_start(); }
// Get the age mark of the inactive semispace. // Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); } Address age_mark() const { return from_space_.age_mark(); }
// Set the age mark in the active semispace. // Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
...@@ -433,7 +435,7 @@ class V8_EXPORT_PRIVATE NewSpace ...@@ -433,7 +435,7 @@ class V8_EXPORT_PRIVATE NewSpace
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
// Verify the active semispace. // Verify the active semispace.
virtual void Verify(Isolate* isolate); virtual void Verify(Isolate* isolate) const;
#endif #endif
#ifdef DEBUG #ifdef DEBUG
...@@ -452,7 +454,7 @@ class V8_EXPORT_PRIVATE NewSpace ...@@ -452,7 +454,7 @@ class V8_EXPORT_PRIVATE NewSpace
return from_space_.Uncommit(); return from_space_.Uncommit();
} }
bool IsFromSpaceCommitted() { return from_space_.IsCommitted(); } bool IsFromSpaceCommitted() const { return from_space_.IsCommitted(); }
SemiSpace* active_space() { return &to_space_; } SemiSpace* active_space() { return &to_space_; }
...@@ -532,7 +534,7 @@ class V8_EXPORT_PRIVATE NewSpace ...@@ -532,7 +534,7 @@ class V8_EXPORT_PRIVATE NewSpace
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime); int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment); bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsAllocationObserver() override { return true; } bool SupportsAllocationObserver() const override { return true; }
friend class SemiSpaceObjectIterator; friend class SemiSpaceObjectIterator;
}; };
......
...@@ -205,7 +205,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { ...@@ -205,7 +205,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
DCHECK_EQ(0u, other->Capacity()); DCHECK_EQ(0u, other->Capacity());
} }
size_t PagedSpace::CommittedPhysicalMemory() { size_t PagedSpace::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits()) { if (!base::OS::HasLazyCommits()) {
DCHECK_EQ(0, committed_physical_memory()); DCHECK_EQ(0, committed_physical_memory());
return CommittedMemory(); return CommittedMemory();
...@@ -231,10 +231,10 @@ void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) { ...@@ -231,10 +231,10 @@ void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
} }
#if DEBUG #if DEBUG
void PagedSpace::VerifyCommittedPhysicalMemory() { void PagedSpace::VerifyCommittedPhysicalMemory() const {
heap()->safepoint()->AssertActive(); heap()->safepoint()->AssertActive();
size_t size = 0; size_t size = 0;
for (Page* page : *this) { for (const Page* page : *this) {
DCHECK(page->SweepingDone()); DCHECK(page->SweepingDone());
size += page->CommittedPhysicalMemory(); size += page->CommittedPhysicalMemory();
} }
...@@ -371,9 +371,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground( ...@@ -371,9 +371,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
return std::make_pair(object_start, size_in_bytes); return std::make_pair(object_start, size_in_bytes);
} }
int PagedSpace::CountTotalPages() { int PagedSpace::CountTotalPages() const {
int count = 0; int count = 0;
for (Page* page : *this) { for (const Page* page : *this) {
count++; count++;
USE(page); USE(page);
} }
...@@ -447,7 +447,7 @@ void PagedSpace::MakeLinearAllocationAreaIterable() { ...@@ -447,7 +447,7 @@ void PagedSpace::MakeLinearAllocationAreaIterable() {
} }
} }
size_t PagedSpace::Available() { size_t PagedSpace::Available() const {
ConcurrentAllocationMutex guard(this); ConcurrentAllocationMutex guard(this);
return free_list_->Available(); return free_list_->Available();
} }
...@@ -719,7 +719,7 @@ PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes, ...@@ -719,7 +719,7 @@ PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
return std::make_pair(start, used_size_in_bytes); return std::make_pair(start, used_size_in_bytes);
} }
bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) { bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) const {
// Code space sweeping is only allowed on main thread. // Code space sweeping is only allowed on main thread.
return (local_heap && local_heap->is_main_thread()) || return (local_heap && local_heap->is_main_thread()) ||
identity() != CODE_SPACE; identity() != CODE_SPACE;
...@@ -1085,7 +1085,7 @@ void MapSpace::SortFreeList() { ...@@ -1085,7 +1085,7 @@ void MapSpace::SortFreeList() {
} }
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); } void MapSpace::VerifyObject(HeapObject object) const { CHECK(object.IsMap()); }
#endif #endif
} // namespace internal } // namespace internal
......
...@@ -69,7 +69,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator { ...@@ -69,7 +69,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
Address cur_addr_; // Current iteration point. Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point. Address cur_end_; // End iteration point.
PagedSpace* space_; const PagedSpace* const space_;
PageRange page_range_; PageRange page_range_;
PageRange::iterator current_page_; PageRange::iterator current_page_;
#if V8_COMPRESS_POINTERS #if V8_COMPRESS_POINTERS
...@@ -99,19 +99,19 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -99,19 +99,19 @@ class V8_EXPORT_PRIVATE PagedSpace
bool ContainsSlow(Address addr) const; bool ContainsSlow(Address addr) const;
// Does the space need executable memory? // Does the space need executable memory?
Executability executable() { return executable_; } Executability executable() const { return executable_; }
// Prepares for a mark-compact GC. // Prepares for a mark-compact GC.
void PrepareForMarkCompact(); void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()). // Current capacity without growing (Size() + Available()).
size_t Capacity() { return accounting_stats_.Capacity(); } size_t Capacity() const { return accounting_stats_.Capacity(); }
// Approximate amount of physical memory committed for this space. // Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override; size_t CommittedPhysicalMemory() const override;
#if DEBUG #if DEBUG
void VerifyCommittedPhysicalMemory(); void VerifyCommittedPhysicalMemory() const;
#endif // DEBUG #endif // DEBUG
void IncrementCommittedPhysicalMemory(size_t increment_value); void IncrementCommittedPhysicalMemory(size_t increment_value);
...@@ -131,17 +131,17 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -131,17 +131,17 @@ class V8_EXPORT_PRIVATE PagedSpace
// The bytes in the linear allocation area are not included in this total // The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are // because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here. // immediately added to the free list so they show up here.
size_t Available() override; size_t Available() const override;
// Allocated bytes in this space. Garbage bytes that were not found due to // Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the // concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted // current linear allocation area (between top and limit) are also counted
// here. // here.
size_t Size() override { return accounting_stats_.Size(); } size_t Size() const override { return accounting_stats_.Size(); }
// Wasted bytes in this space. These are just the bytes that were thrown away // Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation. // due to being too small to use for allocation.
virtual size_t Waste() { return free_list_->wasted_bytes(); } virtual size_t Waste() const { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a // Allocate the requested number of bytes in the space if possible, return a
// failure object if not. // failure object if not.
...@@ -257,7 +257,7 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -257,7 +257,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// Overridden by subclasses to verify space-specific object // Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space). // properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject obj) {} virtual void VerifyObject(HeapObject obj) const {}
#endif #endif
#ifdef DEBUG #ifdef DEBUG
...@@ -271,19 +271,21 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -271,19 +271,21 @@ class V8_EXPORT_PRIVATE PagedSpace
static void ResetCodeStatistics(Isolate* isolate); static void ResetCodeStatistics(Isolate* isolate);
#endif #endif
bool CanExpand(size_t size); bool CanExpand(size_t size) const;
// Returns the number of total pages in this space. // Returns the number of total pages in this space.
int CountTotalPages(); int CountTotalPages() const;
// Return size of allocatable area on a page in this space. // Return size of allocatable area on a page in this space.
inline int AreaSize() { return static_cast<int>(area_size_); } inline int AreaSize() const { return static_cast<int>(area_size_); }
bool is_compaction_space() { bool is_compaction_space() const {
return compaction_space_kind_ != CompactionSpaceKind::kNone; return compaction_space_kind_ != CompactionSpaceKind::kNone;
} }
CompactionSpaceKind compaction_space_kind() { return compaction_space_kind_; } CompactionSpaceKind compaction_space_kind() const {
return compaction_space_kind_;
}
// Merges {other} into the current space. Note that this modifies {other}, // Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics. // e.g., removes its bump pointer area and resets statistics.
...@@ -321,9 +323,9 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -321,9 +323,9 @@ class V8_EXPORT_PRIVATE PagedSpace
void SetLinearAllocationArea(Address top, Address limit); void SetLinearAllocationArea(Address top, Address limit);
Address original_top() { return original_top_; } Address original_top() const { return original_top_; }
Address original_limit() { return original_limit_; } Address original_limit() const { return original_limit_; }
void MoveOriginalTopForward() { void MoveOriginalTopForward() {
base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_); base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
...@@ -343,7 +345,7 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -343,7 +345,7 @@ class V8_EXPORT_PRIVATE PagedSpace
private: private:
class ConcurrentAllocationMutex { class ConcurrentAllocationMutex {
public: public:
explicit ConcurrentAllocationMutex(PagedSpace* space) { explicit ConcurrentAllocationMutex(const PagedSpace* space) {
if (space->SupportsConcurrentAllocation()) { if (space->SupportsConcurrentAllocation()) {
guard_.emplace(&space->space_mutex_); guard_.emplace(&space->space_mutex_);
} }
...@@ -352,13 +354,15 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -352,13 +354,15 @@ class V8_EXPORT_PRIVATE PagedSpace
base::Optional<base::MutexGuard> guard_; base::Optional<base::MutexGuard> guard_;
}; };
bool SupportsConcurrentAllocation() { return !is_compaction_space(); } bool SupportsConcurrentAllocation() const { return !is_compaction_space(); }
// Set space linear allocation area. // Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit); void SetTopAndLimit(Address top, Address limit);
void DecreaseLimit(Address new_limit); void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override; void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsAllocationObserver() override { return !is_compaction_space(); } bool SupportsAllocationObserver() const override {
return !is_compaction_space();
}
// Slow path of allocation function // Slow path of allocation function
V8_WARN_UNUSED_RESULT AllocationResult V8_WARN_UNUSED_RESULT AllocationResult
...@@ -368,13 +372,13 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -368,13 +372,13 @@ class V8_EXPORT_PRIVATE PagedSpace
protected: protected:
// PagedSpaces that should be included in snapshots have different, i.e., // PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages. // smaller, initial pages.
virtual bool snapshotable() { return true; } virtual bool snapshotable() const { return true; }
bool HasPages() { return first_page() != nullptr; } bool HasPages() const { return first_page() != nullptr; }
// Returns whether sweeping of this space is safe on this thread. Code space // Returns whether sweeping of this space is safe on this thread. Code space
// sweeping is only allowed on the main thread. // sweeping is only allowed on the main thread.
bool IsSweepingAllowedOnThread(LocalHeap* local_heap); bool IsSweepingAllowedOnThread(LocalHeap* local_heap) const;
// Cleans up the space, frees all pages in this space except those belonging // Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk. // to the initial chunk, uncommits addresses in the initial chunk.
...@@ -448,7 +452,7 @@ class V8_EXPORT_PRIVATE PagedSpace ...@@ -448,7 +452,7 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationStats accounting_stats_; AllocationStats accounting_stats_;
// Mutex guarding any concurrent access to the space. // Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_; mutable base::Mutex space_mutex_;
// The top and the limit at the time of setting the linear allocation area. // The top and the limit at the time of setting the linear allocation area.
// These values are protected by pending_allocation_mutex_. // These values are protected by pending_allocation_mutex_.
...@@ -490,7 +494,7 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace { ...@@ -490,7 +494,7 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
Page* Expand() override; Page* Expand() override;
// The space is temporary and not included in any snapshots. // The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; } bool snapshotable() const override { return false; }
// Pages that were allocated in this local space and need to be merged // Pages that were allocated in this local space and need to be merged
// to the main space. // to the main space.
std::vector<Page*> new_pages_; std::vector<Page*> new_pages_;
...@@ -576,7 +580,7 @@ class MapSpace : public PagedSpace { ...@@ -576,7 +580,7 @@ class MapSpace : public PagedSpace {
: PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(), : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
&paged_allocation_info_) {} &paged_allocation_info_) {}
int RoundSizeDownToObjectAlignment(int size) override { int RoundSizeDownToObjectAlignment(int size) const override {
if (base::bits::IsPowerOfTwo(Map::kSize)) { if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize); return RoundDown(size, Map::kSize);
} else { } else {
...@@ -587,7 +591,7 @@ class MapSpace : public PagedSpace { ...@@ -587,7 +591,7 @@ class MapSpace : public PagedSpace {
void SortFreeList(); void SortFreeList();
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
void VerifyObject(HeapObject obj) override; void VerifyObject(HeapObject obj) const override;
#endif #endif
private: private:
...@@ -612,7 +616,7 @@ class OldGenerationMemoryChunkIterator { ...@@ -612,7 +616,7 @@ class OldGenerationMemoryChunkIterator {
kCodeLargeObjectState, kCodeLargeObjectState,
kFinishedState kFinishedState
}; };
Heap* heap_; Heap* const heap_;
State state_; State state_;
PageIterator old_iterator_; PageIterator old_iterator_;
PageIterator code_iterator_; PageIterator code_iterator_;
......
...@@ -253,10 +253,12 @@ size_t ReadOnlyHeap::read_only_object_cache_size() const { ...@@ -253,10 +253,12 @@ size_t ReadOnlyHeap::read_only_object_cache_size() const {
return read_only_object_cache_.size(); return read_only_object_cache_.size();
} }
ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap) ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(
const ReadOnlyHeap* ro_heap)
: ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {} : ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {}
ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space) ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(
const ReadOnlySpace* ro_space)
: ro_space_(ro_space), : ro_space_(ro_space),
current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
? std::vector<ReadOnlyPage*>::iterator() ? std::vector<ReadOnlyPage*>::iterator()
......
...@@ -147,13 +147,13 @@ class SoleReadOnlyHeap : public ReadOnlyHeap { ...@@ -147,13 +147,13 @@ class SoleReadOnlyHeap : public ReadOnlyHeap {
// This class enables iterating over all read-only heap objects. // This class enables iterating over all read-only heap objects.
class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator { class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
public: public:
explicit ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap); explicit ReadOnlyHeapObjectIterator(const ReadOnlyHeap* ro_heap);
explicit ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space); explicit ReadOnlyHeapObjectIterator(const ReadOnlySpace* ro_space);
HeapObject Next(); HeapObject Next();
private: private:
ReadOnlySpace* const ro_space_; const ReadOnlySpace* const ro_space_;
std::vector<ReadOnlyPage*>::const_iterator current_page_; std::vector<ReadOnlyPage*>::const_iterator current_page_;
Address current_addr_; Address current_addr_;
}; };
......
...@@ -430,7 +430,7 @@ void ReadOnlySpace::Unseal() { ...@@ -430,7 +430,7 @@ void ReadOnlySpace::Unseal() {
is_marked_read_only_ = false; is_marked_read_only_ = false;
} }
bool ReadOnlySpace::ContainsSlow(Address addr) { bool ReadOnlySpace::ContainsSlow(Address addr) const {
BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr); BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
for (BasicMemoryChunk* chunk : pages_) { for (BasicMemoryChunk* chunk : pages_) {
if (chunk == c) return true; if (chunk == c) return true;
...@@ -442,7 +442,7 @@ namespace { ...@@ -442,7 +442,7 @@ namespace {
// Only iterates over a single chunk as the chunk iteration is done externally. // Only iterates over a single chunk as the chunk iteration is done externally.
class ReadOnlySpaceObjectIterator : public ObjectIterator { class ReadOnlySpaceObjectIterator : public ObjectIterator {
public: public:
ReadOnlySpaceObjectIterator(Heap* heap, ReadOnlySpace* space, ReadOnlySpaceObjectIterator(const Heap* heap, const ReadOnlySpace* space,
BasicMemoryChunk* chunk) BasicMemoryChunk* chunk)
: cur_addr_(kNullAddress), cur_end_(kNullAddress), space_(space) {} : cur_addr_(kNullAddress), cur_end_(kNullAddress), space_(space) {}
...@@ -481,7 +481,7 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator { ...@@ -481,7 +481,7 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator {
Address cur_addr_; // Current iteration point. Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point. Address cur_end_; // End iteration point.
ReadOnlySpace* space_; const ReadOnlySpace* const space_;
}; };
} // namespace } // namespace
...@@ -510,7 +510,7 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor { ...@@ -510,7 +510,7 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
}; };
} // namespace } // namespace
void ReadOnlySpace::Verify(Isolate* isolate) { void ReadOnlySpace::Verify(Isolate* isolate) const {
bool allocation_pointer_found_in_space = top_ == limit_; bool allocation_pointer_found_in_space = top_ == limit_;
VerifyReadOnlyPointersVisitor visitor(isolate->heap()); VerifyReadOnlyPointersVisitor visitor(isolate->heap());
...@@ -558,7 +558,7 @@ void ReadOnlySpace::Verify(Isolate* isolate) { ...@@ -558,7 +558,7 @@ void ReadOnlySpace::Verify(Isolate* isolate) {
} }
#ifdef DEBUG #ifdef DEBUG
void ReadOnlySpace::VerifyCounters(Heap* heap) { void ReadOnlySpace::VerifyCounters(Heap* heap) const {
size_t total_capacity = 0; size_t total_capacity = 0;
size_t total_allocated = 0; size_t total_allocated = 0;
for (BasicMemoryChunk* page : pages_) { for (BasicMemoryChunk* page : pages_) {
...@@ -582,7 +582,7 @@ void ReadOnlySpace::VerifyCounters(Heap* heap) { ...@@ -582,7 +582,7 @@ void ReadOnlySpace::VerifyCounters(Heap* heap) {
#endif // DEBUG #endif // DEBUG
#endif // VERIFY_HEAP #endif // VERIFY_HEAP
size_t ReadOnlySpace::CommittedPhysicalMemory() { size_t ReadOnlySpace::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits()) return CommittedMemory(); if (!base::OS::HasLazyCommits()) return CommittedMemory();
BasicMemoryChunk::UpdateHighWaterMark(top_); BasicMemoryChunk::UpdateHighWaterMark(top_);
size_t size = 0; size_t size = 0;
......
...@@ -212,20 +212,20 @@ class ReadOnlySpace : public BaseSpace { ...@@ -212,20 +212,20 @@ class ReadOnlySpace : public BaseSpace {
// to write it into the free space nodes that were already created. // to write it into the free space nodes that were already created.
void RepairFreeSpacesAfterDeserialization(); void RepairFreeSpacesAfterDeserialization();
size_t Size() override { return accounting_stats_.Size(); } size_t Size() const override { return accounting_stats_.Size(); }
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() override; V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() const override;
const std::vector<ReadOnlyPage*>& pages() const { return pages_; } const std::vector<ReadOnlyPage*>& pages() const { return pages_; }
Address top() const { return top_; } Address top() const { return top_; }
Address limit() const { return limit_; } Address limit() const { return limit_; }
size_t Capacity() const { return capacity_; } size_t Capacity() const { return capacity_; }
bool ContainsSlow(Address addr); bool ContainsSlow(Address addr) const;
V8_EXPORT_PRIVATE void ShrinkPages(); V8_EXPORT_PRIVATE void ShrinkPages();
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
void Verify(Isolate* isolate); void Verify(Isolate* isolate) const;
#ifdef DEBUG #ifdef DEBUG
void VerifyCounters(Heap* heap); void VerifyCounters(Heap* heap) const;
#endif // DEBUG #endif // DEBUG
#endif // VERIFY_HEAP #endif // VERIFY_HEAP
......
...@@ -236,7 +236,7 @@ void Space::PauseAllocationObservers() { allocation_counter_.Pause(); } ...@@ -236,7 +236,7 @@ void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); } void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end, Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) { size_t min_size) const {
DCHECK_GE(end - start, min_size); DCHECK_GE(end - start, min_size);
if (!use_lab_) { if (!use_lab_) {
...@@ -288,7 +288,7 @@ void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) { ...@@ -288,7 +288,7 @@ void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
allocations_origins_[static_cast<int>(origin)]++; allocations_origins_[static_cast<int>(origin)]++;
} }
void SpaceWithLinearArea::PrintAllocationsOrigins() { void SpaceWithLinearArea::PrintAllocationsOrigins() const {
PrintIsolate( PrintIsolate(
heap()->isolate(), heap()->isolate(),
"Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n", "Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
......
...@@ -145,12 +145,12 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace { ...@@ -145,12 +145,12 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
// Returns size of objects. Can differ from the allocated size // Returns size of objects. Can differ from the allocated size
// (e.g. see OldLargeObjectSpace). // (e.g. see OldLargeObjectSpace).
virtual size_t SizeOfObjects() { return Size(); } virtual size_t SizeOfObjects() const { return Size(); }
// Return the available bytes without growing. // Return the available bytes without growing.
virtual size_t Available() = 0; virtual size_t Available() const = 0;
virtual int RoundSizeDownToObjectAlignment(int size) { virtual int RoundSizeDownToObjectAlignment(int size) const {
if (id_ == CODE_SPACE) { if (id_ == CODE_SPACE) {
return RoundDown(size, kCodeAlignment); return RoundDown(size, kCodeAlignment);
} else { } else {
...@@ -372,6 +372,7 @@ class PageIteratorImpl ...@@ -372,6 +372,7 @@ class PageIteratorImpl
using PageIterator = PageIteratorImpl<Page>; using PageIterator = PageIteratorImpl<Page>;
using ConstPageIterator = PageIteratorImpl<const Page>; using ConstPageIterator = PageIteratorImpl<const Page>;
using LargePageIterator = PageIteratorImpl<LargePage>; using LargePageIterator = PageIteratorImpl<LargePage>;
using ConstLargePageIterator = PageIteratorImpl<const LargePage>;
class PageRange { class PageRange {
public: public:
...@@ -466,7 +467,7 @@ class SpaceWithLinearArea : public Space { ...@@ -466,7 +467,7 @@ class SpaceWithLinearArea : public Space {
LinearAllocationArea* allocation_info) LinearAllocationArea* allocation_info)
: Space(heap, id, free_list), allocation_info_(allocation_info) {} : Space(heap, id, free_list), allocation_info_(allocation_info) {}
virtual bool SupportsAllocationObserver() = 0; virtual bool SupportsAllocationObserver() const = 0;
// Returns the allocation pointer in this space. // Returns the allocation pointer in this space.
Address top() const { return allocation_info_->top(); } Address top() const { return allocation_info_->top(); }
...@@ -504,7 +505,7 @@ class SpaceWithLinearArea : public Space { ...@@ -504,7 +505,7 @@ class SpaceWithLinearArea : public Space {
// area bounded by [start, end), this function computes the limit to use to // area bounded by [start, end), this function computes the limit to use to
// allow proper observation based on existing observers. min_size specifies // allow proper observation based on existing observers. min_size specifies
// the minimum size that the limited area should have. // the minimum size that the limited area should have.
Address ComputeLimit(Address start, Address end, size_t min_size); Address ComputeLimit(Address start, Address end, size_t min_size) const;
V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit( V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
size_t min_size) = 0; size_t min_size) = 0;
...@@ -512,7 +513,7 @@ class SpaceWithLinearArea : public Space { ...@@ -512,7 +513,7 @@ class SpaceWithLinearArea : public Space {
void EnableInlineAllocation(); void EnableInlineAllocation();
bool IsInlineAllocationEnabled() const { return use_lab_; } bool IsInlineAllocationEnabled() const { return use_lab_; }
void PrintAllocationsOrigins(); void PrintAllocationsOrigins() const;
protected: protected:
V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin); V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment