Commit a8656e55 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Merge original_top/limit from NewSpace and PagedSpace

SpaceWithLinearArea will hold a reference to a struct containing
original_top_ and original_limit_, as well as the lock used to
synchronize them when querying IsPendingAllocation.

PagedSpace is split into PagedSpaceBase (which holds all functionality)
and PagedSpace. The actual fields are owned by PagedSpace and NewSpace.

This is done in preparation for PagedNewSpace, to allow PagedSpaceBase
and NewSpace to share the same original_top_ and original_limit_ fields.

Bug: v8:12612
Change-Id: Iefbbd5209c5553db4ee16cb261734e6479e0f23f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3644795
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80549}
parent 48a41953
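
A minimal, self-contained sketch of the ownership model this change introduces: the concrete spaces own a LinearAreaOriginalData instance and pass it by reference to the shared base class, so a future PagedNewSpace could hand PagedSpaceBase the instance owned by NewSpace instead. This is an illustration of the idea, not V8 code: std::shared_mutex stands in for v8::base::SharedMutex, a plain uintptr_t alias stands in for v8::internal::Address, and names are simplified.

// Sketch only; assumes standard-library stand-ins for V8's base types.
#include <atomic>
#include <cstdint>
#include <shared_mutex>

using Address = std::uintptr_t;  // stand-in for v8::internal::Address

// Top/limit captured when the linear allocation area was last set, plus the
// lock that guards them for IsPendingAllocation-style background queries.
class LinearAreaOriginalData {
 public:
  Address top_acquire() const { return top_.load(std::memory_order_acquire); }
  Address limit_relaxed() const {
    return limit_.load(std::memory_order_relaxed);
  }
  void set_top_release(Address a) { top_.store(a, std::memory_order_release); }
  void set_limit_relaxed(Address a) {
    limit_.store(a, std::memory_order_relaxed);
  }
  std::shared_mutex* lock() { return &lock_; }

 private:
  std::atomic<Address> top_{0};
  std::atomic<Address> limit_{0};
  std::shared_mutex lock_;
};

// The shared base class only keeps a reference; it never owns the storage.
class SpaceWithLinearArea {
 public:
  explicit SpaceWithLinearArea(LinearAreaOriginalData& data) : data_(data) {}
  std::shared_mutex* linear_area_lock() { return data_.lock(); }
  Address original_top_acquire() const { return data_.top_acquire(); }
  Address original_limit_relaxed() const { return data_.limit_relaxed(); }

 protected:
  LinearAreaOriginalData& data_;
};

class PagedSpaceBase : public SpaceWithLinearArea {
 public:
  using SpaceWithLinearArea::SpaceWithLinearArea;
};

// The concrete space owns the storage and hands a reference to its base.
class PagedSpace : public PagedSpaceBase {
 public:
  // Binding the reference before the member is constructed is fine here,
  // because the base constructor only stores it.
  PagedSpace() : PagedSpaceBase(original_data_) {}

 private:
  LinearAreaOriginalData original_data_;
};

// Background-query pattern analogous to Heap::IsPendingAllocationInternal:
// take the lock shared, then read top (acquire) and limit (relaxed).
bool IsPendingAllocation(SpaceWithLinearArea& space, Address addr) {
  std::shared_lock<std::shared_mutex> guard(*space.linear_area_lock());
  const Address top = space.original_top_acquire();
  const Address limit = space.original_limit_relaxed();
  return top != 0 && top <= addr && addr < limit;
}

The acquire load of top pairs with the release store performed when the linear allocation area is updated; as the diff notes in SemiSpaceNewSpace::UpdateLinearAllocationArea, limit is stored (relaxed) before top (release), so a reader that observes the new top also observes the matching limit.
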
......@@ -431,7 +431,7 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
switch (base_space->identity()) {
case NEW_SPACE: {
base::SharedMutexGuard<base::kShared> guard(
new_space_->pending_allocation_mutex());
new_space_->linear_area_lock());
Address top = new_space_->original_top_acquire();
Address limit = new_space_->original_limit_relaxed();
DCHECK_LE(top, limit);
......@@ -443,9 +443,9 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
case MAP_SPACE: {
PagedSpace* paged_space = static_cast<PagedSpace*>(base_space);
base::SharedMutexGuard<base::kShared> guard(
paged_space->pending_allocation_mutex());
Address top = paged_space->original_top();
Address limit = paged_space->original_limit();
paged_space->linear_area_lock());
Address top = paged_space->original_top_acquire();
Address limit = paged_space->original_limit_relaxed();
DCHECK_LE(top, limit);
return top && top <= addr && addr < limit;
}
......
......@@ -2493,7 +2493,7 @@ class Heap {
friend class NewSpace;
friend class ObjectStatsCollector;
friend class Page;
friend class PagedSpace;
friend class PagedSpaceBase;
friend class PromoteYoungGenerationGC;
friend class ReadOnlyRoots;
friend class Scavenger;
......
......@@ -448,7 +448,8 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
// NewSpace implementation
NewSpace::NewSpace(Heap* heap, LinearAllocationArea* allocation_info)
: SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info) {}
: SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info,
linear_area_original_data_) {}
void NewSpace::ResetParkedAllocationBuffers() {
parked_allocation_buffers_.clear();
......@@ -456,7 +457,8 @@ void NewSpace::ResetParkedAllocationBuffers() {
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
if (allocation_info_->MergeIfAdjacent(info)) {
original_top_.store(allocation_info_->top(), std::memory_order_release);
linear_area_original_data_.set_original_top_release(
allocation_info_->top());
}
#if DEBUG
......@@ -484,10 +486,12 @@ void NewSpace::VerifyTop() const {
// Ensure that original_top_ always >= LAB start. The delta between start_
// and top_ is still to be processed by allocation observers.
DCHECK_GE(original_top_, allocation_info_->start());
DCHECK_GE(linear_area_original_data_.get_original_top_acquire(),
allocation_info_->start());
// Ensure that limit() is <= original_limit_.
DCHECK_LE(allocation_info_->limit(), original_limit_);
DCHECK_LE(allocation_info_->limit(),
linear_area_original_data_.get_original_limit_relaxed());
}
#endif // DEBUG
......@@ -654,9 +658,9 @@ void SemiSpaceNewSpace::UpdateLinearAllocationArea(Address known_top) {
// The order of the following two stores is important.
// See the corresponding loads in ConcurrentMarking::Run.
{
base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
base::SharedMutexGuard<base::kExclusive> guard(linear_area_lock());
linear_area_original_data_.set_original_limit_relaxed(limit());
linear_area_original_data_.set_original_top_release(top());
}
to_space_.AddRangeToActiveSystemPages(top(), limit());
......@@ -744,13 +748,9 @@ bool SemiSpaceNewSpace::AddParkedAllocationBuffer(
void SemiSpaceNewSpace::VerifyTop() const {
NewSpace::VerifyTop();
// Ensure that original_top_ always >= LAB start. The delta between start_
// and top_ is still to be processed by allocation observers.
DCHECK_GE(original_top_, allocation_info_->start());
// original_limit_ always needs to be end of curent to space page.
DCHECK_LE(allocation_info_->limit(), original_limit_);
DCHECK_EQ(original_limit_, to_space_.page_high());
DCHECK_EQ(linear_area_original_data_.get_original_limit_relaxed(),
to_space_.page_high());
}
#endif // DEBUG
......
......@@ -251,30 +251,12 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
void VerifyTop() const override;
#endif // DEBUG
Address original_top_acquire() const {
return original_top_.load(std::memory_order_acquire);
}
Address original_limit_relaxed() const {
return original_limit_.load(std::memory_order_relaxed);
}
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
void MoveOriginalTopForward() {
base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
DCHECK_GE(top(), original_top_);
DCHECK_LE(top(), original_limit_);
original_top_.store(top(), std::memory_order_release);
}
void MaybeFreeUnusedLab(LinearAllocationArea info);
base::SharedMutex* pending_allocation_mutex() {
return &pending_allocation_mutex_;
}
// Creates a filler object in the linear allocation area.
void MakeLinearAllocationAreaIterable();
......@@ -333,14 +315,7 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
base::Mutex mutex_;
// The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks. Protected by
// pending_allocation_mutex_.
std::atomic<Address> original_top_;
std::atomic<Address> original_limit_;
// Protects original_top_ and original_limit_.
base::SharedMutex pending_allocation_mutex_;
LinearAreaOriginalData linear_area_original_data_;
ParkedAllocationBuffersVector parked_allocation_buffers_;
......
......@@ -45,26 +45,26 @@ HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
return HeapObject();
}
bool PagedSpace::Contains(Address addr) const {
bool PagedSpaceBase::Contains(Address addr) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
return Page::FromAddress(addr)->owner() == this;
}
bool PagedSpace::Contains(Object o) const {
bool PagedSpaceBase::Contains(Object o) const {
if (!o.IsHeapObject()) return false;
return Page::FromAddress(o.ptr())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
void PagedSpaceBase::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
free_list()->RemoveCategory(category);
});
}
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
size_t PagedSpaceBase::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
......@@ -78,7 +78,7 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
return added;
}
bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
bool PagedSpaceBase::TryFreeLast(Address object_address, int object_size) {
if (allocation_info_->top() != kNullAddress) {
return allocation_info_->DecrementTopIfAdjacent(object_address,
object_size);
......@@ -86,7 +86,7 @@ bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
return false;
}
V8_INLINE bool PagedSpace::EnsureAllocation(int size_in_bytes,
V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
......
......@@ -19,6 +19,7 @@
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/string.h"
#include "src/utils/utils.h"
......@@ -30,7 +31,7 @@ namespace internal {
// PagedSpaceObjectIterator
PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
const PagedSpace* space)
const PagedSpaceBase* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
......@@ -46,7 +47,7 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
}
PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
const PagedSpace* space,
const PagedSpaceBase* space,
const Page* page)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
......@@ -93,7 +94,7 @@ bool PagedSpaceObjectIterator::AdvanceToNextPage() {
return true;
}
Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
Page* PagedSpaceBase::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
DCHECK_EQ(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
......@@ -108,18 +109,20 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
return page;
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable, FreeList* free_list,
LinearAllocationArea* allocation_info_,
PagedSpaceBase::PagedSpaceBase(
Heap* heap, AllocationSpace space, Executability executable,
FreeList* free_list, LinearAllocationArea* allocation_info_,
LinearAreaOriginalData& linear_area_original_data,
CompactionSpaceKind compaction_space_kind)
: SpaceWithLinearArea(heap, space, free_list, allocation_info_),
: SpaceWithLinearArea(heap, space, free_list, allocation_info_,
linear_area_original_data),
executable_(executable),
compaction_space_kind_(compaction_space_kind) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
void PagedSpace::TearDown() {
void PagedSpaceBase::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
......@@ -129,7 +132,7 @@ void PagedSpace::TearDown() {
accounting_stats_.Clear();
}
void PagedSpace::RefillFreeList() {
void PagedSpaceBase::RefillFreeList() {
// Any PagedSpace might invoke RefillFreeList. We filter all but our old
// generation spaces out.
if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
......@@ -173,7 +176,7 @@ void PagedSpace::RefillFreeList() {
}
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
void PagedSpaceBase::MergeCompactionSpace(CompactionSpace* other) {
base::MutexGuard guard(mutex());
DCHECK(identity() == other->identity());
......@@ -221,7 +224,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
DCHECK_EQ(0u, other->Capacity());
}
size_t PagedSpace::CommittedPhysicalMemory() const {
size_t PagedSpaceBase::CommittedPhysicalMemory() const {
if (!base::OS::HasLazyCommits()) {
DCHECK_EQ(0, committed_physical_memory());
return CommittedMemory();
......@@ -233,7 +236,7 @@ size_t PagedSpace::CommittedPhysicalMemory() const {
return committed_physical_memory();
}
void PagedSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
void PagedSpaceBase::IncrementCommittedPhysicalMemory(size_t increment_value) {
if (!base::OS::HasLazyCommits() || increment_value == 0) return;
size_t old_value = committed_physical_memory_.fetch_add(
increment_value, std::memory_order_relaxed);
......@@ -241,7 +244,7 @@ void PagedSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
DCHECK_LT(old_value, old_value + increment_value);
}
void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
void PagedSpaceBase::DecrementCommittedPhysicalMemory(size_t decrement_value) {
if (!base::OS::HasLazyCommits() || decrement_value == 0) return;
size_t old_value = committed_physical_memory_.fetch_sub(
decrement_value, std::memory_order_relaxed);
......@@ -250,7 +253,7 @@ void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
}
#if DEBUG
void PagedSpace::VerifyCommittedPhysicalMemory() const {
void PagedSpaceBase::VerifyCommittedPhysicalMemory() const {
heap()->safepoint()->AssertActive();
size_t size = 0;
for (const Page* page : *this) {
......@@ -262,7 +265,7 @@ void PagedSpace::VerifyCommittedPhysicalMemory() const {
}
#endif // DEBUG
bool PagedSpace::ContainsSlow(Address addr) const {
bool PagedSpaceBase::ContainsSlow(Address addr) const {
Page* p = Page::FromAddress(addr);
for (const Page* page : *this) {
if (page == p) return true;
......@@ -270,7 +273,7 @@ bool PagedSpace::ContainsSlow(Address addr) const {
return false;
}
void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
void PagedSpaceBase::RefineAllocatedBytesAfterSweeping(Page* page) {
CHECK(page->SweepingDone());
auto marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
......@@ -286,7 +289,7 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
marking_state->SetLiveBytes(page, 0);
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
Page* PagedSpaceBase::RemovePageSafe(int size_in_bytes) {
base::MutexGuard guard(mutex());
Page* page = free_list()->GetPageForSize(size_in_bytes);
if (!page) return nullptr;
......@@ -294,7 +297,7 @@ Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
return page;
}
size_t PagedSpace::AddPage(Page* page) {
size_t PagedSpaceBase::AddPage(Page* page) {
CHECK(page->SweepingDone());
page->set_owner(this);
memory_chunk_list_.PushBack(page);
......@@ -309,7 +312,7 @@ size_t PagedSpace::AddPage(Page* page) {
return RelinkFreeListCategories(page);
}
void PagedSpace::RemovePage(Page* page) {
void PagedSpaceBase::RemovePage(Page* page) {
CHECK(page->SweepingDone());
memory_chunk_list_.Remove(page);
UnlinkFreeListCategories(page);
......@@ -323,34 +326,33 @@ void PagedSpace::RemovePage(Page* page) {
DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
}
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
void PagedSpaceBase::SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
allocation_info_->Reset(top, limit);
base::Optional<base::SharedMutexGuard<base::kExclusive>> optional_guard;
if (!is_compaction_space())
optional_guard.emplace(&pending_allocation_mutex_);
original_limit_ = limit;
original_top_ = top;
if (!is_compaction_space()) optional_guard.emplace(linear_area_lock());
linear_area_original_data_.set_original_limit_relaxed(limit);
linear_area_original_data_.set_original_top_release(top);
}
size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
size_t PagedSpaceBase::ShrinkPageToHighWaterMark(Page* page) {
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
AccountUncommitted(unused);
return unused;
}
void PagedSpace::ResetFreeList() {
void PagedSpaceBase::ResetFreeList() {
for (Page* page : *this) {
free_list_->EvictFreeListItems(page);
}
DCHECK(free_list_->IsEmpty());
}
void PagedSpace::ShrinkImmortalImmovablePages() {
void PagedSpaceBase::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
FreeLinearAllocationArea();
......@@ -361,7 +363,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
}
}
Page* PagedSpace::Expand() {
Page* PagedSpaceBase::Expand() {
Page* page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, this, executable());
if (page == nullptr) return nullptr;
......@@ -372,7 +374,7 @@ Page* PagedSpace::Expand() {
return page;
}
base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
base::Optional<std::pair<Address, size_t>> PagedSpaceBase::ExpandBackground(
size_t size_in_bytes) {
Page* page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, this, executable());
......@@ -390,7 +392,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
return std::make_pair(object_start, size_in_bytes);
}
int PagedSpace::CountTotalPages() const {
int PagedSpaceBase::CountTotalPages() const {
int count = 0;
for (const Page* page : *this) {
count++;
......@@ -399,7 +401,7 @@ int PagedSpace::CountTotalPages() const {
return count;
}
void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
void PagedSpaceBase::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != kNullAddress && top != limit &&
heap()->incremental_marking()->black_allocation()) {
......@@ -407,7 +409,7 @@ void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
}
}
void PagedSpace::DecreaseLimit(Address new_limit) {
void PagedSpaceBase::DecreaseLimit(Address new_limit) {
Address old_limit = limit();
DCHECK_LE(top(), new_limit);
DCHECK_GE(old_limit, new_limit);
......@@ -430,7 +432,7 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
}
}
void PagedSpace::MarkLinearAllocationAreaBlack() {
void PagedSpaceBase::MarkLinearAllocationAreaBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
Address current_limit = limit();
......@@ -440,7 +442,7 @@ void PagedSpace::MarkLinearAllocationAreaBlack() {
}
}
void PagedSpace::UnmarkLinearAllocationArea() {
void PagedSpaceBase::UnmarkLinearAllocationArea() {
Address current_top = top();
Address current_limit = limit();
if (current_top != kNullAddress && current_top != current_limit) {
......@@ -449,7 +451,7 @@ void PagedSpace::UnmarkLinearAllocationArea() {
}
}
void PagedSpace::MakeLinearAllocationAreaIterable() {
void PagedSpaceBase::MakeLinearAllocationAreaIterable() {
Address current_top = top();
Address current_limit = limit();
if (current_top != kNullAddress && current_top != current_limit) {
......@@ -465,7 +467,7 @@ void PagedSpace::MakeLinearAllocationAreaIterable() {
}
}
size_t PagedSpace::Available() const {
size_t PagedSpaceBase::Available() const {
ConcurrentAllocationMutex guard(this);
return free_list_->Available();
}
......@@ -479,7 +481,7 @@ UnprotectMemoryOrigin GetUnprotectMemoryOrigin(bool is_compaction_space) {
} // namespace
void PagedSpace::FreeLinearAllocationArea() {
void PagedSpaceBase::FreeLinearAllocationArea() {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
Address current_top = top();
......@@ -515,7 +517,7 @@ void PagedSpace::FreeLinearAllocationArea() {
SpaceAccountingMode::kSpaceAccounted);
}
void PagedSpace::ReleasePage(Page* page) {
void PagedSpaceBase::ReleasePage(Page* page) {
DCHECK_EQ(
0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
page));
......@@ -538,7 +540,7 @@ void PagedSpace::ReleasePage(Page* page) {
page);
}
void PagedSpace::SetReadable() {
void PagedSpaceBase::SetReadable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
......@@ -546,7 +548,7 @@ void PagedSpace::SetReadable() {
}
}
void PagedSpace::SetReadAndExecutable() {
void PagedSpaceBase::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
......@@ -554,7 +556,7 @@ void PagedSpace::SetReadAndExecutable() {
}
}
void PagedSpace::SetCodeModificationPermissions() {
void PagedSpaceBase::SetCodeModificationPermissions() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
......@@ -562,12 +564,12 @@ void PagedSpace::SetCodeModificationPermissions() {
}
}
std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
std::unique_ptr<ObjectIterator> PagedSpaceBase::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(
new PagedSpaceObjectIterator(heap, this));
}
bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
AllocationOrigin origin) {
ConcurrentAllocationMutex guard(this);
DCHECK(IsAligned(size_in_bytes, kTaggedSize));
......@@ -620,9 +622,12 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
return true;
}
base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
LocalHeap* local_heap, size_t min_size_in_bytes, size_t max_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
base::Optional<std::pair<Address, size_t>>
PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!is_compaction_space());
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
identity() == MAP_SPACE);
......@@ -691,10 +696,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
}
base::Optional<std::pair<Address, size_t>>
PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
PagedSpaceBase::TryAllocationFromFreeListBackground(
size_t min_size_in_bytes, size_t max_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard lock(&space_mutex_);
DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
......@@ -737,18 +741,18 @@ PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
return std::make_pair(start, used_size_in_bytes);
}
bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) const {
bool PagedSpaceBase::IsSweepingAllowedOnThread(LocalHeap* local_heap) const {
// Code space sweeping is only allowed on main thread.
return (local_heap && local_heap->is_main_thread()) ||
identity() != CODE_SPACE;
}
#ifdef DEBUG
void PagedSpace::Print() {}
void PagedSpaceBase::Print() {}
#endif
#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
bool allocation_pointer_found_in_space =
(allocation_info_->top() == allocation_info_->limit());
size_t external_space_bytes[kNumTypes];
......@@ -834,7 +838,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
#endif
}
void PagedSpace::VerifyLiveBytes() const {
void PagedSpaceBase::VerifyLiveBytes() const {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
PtrComprCageBase cage_base(heap()->isolate());
......@@ -854,7 +858,7 @@ void PagedSpace::VerifyLiveBytes() const {
#endif // VERIFY_HEAP
#ifdef DEBUG
void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) const {
void PagedSpaceBase::VerifyCountersAfterSweeping(Heap* heap) const {
size_t total_capacity = 0;
size_t total_allocated = 0;
PtrComprCageBase cage_base(heap->isolate());
......@@ -878,7 +882,7 @@ void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) const {
DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
void PagedSpace::VerifyCountersBeforeConcurrentSweeping() const {
void PagedSpaceBase::VerifyCountersBeforeConcurrentSweeping() const {
size_t total_capacity = 0;
size_t total_allocated = 0;
auto marking_state =
......@@ -897,7 +901,7 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() const {
}
#endif
void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
void PagedSpaceBase::UpdateInlineAllocationLimit(size_t min_size) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
......@@ -910,12 +914,12 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
// -----------------------------------------------------------------------------
// OldSpace implementation
void PagedSpace::PrepareForMarkCompact() {
void PagedSpaceBase::PrepareForMarkCompact() {
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_->Reset();
}
bool PagedSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
bool PagedSpaceBase::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
RCS_SCOPE(heap()->isolate(),
RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
......@@ -923,7 +927,7 @@ bool PagedSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
}
Page* CompactionSpace::Expand() {
Page* page = PagedSpace::Expand();
Page* page = PagedSpaceBase::Expand();
new_pages_.push_back(page);
return page;
}
......@@ -933,7 +937,7 @@ bool CompactionSpace::RefillLabMain(int size_in_bytes,
return RawRefillLabMain(size_in_bytes, origin);
}
bool PagedSpace::TryExpand(int size_in_bytes, AllocationOrigin origin) {
bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin) {
Page* page = Expand();
if (!page) return false;
if (!is_compaction_space()) {
......@@ -945,7 +949,8 @@ bool PagedSpace::TryExpand(int size_in_bytes, AllocationOrigin origin) {
origin);
}
bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
AllocationOrigin origin) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
......@@ -1002,7 +1007,7 @@ bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
return false;
}
bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
int max_pages, int size_in_bytes,
AllocationOrigin origin) {
// Cleanup invalidated old-to-new refs for compaction space in the
......@@ -1021,7 +1026,7 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
return false;
}
void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
void PagedSpaceBase::AddRangeToActiveSystemPages(Page* page, Address start,
Address end) {
DCHECK_LE(page->address(), start);
DCHECK_LT(start, end);
......@@ -1035,7 +1040,7 @@ void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
MemoryAllocator::GetCommitPageSize());
}
void PagedSpace::ReduceActiveSystemPages(
void PagedSpaceBase::ReduceActiveSystemPages(
Page* page, ActiveSystemPages active_system_pages) {
const size_t reduced_pages =
page->active_system_pages()->Reduce(active_system_pages);
......
......@@ -41,8 +41,8 @@ class ObjectVisitor;
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space);
PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space,
PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space);
PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space,
const Page* page);
PagedSpaceObjectIterator(Heap* heap, const PagedSpace* space,
const Page* page, Address start_address);
......@@ -72,7 +72,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
const PagedSpace* const space_;
const PagedSpaceBase* const space_;
ConstPageRange page_range_;
ConstPageRange::iterator current_page_;
#if V8_COMPRESS_POINTERS
......@@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
#endif // V8_COMPRESS_POINTERS
};
class V8_EXPORT_PRIVATE PagedSpace
class V8_EXPORT_PRIVATE PagedSpaceBase
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
......@@ -89,12 +89,13 @@ class V8_EXPORT_PRIVATE PagedSpace
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
PagedSpace(
PagedSpaceBase(
Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list, LinearAllocationArea* allocation_info_,
FreeList* free_list, LinearAllocationArea* allocation_info,
LinearAreaOriginalData& linear_area_original_data,
CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
~PagedSpaceBase() override { TearDown(); }
// Checks whether an object/address is in this space.
inline bool Contains(Address a) const;
......@@ -308,21 +309,6 @@ class V8_EXPORT_PRIVATE PagedSpace
void SetLinearAllocationArea(Address top, Address limit);
Address original_top() const { return original_top_; }
Address original_limit() const { return original_limit_; }
void MoveOriginalTopForward() {
base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
DCHECK_GE(top(), original_top_);
DCHECK_LE(top(), original_limit_);
original_top_ = top();
}
base::SharedMutex* pending_allocation_mutex() {
return &pending_allocation_mutex_;
}
void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
void ReduceActiveSystemPages(Page* page,
ActiveSystemPages active_system_pages);
......@@ -330,7 +316,7 @@ class V8_EXPORT_PRIVATE PagedSpace
private:
class ConcurrentAllocationMutex {
public:
explicit ConcurrentAllocationMutex(const PagedSpace* space) {
explicit ConcurrentAllocationMutex(const PagedSpaceBase* space) {
if (space->SupportsConcurrentAllocation()) {
guard_.emplace(&space->space_mutex_);
}
......@@ -423,14 +409,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// Mutex guarding any concurrent access to the space.
mutable base::Mutex space_mutex_;
// The top and the limit at the time of setting the linear allocation area.
// These values are protected by pending_allocation_mutex_.
Address original_top_;
Address original_limit_;
// Protects original_top_ and original_limit_.
base::SharedMutex pending_allocation_mutex_;
std::atomic<size_t> committed_physical_memory_{0};
friend class IncrementalMarking;
......@@ -440,6 +418,20 @@ class V8_EXPORT_PRIVATE PagedSpace
friend class heap::HeapTester;
};
class V8_EXPORT_PRIVATE PagedSpace : public PagedSpaceBase {
public:
// Creates a space with an id.
PagedSpace(
Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list, LinearAllocationArea* allocation_info,
CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone)
: PagedSpaceBase(heap, id, executable, free_list, allocation_info,
linear_area_original_data_, compaction_space_kind) {}
private:
LinearAreaOriginalData linear_area_original_data_;
};
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
......
......@@ -41,7 +41,7 @@ class Isolate;
class LargeObjectSpace;
class LargePage;
class Page;
class PagedSpace;
class PagedSpaceBase;
class SemiSpace;
// -----------------------------------------------------------------------------
......@@ -477,11 +477,43 @@ class LocalAllocationBuffer {
LinearAllocationArea allocation_info_;
};
class LinearAreaOriginalData {
public:
Address get_original_top_acquire() const {
return original_top_.load(std::memory_order_acquire);
}
Address get_original_limit_relaxed() const {
return original_limit_.load(std::memory_order_relaxed);
}
void set_original_top_release(Address top) {
original_top_.store(top, std::memory_order_release);
}
void set_original_limit_relaxed(Address limit) {
original_limit_.store(limit, std::memory_order_relaxed);
}
base::SharedMutex* linear_area_lock() { return &linear_area_lock_; }
private:
// The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks. Protected by
// linear_area_lock_.
std::atomic<Address> original_top_ = 0;
std::atomic<Address> original_limit_ = 0;
// Protects original_top_ and original_limit_.
base::SharedMutex linear_area_lock_;
};
class SpaceWithLinearArea : public Space {
public:
SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list,
LinearAllocationArea* allocation_info)
: Space(heap, id, free_list), allocation_info_(allocation_info) {}
LinearAllocationArea* allocation_info,
LinearAreaOriginalData& linear_area_original_data)
: Space(heap, id, free_list),
allocation_info_(allocation_info),
linear_area_original_data_(linear_area_original_data) {}
virtual bool SupportsAllocationObserver() const = 0;
......@@ -546,6 +578,24 @@ class SpaceWithLinearArea : public Space {
AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
base::SharedMutex* linear_area_lock() {
return linear_area_original_data_.linear_area_lock();
}
Address original_top_acquire() const {
return linear_area_original_data_.get_original_top_acquire();
}
Address original_limit_relaxed() const {
return linear_area_original_data_.get_original_limit_relaxed();
}
void MoveOriginalTopForward() {
base::SharedMutexGuard<base::kExclusive> guard(linear_area_lock());
DCHECK_GE(top(), linear_area_original_data_.get_original_top_acquire());
DCHECK_LE(top(), linear_area_original_data_.get_original_limit_relaxed());
linear_area_original_data_.set_original_top_release(top());
}
protected:
V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
......@@ -581,6 +631,8 @@ class SpaceWithLinearArea : public Space {
#endif // DEBUG
LinearAllocationArea* const allocation_info_;
LinearAreaOriginalData& linear_area_original_data_;
bool use_lab_ = true;
size_t allocations_origins_[static_cast<int>(
......
......@@ -175,7 +175,7 @@ void Sweeper::StartSweeperTasks() {
}
}
Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
Page* Sweeper::GetSweptPageSafe(PagedSpaceBase* space) {
base::MutexGuard guard(&mutex_);
SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
if (!list.empty()) {
......
......@@ -20,7 +20,7 @@ namespace internal {
class InvalidatedSlotsCleanup;
class MajorNonAtomicMarkingState;
class Page;
class PagedSpace;
class PagedSpaceBase;
class Space;
enum class FreeSpaceTreatmentMode { kIgnoreFreeSpace, kZapFreeSpace };
......@@ -106,7 +106,7 @@ class Sweeper {
// Support concurrent sweepers from main thread
void SupportConcurrentSweeping();
Page* GetSweptPageSafe(PagedSpace* space);
Page* GetSweptPageSafe(PagedSpaceBase* space);
private:
class IncrementalSweeperTask;
......