Commit 4f9b30b0 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Make PagedSpace::Expand thread-safe

This moves page post-processing into NotifyOldGenerationExpansion
and introduces a new CanExpandOldGenerationBackground.

Bug: v8:10536
Change-Id: I54761226487434955f8a0dadf7c4dbb31c7955e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2204283
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67853}
parent 66dd6bdb
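
For orientation before the diff: after this change, PagedSpace::Expand only allocates and registers a new page, while the caller checks the heap limit first (CanExpandOldGeneration on the main thread, CanExpandOldGenerationBackground on background threads) and calls NotifyOldGenerationExpansion for non-compaction spaces. The sketch below models that calling pattern with simplified stand-in types; Heap, PagedSpace, and Page here are illustrative placeholders, not the real V8 classes.

// Minimal standalone sketch of the new calling pattern; the types below are
// simplified stand-ins, not the real V8 classes.
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

struct Page {};

struct Heap {
  size_t committed_pages = 0;
  size_t max_pages = 4;

  // Models Heap::CanExpandOldGeneration (main-thread limit check).
  bool CanExpandOldGeneration(size_t pages) const {
    return committed_pages + pages <= max_pages;
  }
  // Models the new background variant; kept as a separate entry point so the
  // background allocation path has its own limit check.
  bool CanExpandOldGenerationBackground(size_t pages) const {
    return committed_pages + pages <= max_pages;
  }
  // Models the page post-processing that moved out of Expand().
  void NotifyOldGenerationExpansion(Page* page) {
    (void)page;
    std::cout << "old generation grew by one page\n";
  }
};

struct PagedSpace {
  Heap* heap;
  bool is_compaction_space = false;
  std::vector<std::unique_ptr<Page>> pages;

  // Models the new Expand(): just allocate and register a page, returning it
  // (or nullptr on failure) so the caller can do the post-processing.
  Page* Expand() {
    pages.push_back(std::make_unique<Page>());
    ++heap->committed_pages;
    return pages.back().get();
  }

  // Models a slow-allocation path: limit check, then Expand, then notify.
  bool TryExpandOnSlowAllocation(bool on_background_thread) {
    const bool can_expand =
        on_background_thread ? heap->CanExpandOldGenerationBackground(1)
                             : heap->CanExpandOldGeneration(1);
    if (!can_expand) return false;
    Page* page = Expand();
    if (page == nullptr) return false;
    if (!is_compaction_space) heap->NotifyOldGenerationExpansion(page);
    return true;
  }
};

int main() {
  Heap heap;
  PagedSpace space{&heap};
  while (space.TryExpandOnSlowAllocation(/*on_background_thread=*/false)) {
  }
  std::cout << "stopped at the limit: " << heap.committed_pages << " pages\n";
}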
@@ -417,6 +417,11 @@ bool Heap::CanExpandOldGeneration(size_t size) {
   return memory_allocator()->Size() + size <= MaxReserved();
 }
 
+bool Heap::CanExpandOldGenerationBackground(size_t size) {
+  if (force_oom_) return false;
+  return memory_allocator()->Size() + size <= MaxReserved();
+}
+
 bool Heap::HasBeenSetUp() const {
   // We will always have a new space when the heap is set up.
   return new_space_ != nullptr;
@@ -5445,7 +5450,15 @@ void Heap::NotifyBootstrapComplete() {
   }
 }
 
-void Heap::NotifyOldGenerationExpansion() {
+void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
+                                        MemoryChunk* chunk) {
+  // Pages created during bootstrapping may contain immortal immovable objects.
+  if (!deserialization_complete()) {
+    chunk->MarkNeverEvacuate();
+  }
+  if (space == CODE_SPACE || space == CODE_LO_SPACE) {
+    isolate()->AddCodeMemoryChunk(chunk);
+  }
   const size_t kMemoryReducerActivationThreshold = 1 * MB;
   if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
       OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
...
@@ -449,7 +449,7 @@ class Heap {
   void NotifyBootstrapComplete();
 
-  void NotifyOldGenerationExpansion();
+  void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);
 
   inline Address* NewSpaceAllocationTopAddress();
   inline Address* NewSpaceAllocationLimitAddress();
@@ -1854,6 +1854,7 @@ class Heap {
   bool always_allocate() { return always_allocate_scope_count_ != 0; }
 
   V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
+  V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(size_t size);
 
   bool ShouldExpandOldGenerationOnSlowAllocation(
       LocalHeap* local_heap = nullptr);
@@ -2272,6 +2273,7 @@ class Heap {
   friend class IncrementalMarking;
   friend class IncrementalMarkingJob;
   friend class OffThreadHeap;
+  friend class OffThreadSpace;
   friend class OldLargeObjectSpace;
   template <typename ConcreteVisitor, typename MarkingState>
   friend class MarkingVisitorBase;
...
@@ -134,7 +134,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
       heap()->incremental_marking()->black_allocation(),
       heap()->incremental_marking()->marking_state()->IsBlack(object));
   page->InitializationMemoryFence();
-  heap()->NotifyOldGenerationExpansion();
+  heap()->NotifyOldGenerationExpansion(identity(), page);
   AllocationStep(object_size, object.address(), object_size);
   return object;
 }
@@ -514,7 +514,6 @@ AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
 void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
   OldLargeObjectSpace::AddPage(page, object_size);
   InsertChunkMapEntries(page);
-  heap()->isolate()->AddCodeMemoryChunk(page);
 }
 
 void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
...
@@ -149,7 +149,6 @@ void OffThreadHeap::Publish(Heap* heap) {
   heap->lo_space()->MergeOffThreadSpace(&lo_space_);
 
   DCHECK(heap->CanExpandOldGeneration(0));
-  heap->NotifyOldGenerationExpansion();
 
   // Possibly trigger a GC if we're close to exhausting the old generation.
   // TODO(leszeks): Adjust the heuristics here.
...
@@ -202,9 +202,7 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
       // Relinking requires the category to be unlinked.
       other->RemovePage(p);
       AddPage(p);
-      // These code pages were allocated by the CompactionSpace.
-      if (identity() == CODE_SPACE) heap()->isolate()->AddCodeMemoryChunk(p);
-      heap()->NotifyOldGenerationExpansion();
+      heap()->NotifyOldGenerationExpansion(identity(), p);
       DCHECK_IMPLIES(
           !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
           p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
@@ -318,31 +316,19 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
   }
 }
 
-bool PagedSpace::Expand() {
+Page* PagedSpace::Expand() {
+  // TODO(ulan): Remove the mutex as it seems redundant:
   // Always lock against the main space as we can only adjust capacity and
   // pages concurrently for the main paged space.
   base::MutexGuard guard(heap()->paged_space(identity())->mutex());
-  const int size = AreaSize();
-  if (!heap()->CanExpandOldGeneration(size)) return false;
   Page* page =
-      heap()->memory_allocator()->AllocatePage(size, this, executable());
-  if (page == nullptr) return false;
-  // Pages created during bootstrapping may contain immortal immovable objects.
-  if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
+      heap()->memory_allocator()->AllocatePage(AreaSize(), this, executable());
+  if (page == nullptr) return nullptr;
   AddPage(page);
-  // If this is a non-compaction code space, this is a previously unseen page.
-  if (identity() == CODE_SPACE && !is_compaction_space()) {
-    heap()->isolate()->AddCodeMemoryChunk(page);
-  }
   Free(page->area_start(), page->area_size(),
        SpaceAccountingMode::kSpaceAccounted);
-  if (!is_off_thread_space()) {
-    heap()->NotifyOldGenerationExpansion();
-  }
-  return true;
+  return page;
 }
 
 int PagedSpace::CountTotalPages() {
@@ -581,7 +567,7 @@ PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
   }
 
   if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
-      Expand()) {
+      heap()->CanExpandOldGenerationBackground(AreaSize()) && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (min_size_in_bytes <= free_list_->Available()));
     return TryAllocationFromFreeListBackground(
@@ -868,7 +854,7 @@ bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
   if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
     return true;
 
-  if (Expand()) {
+  if (heap()->CanExpandOldGenerationBackground(size_in_bytes) && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
     return RefillLinearAllocationAreaFromFreeList(
@@ -925,11 +911,18 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
     }
   }
 
-  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
-    DCHECK((CountTotalPages() > 1) ||
-           (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
-    return RefillLinearAllocationAreaFromFreeList(
-        static_cast<size_t>(size_in_bytes), origin);
+  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
+      heap()->CanExpandOldGeneration(AreaSize())) {
+    Page* page = Expand();
+    if (page) {
+      if (!is_compaction_space()) {
+        heap()->NotifyOldGenerationExpansion(identity(), page);
+      }
+      DCHECK((CountTotalPages() > 1) ||
+             (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+      return RefillLinearAllocationAreaFromFreeList(
+          static_cast<size_t>(size_in_bytes), origin);
+    }
   }
 
   if (is_compaction_space()) {
...
@@ -336,7 +336,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Expands the space by allocating a fixed number of pages. Returns false if
   // it cannot allocate requested number of pages from OS, or if the hard heap
   // size limit has been hit.
-  bool Expand();
+  Page* Expand();
 
   // Sets up a linear allocation area that fits the given number of bytes.
   // Returns false if there is not enough space and the caller has to retry
...