Commit 4f9b30b0 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Make PagedSpace::Expand thread-safe

This moves page post-processing into NotifyOldGenerationExpansion
and introduces a new CanExpandOldGenerationBackground.

Bug: v8:10536
Change-Id: I54761226487434955f8a0dadf7c4dbb31c7955e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2204283
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67853}
parent 66dd6bdb
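
For orientation, here is a minimal standalone sketch (not part of the commit) of the division of labor the diff below establishes: Expand() only allocates and registers a page and reports success by returning the page, while the heap-limit check (CanExpandOldGeneration / CanExpandOldGenerationBackground) and the page post-processing (NotifyOldGenerationExpansion) move to the caller. All types and bodies below are simplified stand-ins, not the real V8 classes.

#include <cstddef>

// Simplified stand-ins for the real V8 types; names mirror the diff below.
struct Page {};

struct Heap {
  size_t committed = 0;
  size_t max_reserved = 8u * 1024 * 1024;

  // Mirrors Heap::CanExpandOldGenerationBackground: a pure size check that a
  // background thread can perform before attempting to expand.
  bool CanExpandOldGenerationBackground(size_t size) const {
    return committed + size <= max_reserved;
  }

  // Mirrors Heap::NotifyOldGenerationExpansion: page post-processing
  // (never-evacuate marking, code-chunk registration, memory-reducer
  // activation) is centralized here instead of inside PagedSpace::Expand.
  void NotifyOldGenerationExpansion(Page* /*chunk*/) {}
};

struct PagedSpace {
  Heap* heap;
  size_t area_size = 256 * 1024;

  // Mirrors the reworked PagedSpace::Expand: allocate and register the page,
  // return it (or nullptr) instead of a bool; no limit check and no
  // post-processing happen here any more. The real code asks the memory
  // allocator; a leaky `new` is enough for the sketch.
  Page* Expand() {
    Page* page = new Page();
    heap->committed += area_size;
    return page;
  }

  // Caller-side pattern from the diff: check the limit first, then expand,
  // then notify the heap (the real code skips the notification for
  // compaction spaces).
  bool SlowAllocate() {
    if (!heap->CanExpandOldGenerationBackground(area_size)) return false;
    Page* page = Expand();
    if (page == nullptr) return false;
    heap->NotifyOldGenerationExpansion(page);
    return true;
  }
};
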
@@ -417,6 +417,11 @@ bool Heap::CanExpandOldGeneration(size_t size) {
   return memory_allocator()->Size() + size <= MaxReserved();
 }
 
+bool Heap::CanExpandOldGenerationBackground(size_t size) {
+  if (force_oom_) return false;
+  return memory_allocator()->Size() + size <= MaxReserved();
+}
+
 bool Heap::HasBeenSetUp() const {
   // We will always have a new space when the heap is set up.
   return new_space_ != nullptr;
@@ -5445,7 +5450,15 @@ void Heap::NotifyBootstrapComplete() {
   }
 }
 
-void Heap::NotifyOldGenerationExpansion() {
+void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
+                                        MemoryChunk* chunk) {
+  // Pages created during bootstrapping may contain immortal immovable objects.
+  if (!deserialization_complete()) {
+    chunk->MarkNeverEvacuate();
+  }
+  if (space == CODE_SPACE || space == CODE_LO_SPACE) {
+    isolate()->AddCodeMemoryChunk(chunk);
+  }
   const size_t kMemoryReducerActivationThreshold = 1 * MB;
   if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
       OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
@@ -449,7 +449,7 @@ class Heap {
   void NotifyBootstrapComplete();
 
-  void NotifyOldGenerationExpansion();
+  void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);
 
   inline Address* NewSpaceAllocationTopAddress();
   inline Address* NewSpaceAllocationLimitAddress();
@@ -1854,6 +1854,7 @@ class Heap {
   bool always_allocate() { return always_allocate_scope_count_ != 0; }
 
   V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
+  V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(size_t size);
 
   bool ShouldExpandOldGenerationOnSlowAllocation(
       LocalHeap* local_heap = nullptr);
@@ -2272,6 +2273,7 @@ class Heap {
   friend class IncrementalMarking;
   friend class IncrementalMarkingJob;
   friend class OffThreadHeap;
+  friend class OffThreadSpace;
   friend class OldLargeObjectSpace;
   template <typename ConcreteVisitor, typename MarkingState>
   friend class MarkingVisitorBase;
@@ -134,7 +134,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
                  heap()->incremental_marking()->black_allocation(),
                  heap()->incremental_marking()->marking_state()->IsBlack(object));
   page->InitializationMemoryFence();
-  heap()->NotifyOldGenerationExpansion();
+  heap()->NotifyOldGenerationExpansion(identity(), page);
   AllocationStep(object_size, object.address(), object_size);
   return object;
 }
@@ -514,7 +514,6 @@ AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
 void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
   OldLargeObjectSpace::AddPage(page, object_size);
   InsertChunkMapEntries(page);
-  heap()->isolate()->AddCodeMemoryChunk(page);
 }
 
 void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
@@ -149,7 +149,6 @@ void OffThreadHeap::Publish(Heap* heap) {
   heap->lo_space()->MergeOffThreadSpace(&lo_space_);
 
   DCHECK(heap->CanExpandOldGeneration(0));
-  heap->NotifyOldGenerationExpansion();
 
   // Possibly trigger a GC if we're close to exhausting the old generation.
   // TODO(leszeks): Adjust the heuristics here.
@@ -202,9 +202,7 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
     // Relinking requires the category to be unlinked.
     other->RemovePage(p);
     AddPage(p);
-    // These code pages were allocated by the CompactionSpace.
-    if (identity() == CODE_SPACE) heap()->isolate()->AddCodeMemoryChunk(p);
-    heap()->NotifyOldGenerationExpansion();
+    heap()->NotifyOldGenerationExpansion(identity(), p);
     DCHECK_IMPLIES(
         !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
         p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
@@ -318,31 +316,19 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
   }
 }
 
-bool PagedSpace::Expand() {
+Page* PagedSpace::Expand() {
+  // TODO(ulan): Remove the mutex as it seems redundant:
   // Always lock against the main space as we can only adjust capacity and
   // pages concurrently for the main paged space.
   base::MutexGuard guard(heap()->paged_space(identity())->mutex());
 
-  const int size = AreaSize();
-
-  if (!heap()->CanExpandOldGeneration(size)) return false;
-
   Page* page =
-      heap()->memory_allocator()->AllocatePage(size, this, executable());
-  if (page == nullptr) return false;
-  // Pages created during bootstrapping may contain immortal immovable objects.
-  if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
+      heap()->memory_allocator()->AllocatePage(AreaSize(), this, executable());
+  if (page == nullptr) return nullptr;
   AddPage(page);
-  // If this is a non-compaction code space, this is a previously unseen page.
-  if (identity() == CODE_SPACE && !is_compaction_space()) {
-    heap()->isolate()->AddCodeMemoryChunk(page);
-  }
   Free(page->area_start(), page->area_size(),
        SpaceAccountingMode::kSpaceAccounted);
-  if (!is_off_thread_space()) {
-    heap()->NotifyOldGenerationExpansion();
-  }
-  return true;
+  return page;
 }
 
 int PagedSpace::CountTotalPages() {
@@ -581,7 +567,7 @@ PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
   }
 
   if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
-      Expand()) {
+      heap()->CanExpandOldGenerationBackground(AreaSize()) && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (min_size_in_bytes <= free_list_->Available()));
     return TryAllocationFromFreeListBackground(
@@ -868,7 +854,7 @@ bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
   if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
     return true;
 
-  if (Expand()) {
+  if (heap()->CanExpandOldGenerationBackground(size_in_bytes) && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
     return RefillLinearAllocationAreaFromFreeList(
@@ -925,11 +911,18 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
     }
   }
 
-  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
-    DCHECK((CountTotalPages() > 1) ||
-           (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
-    return RefillLinearAllocationAreaFromFreeList(
-        static_cast<size_t>(size_in_bytes), origin);
+  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
+      heap()->CanExpandOldGeneration(AreaSize())) {
+    Page* page = Expand();
+    if (page) {
+      if (!is_compaction_space()) {
+        heap()->NotifyOldGenerationExpansion(identity(), page);
+      }
+      DCHECK((CountTotalPages() > 1) ||
+             (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+      return RefillLinearAllocationAreaFromFreeList(
+          static_cast<size_t>(size_in_bytes), origin);
+    }
   }
 
   if (is_compaction_space()) {
@@ -336,7 +336,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Expands the space by allocating a fixed number of pages. Returns false if
   // it cannot allocate requested number of pages from OS, or if the hard heap
   // size limit has been hit.
-  bool Expand();
+  Page* Expand();
 
   // Sets up a linear allocation area that fits the given number of bytes.
   // Returns false if there is not enough space and the caller has to retry