Commit 8686ea81 authored by Dan Elphick, committed by Commit Bot

[heap] Split out paged-spaces.h

Splits PagedSpace and its subclasses out into paged-spaces.h. Also
moves CodeObjectRegistry to code-object-registry.h.

Bug: v8:10473, v8:10506
Change-Id: I35fab1e545e958eb32f3e39a5e2ce8fb087c2a53
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2201763
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67811}
parent b079058b
@@ -2401,6 +2401,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/barrier.h",
"src/heap/basic-memory-chunk.cc",
"src/heap/basic-memory-chunk.h",
"src/heap/code-object-registry.cc",
"src/heap/code-object-registry.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/combined-heap.cc",
@@ -2473,6 +2475,9 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/off-thread-factory.h",
"src/heap/off-thread-heap.cc",
"src/heap/off-thread-heap.h",
"src/heap/paged-spaces-inl.h",
"src/heap/paged-spaces.cc",
"src/heap/paged-spaces.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/code-object-registry.h"
#include <algorithm>
#include "src/base/logging.h"
namespace v8 {
namespace internal {
void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
auto result = code_object_registry_newly_allocated_.insert(code);
USE(result);
DCHECK(result.second);
}
void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
code_object_registry_already_existing_.push_back(code);
}
void CodeObjectRegistry::Clear() {
code_object_registry_already_existing_.clear();
code_object_registry_newly_allocated_.clear();
}
void CodeObjectRegistry::Finalize() {
code_object_registry_already_existing_.shrink_to_fit();
}
bool CodeObjectRegistry::Contains(Address object) const {
return (code_object_registry_newly_allocated_.find(object) !=
code_object_registry_newly_allocated_.end()) ||
(std::binary_search(code_object_registry_already_existing_.begin(),
code_object_registry_already_existing_.end(),
object));
}
Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
Address address) const {
// Let's first find the object which comes right before address in the vector
// of already existing code objects.
Address already_existing_set_ = 0;
Address newly_allocated_set_ = 0;
if (!code_object_registry_already_existing_.empty()) {
auto it =
std::upper_bound(code_object_registry_already_existing_.begin(),
code_object_registry_already_existing_.end(), address);
if (it != code_object_registry_already_existing_.begin()) {
already_existing_set_ = *(--it);
}
}
// Next, let's find the object which comes right before address in the set
// of newly allocated code objects.
if (!code_object_registry_newly_allocated_.empty()) {
auto it = code_object_registry_newly_allocated_.upper_bound(address);
if (it != code_object_registry_newly_allocated_.begin()) {
newly_allocated_set_ = *(--it);
}
}
  // The code object which contains the address has to be in one of the two
  // data structures.
DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
// The address which is closest to the given address is the code object.
return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
: newly_allocated_set_;
}
} // namespace internal
} // namespace v8
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CODE_OBJECT_REGISTRY_H_
#define V8_HEAP_CODE_OBJECT_REGISTRY_H_
#include <set>
#include <vector>
#include "src/base/macros.h"
#include "src/common/globals.h"
namespace v8 {
namespace internal {
// The CodeObjectRegistry holds all start addresses of code objects of a given
// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
// to the actual code object.
class V8_EXPORT_PRIVATE CodeObjectRegistry {
public:
void RegisterNewlyAllocatedCodeObject(Address code);
void RegisterAlreadyExistingCodeObject(Address code);
void Clear();
void Finalize();
bool Contains(Address code) const;
Address GetCodeObjectStartFromInnerAddress(Address address) const;
private:
std::vector<Address> code_object_registry_already_existing_;
std::set<Address> code_object_registry_newly_allocated_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_CODE_OBJECT_REGISTRY_H_
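The registry above answers "which code object contains this inner pointer?" by keeping a sorted vector of pre-existing start addresses and an ordered set of newly allocated ones, then picking the closest start address at or below the query. Below is a minimal standalone sketch of that technique using plain STL containers, with uintptr_t standing in for v8::internal::Address; SimpleCodeObjectRegistry and the example addresses are hypothetical illustrations, not V8 code.

// Standalone sketch (not V8 code) mirroring the CodeObjectRegistry contract.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

class SimpleCodeObjectRegistry {
 public:
  // Assumed to be called in ascending address order, keeping the vector sorted.
  void RegisterAlreadyExistingCodeObject(std::uintptr_t code) {
    already_existing_.push_back(code);
  }
  void RegisterNewlyAllocatedCodeObject(std::uintptr_t code) {
    newly_allocated_.insert(code);
  }
  // Returns the largest registered start address <= address, i.e. the start
  // of the code object containing the inner pointer (0 if none).
  std::uintptr_t GetCodeObjectStartFromInnerAddress(std::uintptr_t address) const {
    std::uintptr_t candidate = 0;
    auto vit = std::upper_bound(already_existing_.begin(),
                                already_existing_.end(), address);
    if (vit != already_existing_.begin()) candidate = *(--vit);
    auto sit = newly_allocated_.upper_bound(address);
    if (sit != newly_allocated_.begin()) candidate = std::max(candidate, *(--sit));
    return candidate;
  }

 private:
  std::vector<std::uintptr_t> already_existing_;  // sorted, binary-searchable
  std::set<std::uintptr_t> newly_allocated_;      // ordered set
};

int main() {
  SimpleCodeObjectRegistry registry;
  registry.RegisterAlreadyExistingCodeObject(0x1000);
  registry.RegisterAlreadyExistingCodeObject(0x2000);
  registry.RegisterNewlyAllocatedCodeObject(0x2800);
  // Inner pointers map back to the closest preceding registered start address.
  assert(registry.GetCodeObjectStartFromInnerAddress(0x2a10) == 0x2800);
  assert(registry.GetCodeObjectStartFromInnerAddress(0x17f0) == 0x1000);
  return 0;
}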
@@ -7,7 +7,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/heap/paged-spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
namespace v8 {
......
@@ -23,7 +23,9 @@
// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/allocation-site-inl.h"
......
@@ -30,6 +30,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/code-stats.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -51,6 +52,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/safepoint.h"
......
@@ -7,6 +7,7 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
namespace v8 {
......
@@ -16,6 +16,7 @@
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
......
@@ -6,6 +6,7 @@
#include "src/base/platform/platform.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
......
@@ -5,8 +5,8 @@
#include "src/heap/off-thread-heap.h"
#include "src/common/globals.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/roots/roots.h"
#include "src/snapshot/references.h"
......
@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/heap/large-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_PAGED_SPACES_INL_H_
#define V8_HEAP_PAGED_SPACES_INL_H_
#include "src/heap/incremental-marking.h"
#include "src/heap/paged-spaces.h"
#include "src/objects/code-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// PagedSpaceObjectIterator
HeapObject PagedSpaceObjectIterator::Next() {
do {
HeapObject next_obj = FromCurrentPage();
if (!next_obj.is_null()) return next_obj;
} while (AdvanceToNextPage());
return HeapObject();
}
HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
cur_addr_ = space_->limit();
continue;
}
HeapObject obj = HeapObject::FromAddress(cur_addr_);
const int obj_size = obj.Size();
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller()) {
if (obj.IsCode()) {
DCHECK_IMPLIES(
space_->identity() != CODE_SPACE,
space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
DCHECK_CODEOBJECT_SIZE(obj_size, space_);
} else {
DCHECK_OBJECT_SIZE(obj_size);
}
return obj;
}
}
return HeapObject();
}
bool PagedSpace::Contains(Address addr) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
return Page::FromAddress(addr)->owner() == this;
}
bool PagedSpace::Contains(Object o) const {
if (!o.IsHeapObject()) return false;
return Page::FromAddress(o.ptr())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
free_list()->RemoveCategory(category);
});
}
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
added += category->available();
category->Relink(free_list());
});
DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
page->AvailableInFreeList() ==
page->AvailableInFreeListFromAllocatedBytes());
return added;
}
bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
if (allocation_info_.top() != kNullAddress) {
const Address object_address = object.address();
if ((allocation_info_.top() - object_size) == object_address) {
allocation_info_.set_top(object_address);
return true;
}
}
return false;
}
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
HeapObject PagedSpace::TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit()) return HeapObject();
allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top),
filler_size);
}
return HeapObject::FromAddress(current_top);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!object.is_null());
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_PAGED_SPACES_INL_H_
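TryAllocateLinearlyAligned above bumps the allocation top by an alignment filler plus the requested size, and gives up when the linear area cannot fit the result. The following is a standalone sketch of that arithmetic, assuming a simplified 8-byte double alignment; GetFillToAlign and TryAllocateLinearlyAligned below are hypothetical stand-ins for the Heap helper and the linear allocation area, not the real V8 API.

// Standalone sketch (not V8 code) of aligned bump-pointer allocation.
#include <cassert>
#include <cstdint>

constexpr std::uintptr_t kDoubleAlignment = 8;  // assumed alignment for the sketch

// Bytes of filler needed so that (top + filler) is double-aligned.
inline int GetFillToAlign(std::uintptr_t top) {
  std::uintptr_t misalignment = top & (kDoubleAlignment - 1);
  return misalignment == 0 ? 0 : static_cast<int>(kDoubleAlignment - misalignment);
}

// Bumps *top by filler + size; returns 0 if the linear area cannot fit the
// request, so the caller would have to refill the allocation area.
std::uintptr_t TryAllocateLinearlyAligned(std::uintptr_t* top, std::uintptr_t limit,
                                          int size_in_bytes) {
  int filler = GetFillToAlign(*top);
  std::uintptr_t object = *top + filler;
  std::uintptr_t new_top = object + size_in_bytes;
  if (new_top > limit) return 0;
  *top = new_top;
  return object;  // in V8 the filler gap would additionally be initialized
}

int main() {
  std::uintptr_t top = 0x1004, limit = 0x1100;
  std::uintptr_t obj = TryAllocateLinearlyAligned(&top, limit, 16);
  assert(obj == 0x1008 && top == 0x1018);  // 4 bytes of filler were inserted
  return 0;
}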
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/paged-spaces.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/logging/counters.h"
#include "src/objects/string.h"
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// PagedSpaceObjectIterator
PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
PagedSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {
heap->mark_compact_collector()->EnsureSweepingCompleted();
}
PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
PagedSpace* space,
Page* page)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
page_range_(page),
current_page_(page_range_.begin()) {
heap->mark_compact_collector()->EnsureSweepingCompleted();
#ifdef DEBUG
AllocationSpace owner = page->owner_identity();
DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
owner == CODE_SPACE);
#endif // DEBUG
}
PagedSpaceObjectIterator::PagedSpaceObjectIterator(OffThreadSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {}
// We have hit the end of the current page and should advance to the next page
// of objects.
bool PagedSpaceObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
return true;
}
Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
DCHECK_EQ(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocationStatistics();
page->SetOldGenerationPageFlags(!is_off_thread_space() &&
heap()->incremental_marking()->IsMarking());
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
page->list_node().Initialize();
page->InitializationMemoryFence();
return page;
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable, FreeList* free_list,
LocalSpaceKind local_space_kind)
: SpaceWithLinearArea(heap, space, free_list),
executable_(executable),
local_space_kind_(local_space_kind) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
void PagedSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
}
accounting_stats_.Clear();
}
void PagedSpace::RefillFreeList() {
// Any PagedSpace might invoke RefillFreeList. We filter all but our old
// generation spaces out.
if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
DCHECK_IMPLIES(is_local_space(), is_compaction_space());
DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
// We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
// entries here to make them unavailable for allocations.
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
p->ForAllFreeListCategories([this](FreeListCategory* category) {
category->Reset(free_list());
});
}
// Also merge old-to-new remembered sets if not scavenging because of
// data races: One thread might iterate remembered set, while another
// thread merges them.
if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
p->MergeOldToNewRememberedSets();
}
      // Only during compaction can pages actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
if (is_compaction_space()) {
DCHECK_NE(this, p->owner());
PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
base::MutexGuard guard(owner->mutex());
owner->RefineAllocatedBytesAfterSweeping(p);
owner->RemovePage(p);
added += AddPage(p);
} else {
base::MutexGuard guard(mutex());
DCHECK_EQ(this, p->owner());
RefineAllocatedBytesAfterSweeping(p);
added += RelinkFreeListCategories(p);
}
added += p->wasted_memory();
if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
}
}
}
void OffThreadSpace::RefillFreeList() {
// We should never try to refill the free list in off-thread space, because
// we know it will always be fully linear.
UNREACHABLE();
}
void PagedSpace::MergeLocalSpace(LocalSpace* other) {
base::MutexGuard guard(mutex());
DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
other->FreeLinearAllocationArea();
for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
allocations_origins_[i] += other->allocations_origins_[i];
}
// The linear allocation area of {other} should be destroyed now.
DCHECK_EQ(kNullAddress, other->top());
DCHECK_EQ(kNullAddress, other->limit());
bool merging_from_off_thread = other->is_off_thread_space();
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
Page* p = *(it++);
if (merging_from_off_thread) {
DCHECK_NULL(p->sweeping_slot_set());
p->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
if (heap()->incremental_marking()->black_allocation()) {
p->CreateBlackArea(p->area_start(), p->HighWaterMark());
}
} else {
p->MergeOldToNewRememberedSets();
}
    // Ensure that pages are initialized before objects on them are discovered
    // by concurrent markers.
p->InitializationMemoryFence();
// Relinking requires the category to be unlinked.
other->RemovePage(p);
AddPage(p);
// These code pages were allocated by the CompactionSpace.
if (identity() == CODE_SPACE) heap()->isolate()->AddCodeMemoryChunk(p);
DCHECK_IMPLIES(
!p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
    // TODO(leszeks): Here we should do an allocation step, but:
// 1. Allocation groups are currently not handled properly by the sampling
// allocation profiler, and
// 2. Observers might try to take the space lock, which isn't reentrant.
// We'll have to come up with a better solution for allocation stepping
// before shipping, which will likely be using LocalHeap.
}
DCHECK_EQ(0u, other->Size());
DCHECK_EQ(0u, other->Capacity());
}
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
for (Page* page : *this) {
size += page->CommittedPhysicalMemory();
}
return size;
}
bool PagedSpace::ContainsSlow(Address addr) const {
Page* p = Page::FromAddress(addr);
for (const Page* page : *this) {
if (page == p) return true;
}
return false;
}
void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
CHECK(page->SweepingDone());
auto marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
  // The live_bytes count on the page was accounted in the space's allocated
  // bytes counter. After sweeping, allocated_bytes() contains the accurate
  // live byte count on the page.
size_t old_counter = marking_state->live_bytes(page);
size_t new_counter = page->allocated_bytes();
DCHECK_GE(old_counter, new_counter);
if (old_counter > new_counter) {
DecreaseAllocatedBytes(old_counter - new_counter, page);
// Give the heap a chance to adjust counters in response to the
// more precise and smaller old generation size.
heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
}
marking_state->SetLiveBytes(page, 0);
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
base::MutexGuard guard(mutex());
Page* page = free_list()->GetPageForSize(size_in_bytes);
if (!page) return nullptr;
RemovePage(page);
return page;
}
size_t PagedSpace::AddPage(Page* page) {
CHECK(page->SweepingDone());
page->set_owner(this);
memory_chunk_list_.PushBack(page);
AccountCommitted(page->size());
IncreaseCapacity(page->area_size());
IncreaseAllocatedBytes(page->allocated_bytes(), page);
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
return RelinkFreeListCategories(page);
}
void PagedSpace::RemovePage(Page* page) {
CHECK(page->SweepingDone());
memory_chunk_list_.Remove(page);
UnlinkFreeListCategories(page);
DecreaseAllocatedBytes(page->allocated_bytes(), page);
DecreaseCapacity(page->area_size());
AccountUncommitted(page->size());
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
}
size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
AccountUncommitted(unused);
return unused;
}
void PagedSpace::ResetFreeList() {
for (Page* page : *this) {
free_list_->EvictFreeListItems(page);
}
DCHECK(free_list_->IsEmpty());
}
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
FreeLinearAllocationArea();
ResetFreeList();
for (Page* page : *this) {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
ShrinkPageToHighWaterMark(page);
}
}
bool PagedSpace::Expand() {
// Always lock against the main space as we can only adjust capacity and
// pages concurrently for the main paged space.
base::MutexGuard guard(heap()->paged_space(identity())->mutex());
const int size = AreaSize();
if (!heap()->CanExpandOldGeneration(size)) return false;
Page* page =
heap()->memory_allocator()->AllocatePage(size, this, executable());
if (page == nullptr) return false;
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
AddPage(page);
// If this is a non-compaction code space, this is a previously unseen page.
if (identity() == CODE_SPACE && !is_compaction_space()) {
heap()->isolate()->AddCodeMemoryChunk(page);
}
Free(page->area_start(), page->area_size(),
SpaceAccountingMode::kSpaceAccounted);
heap()->NotifyOldGenerationExpansion();
return true;
}
int PagedSpace::CountTotalPages() {
int count = 0;
for (Page* page : *this) {
count++;
USE(page);
}
return count;
}
void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != kNullAddress && top != limit && !is_off_thread_space() &&
heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
}
}
void PagedSpace::DecreaseLimit(Address new_limit) {
Address old_limit = limit();
DCHECK_LE(top(), new_limit);
DCHECK_GE(old_limit, new_limit);
if (new_limit != old_limit) {
SetTopAndLimit(top(), new_limit);
Free(new_limit, old_limit - new_limit,
SpaceAccountingMode::kSpaceAccounted);
if (heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
old_limit);
}
}
}
void PagedSpace::MarkLinearAllocationAreaBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
Address current_limit = limit();
if (current_top != kNullAddress && current_top != current_limit) {
Page::FromAllocationAreaAddress(current_top)
->CreateBlackArea(current_top, current_limit);
}
}
void PagedSpace::UnmarkLinearAllocationArea() {
Address current_top = top();
Address current_limit = limit();
if (current_top != kNullAddress && current_top != current_limit) {
Page::FromAllocationAreaAddress(current_top)
->DestroyBlackArea(current_top, current_limit);
}
}
void PagedSpace::FreeLinearAllocationArea() {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
Address current_top = top();
Address current_limit = limit();
if (current_top == kNullAddress) {
DCHECK_EQ(kNullAddress, current_limit);
return;
}
if (!is_off_thread_space() &&
heap()->incremental_marking()->black_allocation()) {
Page* page = Page::FromAllocationAreaAddress(current_top);
// Clear the bits in the unused black area.
if (current_top != current_limit) {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
marking_state->bitmap(page)->ClearRange(
page->AddressToMarkbitIndex(current_top),
page->AddressToMarkbitIndex(current_limit));
marking_state->IncrementLiveBytes(
page, -static_cast<int>(current_limit - current_top));
}
}
InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
SetTopAndLimit(kNullAddress, kNullAddress);
DCHECK_GE(current_limit, current_top);
// The code page of the linear allocation area needs to be unprotected
// because we are going to write a filler into that memory area below.
if (identity() == CODE_SPACE) {
heap()->UnprotectAndRegisterMemoryChunk(
MemoryChunk::FromAddress(current_top));
}
Free(current_top, current_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
void PagedSpace::ReleasePage(Page* page) {
DCHECK_EQ(
0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
page));
DCHECK_EQ(page->owner(), this);
free_list_->EvictFreeListItems(page);
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
allocation_info_.Reset(kNullAddress, kNullAddress);
}
heap()->isolate()->RemoveCodeMemoryChunk(page);
AccountUncommitted(page->size());
accounting_stats_.DecreaseCapacity(page->area_size());
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
void PagedSpace::SetReadable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadable();
}
}
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
}
void PagedSpace::SetReadAndWritable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndWritable();
}
}
std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(
new PagedSpaceObjectIterator(heap, this));
}
bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes, AllocationOrigin origin) {
DCHECK(IsAligned(size_in_bytes, kTaggedSize));
DCHECK_LE(top(), limit());
#ifdef DEBUG
if (top() != limit()) {
DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
}
#endif
// Don't free list allocate if there is linear space available.
DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
FreeLinearAllocationArea();
if (!is_local_space()) {
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
}
size_t new_node_size = 0;
FreeSpace new_node =
free_list_->Allocate(size_in_bytes, &new_node_size, origin);
if (new_node.is_null()) return false;
DCHECK_GE(new_node_size, size_in_bytes);
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = ComputeLimit(start, end, size_in_bytes);
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
if (limit != end) {
if (identity() == CODE_SPACE) {
heap()->UnprotectAndRegisterMemoryChunk(page);
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
return true;
}
base::Optional<std::pair<Address, size_t>>
PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!is_local_space() && identity() == OLD_SPACE);
DCHECK_EQ(origin, AllocationOrigin::kRuntime);
base::MutexGuard lock(&allocation_mutex_);
auto result = TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
// Retry the free list allocation.
auto result = TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
Sweeper::FreeSpaceMayContainInvalidatedSlots
invalidated_slots_in_free_space =
Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
const int kMaxPagesToSweep = 1;
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
invalidated_slots_in_free_space);
RefillFreeList();
if (static_cast<size_t>(max_freed) >= min_size_in_bytes)
return TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
Expand()) {
DCHECK((CountTotalPages() > 1) ||
(min_size_in_bytes <= free_list_->Available()));
return TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
// TODO(dinfuehr): Complete sweeping here and try allocation again.
return {};
}
base::Optional<std::pair<Address, size_t>>
PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
DCHECK_EQ(identity(), OLD_SPACE);
size_t new_node_size = 0;
FreeSpace new_node =
free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
if (new_node.is_null()) return {};
DCHECK_GE(new_node_size, min_size_in_bytes);
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = new_node.address() + used_size_in_bytes;
DCHECK_LE(limit, end);
DCHECK_LE(min_size_in_bytes, limit - start);
if (limit != end) {
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
return std::make_pair(start, used_size_in_bytes);
}
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
size_t external_space_bytes[kNumTypes];
size_t external_page_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
for (Page* page : *this) {
#ifdef V8_SHARED_RO_HEAP
if (identity() == RO_SPACE) {
CHECK_NULL(page->owner());
} else {
CHECK_EQ(page->owner(), this);
}
#else
CHECK_EQ(page->owner(), this);
#endif
for (int i = 0; i < kNumTypes; i++) {
external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(isolate->heap(), this, page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
CHECK(end_of_previous_object <= object.address());
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(ReadOnlyHeap::Contains(map) ||
isolate->heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
// The object itself should look OK.
object.ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
isolate->heap()->VerifyRememberedSetFor(object);
}
// All the interior pointers should be contained in the heap.
int size = object.Size();
object.IterateBody(map, size, visitor);
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
if (object.IsExternalString()) {
ExternalString external_string = ExternalString::cast(object);
size_t size = external_string.ExternalPayloadSize();
external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size =
ArrayBufferTracker::Lookup(isolate->heap(), array_buffer)
->PerIsolateAccountingLength();
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
external_space_bytes[t] += external_page_bytes[t];
}
}
for (int i = 0; i < kNumTypes; i++) {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
i == ExternalBackingStoreType::kArrayBuffer)
continue;
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
}
CHECK(allocation_pointer_found_in_space);
if (identity() == OLD_SPACE && V8_ARRAY_BUFFER_EXTENSION_BOOL) {
size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
}
#ifdef DEBUG
VerifyCountersAfterSweeping(isolate->heap());
#endif
}
void PagedSpace::VerifyLiveBytes() {
DCHECK_NE(identity(), RO_SPACE);
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(heap(), this, page);
int black_size = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
black_size += object.Size();
}
}
CHECK_LE(black_size, marking_state->live_bytes(page));
}
}
#endif // VERIFY_HEAP
#ifdef DEBUG
void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
size_t total_capacity = 0;
size_t total_allocated = 0;
for (Page* page : *this) {
DCHECK(page->SweepingDone());
total_capacity += page->area_size();
PagedSpaceObjectIterator it(heap, this, page);
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFreeSpaceOrFiller()) {
real_allocated += object.Size();
}
}
total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming
    // or object slack tracking happened after sweeping.
DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
}
DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
// We need to refine the counters on pages that are already swept and have
// not been moved over to the actual space. Otherwise, the AccountingStats
  // are just an over-approximation.
RefillFreeList();
size_t total_capacity = 0;
size_t total_allocated = 0;
auto marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (Page* page : *this) {
size_t page_allocated =
page->SweepingDone()
? page->allocated_bytes()
: static_cast<size_t>(marking_state->live_bytes(page));
total_capacity += page->area_size();
total_allocated += page_allocated;
DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
}
DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif
void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
Address new_limit = ComputeLimit(top(), limit(), min_size);
DCHECK_LE(new_limit, limit());
DecreaseLimit(new_limit);
}
// -----------------------------------------------------------------------------
// OldSpace implementation
void PagedSpace::PrepareForMarkCompact() {
// We don't have a linear allocation area while sweeping. It will be restored
// on the first allocation after the sweep.
FreeLinearAllocationArea();
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_->Reset();
}
size_t PagedSpace::SizeOfObjects() {
CHECK_GE(limit(), top());
DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
return Size() - (limit() - top());
}
bool PagedSpace::EnsureSweptAndRetryAllocation(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!is_local_space());
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
collector->EnsureSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
// entries.
return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
}
return false;
}
bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
base::Optional<base::MutexGuard> optional_mutex;
if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
identity() == OLD_SPACE) {
optional_mutex.emplace(&allocation_mutex_);
}
return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
}
bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
}
bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
return true;
if (Expand()) {
DCHECK((CountTotalPages() > 1) ||
(static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
return RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes), origin);
}
return false;
}
bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
// Non-compaction local spaces are not supported.
DCHECK_IMPLIES(is_local_space(), is_compaction_space());
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
return true;
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
if (FLAG_concurrent_sweeping && !is_compaction_space() &&
!collector->sweeper()->AreSweeperTasksRunning()) {
collector->EnsureSweepingCompleted();
}
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
// Retry the free list allocation.
if (RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes), origin))
return true;
if (SweepAndRetryAllocation(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
origin))
return true;
}
if (is_compaction_space()) {
// The main thread may have acquired all swept pages. Try to steal from
// it. This can only happen during young generation evacuation.
PagedSpace* main_space = heap()->paged_space(identity());
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
if (RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes), origin))
return true;
}
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
(static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
return RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes), origin);
}
if (is_compaction_space()) {
return SweepAndRetryAllocation(0, 0, size_in_bytes, origin);
} else {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail here which
// would indicate that there is not enough memory for the given allocation.
return EnsureSweptAndRetryAllocation(size_in_bytes, origin);
}
}
bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
int max_pages, int size_in_bytes,
AllocationOrigin origin) {
  // Clean up invalidated old-to-new refs for compaction space in the
// final atomic pause.
Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
: Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), required_freed_bytes, max_pages,
invalidated_slots_in_free_space);
RefillFreeList();
if (max_freed >= size_in_bytes)
return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
}
return false;
}
// -----------------------------------------------------------------------------
// MapSpace implementation
// TODO(dmercadier): use a heap instead of sorting like that.
// Using a heap will have multiple benefits:
// - for now, SortFreeList is only called after sweeping, which is somewhat
// late. Using a heap, sorting could be done online: FreeListCategories would
// be inserted in a heap (i.e., in a sorted manner).
// - SortFreeList is a bit fragile: any change to FreeListMap (or to
// MapSpace::free_list_) could break it.
void MapSpace::SortFreeList() {
using LiveBytesPagePair = std::pair<size_t, Page*>;
std::vector<LiveBytesPagePair> pages;
pages.reserve(CountTotalPages());
for (Page* p : *this) {
free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
pages.push_back(std::make_pair(p->allocated_bytes(), p));
}
// Sorting by least-allocated-bytes first.
std::sort(pages.begin(), pages.end(),
[](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
return a.first < b.first;
});
for (LiveBytesPagePair const& p : pages) {
    // Since AddCategory inserts at the head position, it reverses the order
    // produced by the sort above: the least-allocated-bytes page is added
    // first and therefore ends up last (and the most-allocated-bytes page
    // ends up first).
free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
}
}
#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif
} // namespace internal
} // namespace v8
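MapSpace::SortFreeList above sorts pages by allocated bytes and then re-adds their free-list categories; because AddCategory inserts at the head, the resulting category list is in reverse sorted order, as its comment notes. Below is a tiny standalone illustration of that head-insertion reversal, using a std::forward_list as a stand-in for the category list; this is not the real FreeList API.

// Standalone sketch (not V8 code): head insertion reverses sorted order.
#include <algorithm>
#include <cassert>
#include <forward_list>
#include <vector>

int main() {
  // Pages identified by their allocated bytes, initially unsorted.
  std::vector<int> allocated_bytes = {300, 100, 200};
  // Sort least-allocated first, as SortFreeList does.
  std::sort(allocated_bytes.begin(), allocated_bytes.end());
  // Re-adding each entry at the head (like FreeList::AddCategory) reverses
  // the order: the most-allocated page ends up at the front of the list.
  std::forward_list<int> free_list_order;
  for (int bytes : allocated_bytes) free_list_order.push_front(bytes);
  assert(free_list_order.front() == 300);
  return 0;
}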
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_
#include <memory>
#include <utility>
#include "src/base/bounds.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
class Heap;
class HeapObject;
class Isolate;
class ObjectVisitor;
// -----------------------------------------------------------------------------
// Heap object iterator in old/map spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);
// Creates a new object iterator in a given off-thread space.
explicit PagedSpaceObjectIterator(OffThreadSpace* space);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
  // Returns a null HeapObject when the iteration has ended.
inline HeapObject Next() override;
private:
// Fast (inlined) path of next().
inline HeapObject FromCurrentPage();
// Slow path of next(), goes into the next page. Returns false if the
// iteration has ended.
bool AdvanceToNextPage();
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
PagedSpace* space_;
PageRange page_range_;
PageRange::iterator current_page_;
};
class V8_EXPORT_PRIVATE PagedSpace
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
using const_iterator = ConstPageIterator;
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list,
LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
// Checks whether an object/address is in this space.
inline bool Contains(Address a) const;
inline bool Contains(Object o) const;
bool ContainsSlow(Address addr) const;
// Does the space need executable memory?
Executability executable() { return executable_; }
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
size_t Capacity() { return accounting_stats_.Capacity(); }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
// discovered during the sweeping they are subtracted from the size and added
// to the available and wasted totals. The free list is cleared as well.
void ClearAllocatorState() {
accounting_stats_.ClearSize();
free_list_->Reset();
}
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
size_t Available() override { return free_list_->Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted
// here.
size_t Size() override { return accounting_stats_.Size(); }
  // As Size(), but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
size_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
virtual size_t Waste() { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space and consider allocation
// alignment if needed.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space from a background
// thread.
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
if (size_in_bytes == 0) return 0;
heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
if (mode == SpaceAccountingMode::kSpaceAccounted) {
return AccountedFree(start, size_in_bytes);
} else {
return UnaccountedFree(start, size_in_bytes);
}
}
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
size_t AccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
size_t UnaccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
inline bool TryFreeLast(HeapObject object, int object_size);
void ResetFreeList();
  // Empties the space's linear allocation area, returning the unused area to
  // the free list.
void FreeLinearAllocationArea();
void MarkLinearAllocationAreaBlack();
void UnmarkLinearAllocationArea();
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.DecreaseAllocatedBytes(bytes, page);
}
void IncreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.IncreaseAllocatedBytes(bytes, page);
}
void DecreaseCapacity(size_t bytes) {
accounting_stats_.DecreaseCapacity(bytes);
}
void IncreaseCapacity(size_t bytes) {
accounting_stats_.IncreaseCapacity(bytes);
}
void RefineAllocatedBytesAfterSweeping(Page* page);
Page* InitializePage(MemoryChunk* chunk);
void ReleasePage(Page* page);
// Adds the page to this space and returns the number of bytes added to the
// free list of the space.
size_t AddPage(Page* page);
void RemovePage(Page* page);
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
void SetReadable();
void SetReadAndExecutable();
void SetReadAndWritable();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
SetReadable();
} else {
SetReadAndExecutable();
}
}
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
void VerifyLiveBytes();
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject obj) {}
#endif
#ifdef DEBUG
void VerifyCountersAfterSweeping(Heap* heap);
void VerifyCountersBeforeConcurrentSweeping();
// Print meta info and objects in this space.
void Print() override;
// Report code object related statistics
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
#endif
bool CanExpand(size_t size);
// Returns the number of total pages in this space.
int CountTotalPages();
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return static_cast<int>(area_size_); }
bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
bool is_off_thread_space() {
return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
}
bool is_compaction_space() {
return base::IsInRange(local_space_kind_,
LocalSpaceKind::kFirstCompactionSpace,
LocalSpaceKind::kLastCompactionSpace);
}
LocalSpaceKind local_space_kind() { return local_space_kind_; }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeLocalSpace(LocalSpace* other);
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
base::Mutex* mutex() { return &space_mutex_; }
inline void UnlinkFreeListCategories(Page* page);
inline size_t RelinkFreeListCategories(Page* page);
Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
const Page* first_page() const {
return reinterpret_cast<const Page*>(Space::first_page());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
const_iterator begin() const { return const_iterator(first_page()); }
const_iterator end() const { return const_iterator(nullptr); }
// Shrink immortal immovable pages of the space to be exactly the size needed
// using the high water mark.
void ShrinkImmortalImmovablePages();
size_t ShrinkPageToHighWaterMark(Page* page);
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
void SetLinearAllocationArea(Address top, Address limit);
private:
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(top, limit);
}
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsInlineAllocation() override {
return identity() == OLD_SPACE && !is_local_space();
}
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
bool HasPages() { return first_page() != nullptr; }
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate the requested number of pages from the OS, or if the
// hard heap size limit has been hit.
bool Expand();
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
inline bool EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline HeapObject AllocateLinearly(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns a null HeapObject if the linear allocation area does not fit the
// object. Otherwise, returns the object and writes the allocation size
// (object size + alignment filler size) to size_in_bytes.
inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes, AllocationOrigin origin);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
// has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
int max_pages,
int size_in_bytes,
AllocationOrigin origin);
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
Executability executable_;
LocalSpaceKind local_space_kind_;
size_t area_size_;
// Accounting information for this space.
AllocationStats accounting_stats_;
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
// Mutex guarding concurrent allocation.
base::Mutex allocation_mutex_;
friend class IncrementalMarking;
friend class MarkCompactCollector;
// Used in cctest.
friend class heap::HeapTester;
};
// -----------------------------------------------------------------------------
// Base class for compaction space and off-thread space.
class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
public:
LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
LocalSpaceKind local_space_kind)
: PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
local_space_kind) {
DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
}
protected:
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
};
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
LocalSpaceKind local_space_kind)
: LocalSpace(heap, id, executable, local_space_kind) {
DCHECK(is_compaction_space());
}
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin) override;
};
// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap,
LocalSpaceKind local_space_kind)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
local_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
local_space_kind) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
case OLD_SPACE:
return &old_space_;
case CODE_SPACE:
return &code_space_;
default:
UNREACHABLE();
}
UNREACHABLE();
}
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
};
// -----------------------------------------------------------------------------
// Old generation regular object space.
class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit OldSpace(Heap* heap)
: PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
FreeList::CreateFreeList()) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
MemoryChunkLayout::ObjectStartOffsetInDataPage();
}
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
type == ExternalBackingStoreType::kArrayBuffer)
return heap()->OldArrayBufferBytes();
return external_backing_store_bytes_[type];
}
};
// -----------------------------------------------------------------------------
// Old generation code object space.
class CodeSpace : public PagedSpace {
public:
// Creates a code space object. The constructor does not allocate pages
// from OS.
explicit CodeSpace(Heap* heap)
: PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
SLOW_DCHECK((space).page_low() <= (info).top() && \
(info).top() <= (space).page_high() && \
(info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
// Old space for all map objects
class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
: PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
return (size / Map::kSize) * Map::kSize;
}
}
void SortFreeList();
#ifdef VERIFY_HEAP
void VerifyObject(HeapObject obj) override;
#endif
};
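// A minimal standalone sketch (not V8 code) of the rounding performed by
// RoundSizeDownToObjectAlignment above: round a size down to a multiple of
// the object size, using the cheap masking form only when that size is a
// power of two. The sizes in the asserts are hypothetical.
#include <cstddef>

constexpr bool IsPowerOfTwo(std::size_t x) { return x != 0 && (x & (x - 1)) == 0; }

constexpr std::size_t RoundDownToObjectSize(std::size_t size,
                                            std::size_t object_size) {
  return IsPowerOfTwo(object_size) ? size & ~(object_size - 1)
                                   : (size / object_size) * object_size;
}

static_assert(RoundDownToObjectSize(1000, 64) == 960, "power-of-two case");
static_assert(RoundDownToObjectSize(1000, 88) == 968, "general case: 11 * 88");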
// -----------------------------------------------------------------------------
// Off-thread space that is used for folded allocation on a different thread.
class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
public:
explicit OffThreadSpace(Heap* heap)
: LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
LocalSpaceKind::kOffThreadSpace) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
// OffThreadSpace doesn't work with third-party heap.
UNREACHABLE();
#endif
}
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin) override;
void RefillFreeList() override;
};
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
public:
inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
// Return nullptr when the iterator is done.
inline MemoryChunk* next();
private:
enum State {
kOldSpaceState,
kMapState,
kCodeState,
kLargeObjectState,
kCodeLargeObjectState,
kFinishedState
};
Heap* heap_;
State state_;
PageIterator old_iterator_;
PageIterator code_iterator_;
PageIterator map_iterator_;
LargePageIterator lo_iterator_;
LargePageIterator code_lo_iterator_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_PAGED_SPACES_H_
......@@ -12,6 +12,7 @@
#include "src/base/macros.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
namespace v8 {
......
......@@ -63,42 +63,6 @@ HeapObject SemiSpaceObjectIterator::Next() {
return HeapObject();
}
// -----------------------------------------------------------------------------
// PagedSpaceObjectIterator
HeapObject PagedSpaceObjectIterator::Next() {
do {
HeapObject next_obj = FromCurrentPage();
if (!next_obj.is_null()) return next_obj;
} while (AdvanceToNextPage());
return HeapObject();
}
HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
cur_addr_ = space_->limit();
continue;
}
HeapObject obj = HeapObject::FromAddress(cur_addr_);
const int obj_size = obj.Size();
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller()) {
if (obj.IsCode()) {
DCHECK_IMPLIES(
space_->identity() != CODE_SPACE,
space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
DCHECK_CODEOBJECT_SIZE(obj_size, space_);
} else {
DCHECK_OBJECT_SIZE(obj_size);
}
return obj;
}
}
return HeapObject();
}
void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount) {
base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
......@@ -165,50 +129,6 @@ bool NewSpace::FromSpaceContains(Object o) const {
return from_space_.Contains(o);
}
bool PagedSpace::Contains(Address addr) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
return Page::FromAddress(addr)->owner() == this;
}
bool PagedSpace::Contains(Object o) const {
if (!o.IsHeapObject()) return false;
return Page::FromAddress(o.ptr())->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
free_list()->RemoveCategory(category);
});
}
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
added += category->available();
category->Relink(free_list());
});
DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
page->AvailableInFreeList() ==
page->AvailableInFreeListFromAllocatedBytes());
return added;
}
bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
if (allocation_info_.top() != kNullAddress) {
const Address object_address = object.address();
if ((allocation_info_.top() - object_size) == object_address) {
allocation_info_.set_top(object_address);
return true;
}
}
return false;
}
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
......@@ -345,122 +265,6 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
HeapObject PagedSpace::TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit()) return HeapObject();
allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
HeapObject::FromAddress(current_top),
filler_size);
}
return HeapObject::FromAddress(current_top);
}
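// A minimal standalone sketch (not V8 code) of the bump-pointer scheme used
// by AllocateLinearly and TryAllocateLinearlyAligned above: allocation bumps
// a top pointer towards a limit, and aligned allocation first pads with a
// filler so the object start is aligned. All names and sizes below are
// hypothetical.
#include <cstddef>
#include <cstdint>

struct LinearArea {
  std::uintptr_t top;
  std::uintptr_t limit;

  // Unaligned bump allocation; returns 0 if the remaining area is too small.
  std::uintptr_t Allocate(std::size_t size) {
    if (top + size > limit) return 0;
    std::uintptr_t result = top;
    top += size;
    return result;
  }

  // Aligned bump allocation: the filler precedes the object, so the total
  // consumed size is filler + size (cf. the *size_in_bytes adjustment above).
  std::uintptr_t AllocateAligned(std::size_t size, std::size_t alignment) {
    std::size_t filler = (alignment - top % alignment) % alignment;
    if (top + filler + size > limit) return 0;
    std::uintptr_t result = top + filler;
    top += filler + size;
    return result;
  }
};

// Usage with a hypothetical area [0x1004, 0x1100):
//   LinearArea area{0x1004, 0x1100};
//   area.AllocateAligned(16, 8);  // returns 0x1008, inserting 4 filler bytes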
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!object.is_null());
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return object;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
// The top_on_previous_step_ can be one byte beyond the current page.
DCHECK_NE(top(), kNullAddress);
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
top_on_previous_step_ = top();
}
size_t bytes_since_last =
top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
// -----------------------------------------------------------------------------
// NewSpace
......
......@@ -49,55 +49,6 @@ namespace internal {
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
// ----------------------------------------------------------------------------
// PagedSpaceObjectIterator
PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
PagedSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {
heap->mark_compact_collector()->EnsureSweepingCompleted();
}
PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
PagedSpace* space,
Page* page)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
page_range_(page),
current_page_(page_range_.begin()) {
heap->mark_compact_collector()->EnsureSweepingCompleted();
#ifdef DEBUG
AllocationSpace owner = page->owner_identity();
DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
owner == CODE_SPACE);
#endif // DEBUG
}
PagedSpaceObjectIterator::PagedSpaceObjectIterator(OffThreadSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
page_range_(space->first_page(), nullptr),
current_page_(page_range_.begin()) {}
// We have hit the end of the page and should advance to the next page of
// objects.
bool PagedSpaceObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
return true;
}
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
......@@ -468,82 +419,6 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
auto result = code_object_registry_newly_allocated_.insert(code);
USE(result);
DCHECK(result.second);
}
void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
code_object_registry_already_existing_.push_back(code);
}
void CodeObjectRegistry::Clear() {
code_object_registry_already_existing_.clear();
code_object_registry_newly_allocated_.clear();
}
void CodeObjectRegistry::Finalize() {
code_object_registry_already_existing_.shrink_to_fit();
}
bool CodeObjectRegistry::Contains(Address object) const {
return (code_object_registry_newly_allocated_.find(object) !=
code_object_registry_newly_allocated_.end()) ||
(std::binary_search(code_object_registry_already_existing_.begin(),
code_object_registry_already_existing_.end(),
object));
}
Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
Address address) const {
// Let's first find the object which comes right before address in the vector
// of already existing code objects.
Address already_existing_set_ = 0;
Address newly_allocated_set_ = 0;
if (!code_object_registry_already_existing_.empty()) {
auto it =
std::upper_bound(code_object_registry_already_existing_.begin(),
code_object_registry_already_existing_.end(), address);
if (it != code_object_registry_already_existing_.begin()) {
already_existing_set_ = *(--it);
}
}
// Next, let's find the object which comes right before address in the set
// of newly allocated code objects.
if (!code_object_registry_newly_allocated_.empty()) {
auto it = code_object_registry_newly_allocated_.upper_bound(address);
if (it != code_object_registry_newly_allocated_.begin()) {
newly_allocated_set_ = *(--it);
}
}
// The code object which contains the address has to be in one of the two
// data structures.
DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
// The candidate closest to the given address is the start of the code object.
return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
: newly_allocated_set_;
}
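// A minimal standalone sketch (not V8 code) of the inner-pointer lookup
// above: find the largest registered start address that is <= the query
// address, considering both a sorted vector and an ordered set of starts.
// All names and addresses below are hypothetical.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

using Addr = std::uintptr_t;

// |already_existing| must be sorted, mirroring the registry's vector.
Addr StartFromInnerAddress(const std::vector<Addr>& already_existing,
                           const std::set<Addr>& newly_allocated, Addr addr) {
  Addr from_vector = 0;
  auto vit = std::upper_bound(already_existing.begin(),
                              already_existing.end(), addr);
  if (vit != already_existing.begin()) from_vector = *(--vit);

  Addr from_set = 0;
  auto sit = newly_allocated.upper_bound(addr);
  if (sit != newly_allocated.begin()) from_set = *(--sit);

  // The object containing addr has to be in one of the two containers.
  assert(from_vector != 0 || from_set != 0);
  // The closest preceding start address wins.
  return std::max(from_vector, from_set);
}

// Usage: starts 0x1000 and 0x2000 were already existing, 0x1800 was newly
// allocated; the inner pointer 0x1900 maps back to the start 0x1800.
//   assert(StartFromInnerAddress({0x1000, 0x2000}, {0x1800}, 0x1900) == 0x1800);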
Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
DCHECK_EQ(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocationStatistics();
page->SetOldGenerationPageFlags(!is_off_thread_space() &&
heap()->incremental_marking()->IsMarking());
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
page->list_node().Initialize();
page->InitializationMemoryFence();
return page;
}
Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
bool in_to_space = (id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
......@@ -1160,297 +1035,6 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
return next_step;
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable, FreeList* free_list,
LocalSpaceKind local_space_kind)
: SpaceWithLinearArea(heap, space, free_list),
executable_(executable),
local_space_kind_(local_space_kind) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
void PagedSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
}
accounting_stats_.Clear();
}
void PagedSpace::RefillFreeList() {
// Any PagedSpace might invoke RefillFreeList. We filter all but our old
// generation spaces out.
if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
DCHECK_IMPLIES(is_local_space(), is_compaction_space());
DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
// We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
// entries here to make them unavailable for allocations.
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
p->ForAllFreeListCategories([this](FreeListCategory* category) {
category->Reset(free_list());
});
}
// Also merge old-to-new remembered sets if not scavenging because of
// data races: one thread might iterate the remembered set while another
// thread merges them.
if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
p->MergeOldToNewRememberedSets();
}
// Only during compaction can pages actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
if (is_compaction_space()) {
DCHECK_NE(this, p->owner());
PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
base::MutexGuard guard(owner->mutex());
owner->RefineAllocatedBytesAfterSweeping(p);
owner->RemovePage(p);
added += AddPage(p);
} else {
base::MutexGuard guard(mutex());
DCHECK_EQ(this, p->owner());
RefineAllocatedBytesAfterSweeping(p);
added += RelinkFreeListCategories(p);
}
added += p->wasted_memory();
if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
}
}
}
void OffThreadSpace::RefillFreeList() {
// We should never try to refill the free list in off-thread space, because
// we know it will always be fully linear.
UNREACHABLE();
}
void PagedSpace::MergeLocalSpace(LocalSpace* other) {
base::MutexGuard guard(mutex());
DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
other->FreeLinearAllocationArea();
for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
allocations_origins_[i] += other->allocations_origins_[i];
}
// The linear allocation area of {other} should be destroyed now.
DCHECK_EQ(kNullAddress, other->top());
DCHECK_EQ(kNullAddress, other->limit());
bool merging_from_off_thread = other->is_off_thread_space();
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
Page* p = *(it++);
if (merging_from_off_thread) {
DCHECK_NULL(p->sweeping_slot_set());
p->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
if (heap()->incremental_marking()->black_allocation()) {
p->CreateBlackArea(p->area_start(), p->HighWaterMark());
}
} else {
p->MergeOldToNewRememberedSets();
}
// Ensure that pages are initialized before objects on them are discovered by
// concurrent markers.
p->InitializationMemoryFence();
// Relinking requires the category to be unlinked.
other->RemovePage(p);
AddPage(p);
// These code pages were allocated by the CompactionSpace.
if (identity() == CODE_SPACE) heap()->isolate()->AddCodeMemoryChunk(p);
DCHECK_IMPLIES(
!p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
// TODO(leszeks): Here we should do an allocation step, but:
// 1. Allocation groups are currently not handled properly by the sampling
// allocation profiler, and
// 2. Observers might try to take the space lock, which isn't reentrant.
// We'll have to come up with a better solution for allocation stepping
// before shipping, which will likely be using LocalHeap.
}
DCHECK_EQ(0u, other->Size());
DCHECK_EQ(0u, other->Capacity());
}
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
for (Page* page : *this) {
size += page->CommittedPhysicalMemory();
}
return size;
}
bool PagedSpace::ContainsSlow(Address addr) const {
Page* p = Page::FromAddress(addr);
for (const Page* page : *this) {
if (page == p) return true;
}
return false;
}
void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
CHECK(page->SweepingDone());
auto marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
// The live bytes counter on the page was accounted in the space's allocated
// bytes counter. After sweeping, allocated_bytes() contains the accurate
// live byte count on the page.
size_t old_counter = marking_state->live_bytes(page);
size_t new_counter = page->allocated_bytes();
DCHECK_GE(old_counter, new_counter);
if (old_counter > new_counter) {
DecreaseAllocatedBytes(old_counter - new_counter, page);
// Give the heap a chance to adjust counters in response to the
// more precise and smaller old generation size.
heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
}
marking_state->SetLiveBytes(page, 0);
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
base::MutexGuard guard(mutex());
Page* page = free_list()->GetPageForSize(size_in_bytes);
if (!page) return nullptr;
RemovePage(page);
return page;
}
size_t PagedSpace::AddPage(Page* page) {
CHECK(page->SweepingDone());
page->set_owner(this);
memory_chunk_list_.PushBack(page);
AccountCommitted(page->size());
IncreaseCapacity(page->area_size());
IncreaseAllocatedBytes(page->allocated_bytes(), page);
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
return RelinkFreeListCategories(page);
}
void PagedSpace::RemovePage(Page* page) {
CHECK(page->SweepingDone());
memory_chunk_list_.Remove(page);
UnlinkFreeListCategories(page);
DecreaseAllocatedBytes(page->allocated_bytes(), page);
DecreaseCapacity(page->area_size());
AccountUncommitted(page->size());
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
}
size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
AccountUncommitted(unused);
return unused;
}
void PagedSpace::ResetFreeList() {
for (Page* page : *this) {
free_list_->EvictFreeListItems(page);
}
DCHECK(free_list_->IsEmpty());
}
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
FreeLinearAllocationArea();
ResetFreeList();
for (Page* page : *this) {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
ShrinkPageToHighWaterMark(page);
}
}
bool PagedSpace::Expand() {
// Always lock against the main space as we can only adjust capacity and
// pages concurrently for the main paged space.
base::MutexGuard guard(heap()->paged_space(identity())->mutex());
const int size = AreaSize();
if (!heap()->CanExpandOldGeneration(size)) return false;
Page* page =
heap()->memory_allocator()->AllocatePage(size, this, executable());
if (page == nullptr) return false;
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
AddPage(page);
// If this is a non-compaction code space, this is a previously unseen page.
if (identity() == CODE_SPACE && !is_compaction_space()) {
heap()->isolate()->AddCodeMemoryChunk(page);
}
Free(page->area_start(), page->area_size(),
SpaceAccountingMode::kSpaceAccounted);
heap()->NotifyOldGenerationExpansion();
return true;
}
int PagedSpace::CountTotalPages() {
int count = 0;
for (Page* page : *this) {
count++;
USE(page);
}
return count;
}
void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != kNullAddress && top != limit && !is_off_thread_space() &&
heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
}
}
void PagedSpace::DecreaseLimit(Address new_limit) {
Address old_limit = limit();
DCHECK_LE(top(), new_limit);
DCHECK_GE(old_limit, new_limit);
if (new_limit != old_limit) {
SetTopAndLimit(top(), new_limit);
Free(new_limit, old_limit - new_limit,
SpaceAccountingMode::kSpaceAccounted);
if (heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
old_limit);
}
}
}
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
DCHECK_GE(end - start, min_size);
......@@ -1485,433 +1069,6 @@ void SpaceWithLinearArea::PrintAllocationsOrigins() {
allocations_origins_[2]);
}
void PagedSpace::MarkLinearAllocationAreaBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
Address current_limit = limit();
if (current_top != kNullAddress && current_top != current_limit) {
Page::FromAllocationAreaAddress(current_top)
->CreateBlackArea(current_top, current_limit);
}
}
void PagedSpace::UnmarkLinearAllocationArea() {
Address current_top = top();
Address current_limit = limit();
if (current_top != kNullAddress && current_top != current_limit) {
Page::FromAllocationAreaAddress(current_top)
->DestroyBlackArea(current_top, current_limit);
}
}
void PagedSpace::FreeLinearAllocationArea() {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
Address current_top = top();
Address current_limit = limit();
if (current_top == kNullAddress) {
DCHECK_EQ(kNullAddress, current_limit);
return;
}
if (!is_off_thread_space() &&
heap()->incremental_marking()->black_allocation()) {
Page* page = Page::FromAllocationAreaAddress(current_top);
// Clear the bits in the unused black area.
if (current_top != current_limit) {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
marking_state->bitmap(page)->ClearRange(
page->AddressToMarkbitIndex(current_top),
page->AddressToMarkbitIndex(current_limit));
marking_state->IncrementLiveBytes(
page, -static_cast<int>(current_limit - current_top));
}
}
InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
SetTopAndLimit(kNullAddress, kNullAddress);
DCHECK_GE(current_limit, current_top);
// The code page of the linear allocation area needs to be unprotected
// because we are going to write a filler into that memory area below.
if (identity() == CODE_SPACE) {
heap()->UnprotectAndRegisterMemoryChunk(
MemoryChunk::FromAddress(current_top));
}
Free(current_top, current_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
void PagedSpace::ReleasePage(Page* page) {
DCHECK_EQ(
0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
page));
DCHECK_EQ(page->owner(), this);
free_list_->EvictFreeListItems(page);
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
allocation_info_.Reset(kNullAddress, kNullAddress);
}
heap()->isolate()->RemoveCodeMemoryChunk(page);
AccountUncommitted(page->size());
accounting_stats_.DecreaseCapacity(page->area_size());
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
void PagedSpace::SetReadable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadable();
}
}
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
}
void PagedSpace::SetReadAndWritable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndWritable();
}
}
std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(
new PagedSpaceObjectIterator(heap, this));
}
bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes, AllocationOrigin origin) {
DCHECK(IsAligned(size_in_bytes, kTaggedSize));
DCHECK_LE(top(), limit());
#ifdef DEBUG
if (top() != limit()) {
DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
}
#endif
// Don't allocate from the free list if there is linear space available.
DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
FreeLinearAllocationArea();
if (!is_local_space()) {
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
}
size_t new_node_size = 0;
FreeSpace new_node =
free_list_->Allocate(size_in_bytes, &new_node_size, origin);
if (new_node.is_null()) return false;
DCHECK_GE(new_node_size, size_in_bytes);
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = ComputeLimit(start, end, size_in_bytes);
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
if (limit != end) {
if (identity() == CODE_SPACE) {
heap()->UnprotectAndRegisterMemoryChunk(page);
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
return true;
}
base::Optional<std::pair<Address, size_t>>
PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!is_local_space() && identity() == OLD_SPACE);
DCHECK_EQ(origin, AllocationOrigin::kRuntime);
base::MutexGuard lock(&allocation_mutex_);
auto result = TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
// Retry the free list allocation.
auto result = TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
Sweeper::FreeSpaceMayContainInvalidatedSlots
invalidated_slots_in_free_space =
Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
const int kMaxPagesToSweep = 1;
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
invalidated_slots_in_free_space);
RefillFreeList();
if (static_cast<size_t>(max_freed) >= min_size_in_bytes)
return TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
Expand()) {
DCHECK((CountTotalPages() > 1) ||
(min_size_in_bytes <= free_list_->Available()));
return TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
// TODO(dinfuehr): Complete sweeping here and try allocation again.
return {};
}
base::Optional<std::pair<Address, size_t>>
PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
DCHECK_EQ(identity(), OLD_SPACE);
size_t new_node_size = 0;
FreeSpace new_node =
free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
if (new_node.is_null()) return {};
DCHECK_GE(new_node_size, min_size_in_bytes);
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = new_node.address() + used_size_in_bytes;
DCHECK_LE(limit, end);
DCHECK_LE(min_size_in_bytes, limit - start);
if (limit != end) {
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
return std::make_pair(start, used_size_in_bytes);
}
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
size_t external_space_bytes[kNumTypes];
size_t external_page_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
for (Page* page : *this) {
#ifdef V8_SHARED_RO_HEAP
if (identity() == RO_SPACE) {
CHECK_NULL(page->owner());
} else {
CHECK_EQ(page->owner(), this);
}
#else
CHECK_EQ(page->owner(), this);
#endif
for (int i = 0; i < kNumTypes; i++) {
external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(isolate->heap(), this, page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
CHECK(end_of_previous_object <= object.address());
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(ReadOnlyHeap::Contains(map) ||
isolate->heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
// The object itself should look OK.
object.ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
isolate->heap()->VerifyRememberedSetFor(object);
}
// All the interior pointers should be contained in the heap.
int size = object.Size();
object.IterateBody(map, size, visitor);
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
if (object.IsExternalString()) {
ExternalString external_string = ExternalString::cast(object);
size_t size = external_string.ExternalPayloadSize();
external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size =
ArrayBufferTracker::Lookup(isolate->heap(), array_buffer)
->PerIsolateAccountingLength();
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
external_space_bytes[t] += external_page_bytes[t];
}
}
for (int i = 0; i < kNumTypes; i++) {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
i == ExternalBackingStoreType::kArrayBuffer)
continue;
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
}
CHECK(allocation_pointer_found_in_space);
if (identity() == OLD_SPACE && V8_ARRAY_BUFFER_EXTENSION_BOOL) {
size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
}
#ifdef DEBUG
VerifyCountersAfterSweeping(isolate->heap());
#endif
}
void PagedSpace::VerifyLiveBytes() {
DCHECK_NE(identity(), RO_SPACE);
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(heap(), this, page);
int black_size = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
black_size += object.Size();
}
}
CHECK_LE(black_size, marking_state->live_bytes(page));
}
}
#endif // VERIFY_HEAP
#ifdef DEBUG
void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
size_t total_capacity = 0;
size_t total_allocated = 0;
for (Page* page : *this) {
DCHECK(page->SweepingDone());
total_capacity += page->area_size();
PagedSpaceObjectIterator it(heap, this, page);
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFreeSpaceOrFiller()) {
real_allocated += object.Size();
}
}
total_allocated += page->allocated_bytes();
// The real size can be smaller than the accounted size if array trimming or
// object slack tracking happened after sweeping.
DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
}
DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
// We need to refine the counters on pages that are already swept and have
// not been moved over to the actual space. Otherwise, the AccountingStats
// are just an over-approximation.
RefillFreeList();
size_t total_capacity = 0;
size_t total_allocated = 0;
auto marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (Page* page : *this) {
size_t page_allocated =
page->SweepingDone()
? page->allocated_bytes()
: static_cast<size_t>(marking_state->live_bytes(page));
total_capacity += page->area_size();
total_allocated += page_allocated;
DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
}
DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif
// -----------------------------------------------------------------------------
// NewSpace implementation
......@@ -2120,12 +1277,6 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
Address new_limit = ComputeLimit(top(), limit(), min_size);
DCHECK_LE(new_limit, limit());
DecreaseLimit(new_limit);
}
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!OldSpace::IsAtPageStart(top));
......@@ -2147,13 +1298,11 @@ bool NewSpace::AddFreshPage() {
return true;
}
bool NewSpace::AddFreshPageSynchronized() {
base::MutexGuard guard(&mutex_);
return AddFreshPage();
}
bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
......@@ -3353,199 +2502,5 @@ size_t FreeList::SumFreeLists() {
}
#endif
// -----------------------------------------------------------------------------
// OldSpace implementation
void PagedSpace::PrepareForMarkCompact() {
// We don't have a linear allocation area while sweeping. It will be restored
// on the first allocation after the sweep.
FreeLinearAllocationArea();
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_->Reset();
}
size_t PagedSpace::SizeOfObjects() {
CHECK_GE(limit(), top());
DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
return Size() - (limit() - top());
}
bool PagedSpace::EnsureSweptAndRetryAllocation(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!is_local_space());
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
collector->EnsureSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
// entries.
return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
}
return false;
}
bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
base::Optional<base::MutexGuard> optional_mutex;
if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
identity() == OLD_SPACE) {
optional_mutex.emplace(&allocation_mutex_);
}
return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
}
bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
}
bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
return true;
if (Expand()) {
DCHECK((CountTotalPages() > 1) ||
(static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
return RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes), origin);
}
return false;
}
bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
// Non-compaction local spaces are not supported.
DCHECK_IMPLIES(is_local_space(), is_compaction_space());
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
return true;
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
if (FLAG_concurrent_sweeping && !is_compaction_space() &&
!collector->sweeper()->AreSweeperTasksRunning()) {
collector->EnsureSweepingCompleted();
}
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
RefillFreeList();
// Retry the free list allocation.
if (RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes), origin))
return true;
if (SweepAndRetryAllocation(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
origin))
return true;
}
if (is_compaction_space()) {
// The main thread may have acquired all swept pages. Try to steal from
// it. This can only happen during young generation evacuation.
PagedSpace* main_space = heap()->paged_space(identity());
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
if (RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes), origin))
return true;
}
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
(static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
return RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes), origin);
}
if (is_compaction_space()) {
return SweepAndRetryAllocation(0, 0, size_in_bytes, origin);
} else {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail here which
// would indicate that there is not enough memory for the given allocation.
return EnsureSweptAndRetryAllocation(size_in_bytes, origin);
}
}
bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
int max_pages, int size_in_bytes,
AllocationOrigin origin) {
// Cleanup invalidated old-to-new refs for compaction space in the
// final atomic pause.
Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
: Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), required_freed_bytes, max_pages,
invalidated_slots_in_free_space);
RefillFreeList();
if (max_freed >= size_in_bytes)
return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
}
return false;
}
// -----------------------------------------------------------------------------
// MapSpace implementation
// TODO(dmercadier): use a heap instead of sorting like that.
// Using a heap will have multiple benefits:
// - for now, SortFreeList is only called after sweeping, which is somewhat
// late. Using a heap, sorting could be done online: FreeListCategories would
// be inserted into a heap (i.e., kept in sorted order).
// - SortFreeList is a bit fragile: any change to FreeListMap (or to
// MapSpace::free_list_) could break it.
void MapSpace::SortFreeList() {
using LiveBytesPagePair = std::pair<size_t, Page*>;
std::vector<LiveBytesPagePair> pages;
pages.reserve(CountTotalPages());
for (Page* p : *this) {
free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
pages.push_back(std::make_pair(p->allocated_bytes(), p));
}
// Sorting by least-allocated-bytes first.
std::sort(pages.begin(), pages.end(),
[](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
return a.first < b.first;
});
for (LiveBytesPagePair const& p : pages) {
// Since AddCategory inserts at the head position, it reverses the order
// produced by the sort above: the least-allocated page is added first and
// therefore ends up last, while the most-allocated page ends up first.
free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
}
}
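// A minimal standalone sketch (not V8 code) of the ordering effect described
// in the comment above: sorting pages by allocated bytes in ascending order
// and then inserting each at the head of a list leaves the most-allocated
// page at the front. Page identifiers and byte counts are hypothetical, and
// std::list stands in for the free-list category chain.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <list>
#include <utility>
#include <vector>

int main() {
  using LiveBytesPagePair = std::pair<std::size_t, char>;  // (bytes, page id)
  std::vector<LiveBytesPagePair> pages = {{300, 'A'}, {100, 'B'}, {200, 'C'}};

  // Sort by least-allocated-bytes first, as SortFreeList does.
  std::sort(pages.begin(), pages.end(),
            [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
              return a.first < b.first;
            });

  // Head insertion reverses the sorted order.
  std::list<char> category_order;
  for (const LiveBytesPagePair& p : pages) category_order.push_front(p.second);

  assert(category_order.front() == 'A');  // most-allocated page comes first
  assert(category_order.back() == 'B');   // least-allocated page comes last
  return 0;
}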
#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif
} // namespace internal
} // namespace v8
......@@ -18,7 +18,6 @@
#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
......@@ -539,24 +538,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
DISALLOW_COPY_AND_ASSIGN(Space);
};
// The CodeObjectRegistry holds all start addresses of code objects of a given
// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
// to the actual code object.
class V8_EXPORT_PRIVATE CodeObjectRegistry {
public:
void RegisterNewlyAllocatedCodeObject(Address code);
void RegisterAlreadyExistingCodeObject(Address code);
void Clear();
void Finalize();
bool Contains(Address code) const;
Address GetCodeObjectStartFromInnerAddress(Address address) const;
private:
std::vector<Address> code_object_registry_already_existing_;
std::set<Address> code_object_registry_newly_allocated_;
};
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
......@@ -1135,44 +1116,6 @@ class PageRange {
Page* end_;
};
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);
// Creates a new object iterator in a given off-thread space.
explicit PagedSpaceObjectIterator(OffThreadSpace* space);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
// Returns a null HeapObject when the iteration has ended.
inline HeapObject Next() override;
private:
// Fast (inlined) path of next().
inline HeapObject FromCurrentPage();
// Slow path of next(), goes into the next page. Returns false if the
// iteration has ended.
bool AdvanceToNextPage();
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
PagedSpace* space_;
PageRange page_range_;
PageRange::iterator current_page_;
};
// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
......@@ -1822,349 +1765,6 @@ class SpaceWithLinearArea : public Space {
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
class V8_EXPORT_PRIVATE PagedSpace
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
using const_iterator = ConstPageIterator;
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
FreeList* free_list,
LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
// Checks whether an object/address is in this space.
inline bool Contains(Address a) const;
inline bool Contains(Object o) const;
bool ContainsSlow(Address addr) const;
// Does the space need executable memory?
Executability executable() { return executable_; }
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
size_t Capacity() { return accounting_stats_.Capacity(); }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
// discovered during the sweeping they are subtracted from the size and added
// to the available and wasted totals. The free list is cleared as well.
void ClearAllocatorState() {
accounting_stats_.ClearSize();
free_list_->Reset();
}
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
size_t Available() override { return free_list_->Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted
// here.
size_t Size() override { return accounting_stats_.Size(); }
// As size, but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
size_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
virtual size_t Waste() { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space and consider allocation
// alignment if needed.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space from a background
// thread.
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
if (size_in_bytes == 0) return 0;
heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
if (mode == SpaceAccountingMode::kSpaceAccounted) {
return AccountedFree(start, size_in_bytes);
} else {
return UnaccountedFree(start, size_in_bytes);
}
}
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false, only the accounting stats are updated; no
// attempt is made to add the area to the free list.
size_t AccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
size_t UnaccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
inline bool TryFreeLast(HeapObject object, int object_size);
void ResetFreeList();
// Empty space linear allocation area, returning unused area to free list.
void FreeLinearAllocationArea();
void MarkLinearAllocationAreaBlack();
void UnmarkLinearAllocationArea();
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.DecreaseAllocatedBytes(bytes, page);
}
void IncreaseAllocatedBytes(size_t bytes, Page* page) {
accounting_stats_.IncreaseAllocatedBytes(bytes, page);
}
void DecreaseCapacity(size_t bytes) {
accounting_stats_.DecreaseCapacity(bytes);
}
void IncreaseCapacity(size_t bytes) {
accounting_stats_.IncreaseCapacity(bytes);
}
void RefineAllocatedBytesAfterSweeping(Page* page);
Page* InitializePage(MemoryChunk* chunk);
void ReleasePage(Page* page);
// Adds the page to this space and returns the number of bytes added to the
// free list of the space.
size_t AddPage(Page* page);
void RemovePage(Page* page);
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
void SetReadable();
void SetReadAndExecutable();
void SetReadAndWritable();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
SetReadable();
} else {
SetReadAndExecutable();
}
}
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
void VerifyLiveBytes();
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject obj) {}
#endif
#ifdef DEBUG
void VerifyCountersAfterSweeping(Heap* heap);
void VerifyCountersBeforeConcurrentSweeping();
// Print meta info and objects in this space.
void Print() override;
// Report code object related statistics.
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
#endif
bool CanExpand(size_t size);
// Returns the number of total pages in this space.
int CountTotalPages();
// Returns the size of the allocatable area on a page in this space.
inline int AreaSize() { return static_cast<int>(area_size_); }
bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
bool is_off_thread_space() {
return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
}
bool is_compaction_space() {
return base::IsInRange(local_space_kind_,
LocalSpaceKind::kFirstCompactionSpace,
LocalSpaceKind::kLastCompactionSpace);
}
LocalSpaceKind local_space_kind() { return local_space_kind_; }
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeLocalSpace(LocalSpace* other);
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
base::Mutex* mutex() { return &space_mutex_; }
inline void UnlinkFreeListCategories(Page* page);
inline size_t RelinkFreeListCategories(Page* page);
Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
const Page* first_page() const {
return reinterpret_cast<const Page*>(Space::first_page());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
const_iterator begin() const { return const_iterator(first_page()); }
const_iterator end() const { return const_iterator(nullptr); }
// Shrink immortal immovable pages of the space to be exactly the size needed
// using the high water mark.
void ShrinkImmortalImmovablePages();
size_t ShrinkPageToHighWaterMark(Page* page);
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
void SetLinearAllocationArea(Address top, Address limit);
private:
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(top, limit);
}
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsInlineAllocation() override {
return identity() == OLD_SPACE && !is_local_space();
}
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
bool HasPages() { return first_page() != nullptr; }
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate the requested number of pages from the OS, or if the
// hard heap size limit has been hit.
bool Expand();
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
inline bool EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline HeapObject AllocateLinearly(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
// (object size + alignment filler size) to size_in_bytes.
inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes, AllocationOrigin origin);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
// has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
int max_pages,
int size_in_bytes,
AllocationOrigin origin);
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
// Implementation of SlowRefillLinearAllocationArea. Returns false if there
// is not enough space and the caller has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
Executability executable_;
LocalSpaceKind local_space_kind_;
size_t area_size_;
// Accounting information for this space.
AllocationStats accounting_stats_;
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
// Mutex guarding concurrent allocation.
base::Mutex allocation_mutex_;
friend class IncrementalMarking;
friend class MarkCompactCollector;
// Used in cctest.
friend class heap::HeapTester;
};
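// A minimal sketch of the bump-pointer allocation pattern declared above:
// AllocateRaw() serves from the linear allocation area [top, limit) and only
// falls back to a slow refill from a free list when the area is too small.
// Every name below is a hypothetical stand-in, not the real V8 API.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical free-list entry: a block starting at |start| of |size| bytes.
struct FreeBlock {
  uintptr_t start;
  std::size_t size;
};

class BumpAllocator {
 public:
  explicit BumpAllocator(std::vector<FreeBlock> free_blocks)
      : free_blocks_(std::move(free_blocks)), top_(0), limit_(0) {}

  // Returns 0 when neither the linear area nor the free list can satisfy the
  // request (the real code would trigger a GC and retry instead).
  uintptr_t AllocateRaw(std::size_t size_in_bytes) {
    if (!EnsureLinearAllocationArea(size_in_bytes)) return 0;
    return AllocateLinearly(size_in_bytes);
  }

 private:
  // Makes sure the linear allocation area fits |size_in_bytes|.
  bool EnsureLinearAllocationArea(std::size_t size_in_bytes) {
    if (limit_ - top_ >= size_in_bytes) return true;
    return SlowRefillLinearAllocationArea(size_in_bytes);
  }

  // Fast path: bump the top pointer.
  uintptr_t AllocateLinearly(std::size_t size_in_bytes) {
    uintptr_t result = top_;
    top_ += size_in_bytes;
    return result;
  }

  // Slow path: take a large enough block from the free list and make it the
  // new linear allocation area.
  bool SlowRefillLinearAllocationArea(std::size_t size_in_bytes) {
    for (std::size_t i = 0; i < free_blocks_.size(); ++i) {
      if (free_blocks_[i].size >= size_in_bytes) {
        top_ = free_blocks_[i].start;
        limit_ = top_ + free_blocks_[i].size;
        free_blocks_.erase(free_blocks_.begin() +
                           static_cast<std::ptrdiff_t>(i));
        return true;
      }
    }
    return false;
  }

  std::vector<FreeBlock> free_blocks_;
  uintptr_t top_;
  uintptr_t limit_;
};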
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
// -----------------------------------------------------------------------------
......@@ -2642,180 +2242,6 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};
// -----------------------------------------------------------------------------
// Base class for compaction space and off-thread space.
class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
public:
LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
LocalSpaceKind local_space_kind)
: PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
local_space_kind) {
DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
}
protected:
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
};
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
LocalSpaceKind local_space_kind)
: LocalSpace(heap, id, executable, local_space_kind) {
DCHECK(is_compaction_space());
}
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin) override;
};
// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap,
LocalSpaceKind local_space_kind)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
local_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
local_space_kind) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
case OLD_SPACE:
return &old_space_;
case CODE_SPACE:
return &code_space_;
default:
UNREACHABLE();
}
UNREACHABLE();
}
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
};
// -----------------------------------------------------------------------------
// Old generation regular object space.
class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from the OS.
explicit OldSpace(Heap* heap)
: PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
FreeList::CreateFreeList()) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
MemoryChunkLayout::ObjectStartOffsetInDataPage();
}
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
type == ExternalBackingStoreType::kArrayBuffer)
return heap()->OldArrayBufferBytes();
return external_backing_store_bytes_[type];
}
};
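// A minimal sketch of the masking check behind OldSpace::IsAtPageStart above:
// an address sits at the first object slot of a page iff its offset within
// the page equals the fixed page-header size. The constants below are
// hypothetical, not the real V8 page layout.
#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 18;    // Assumed page size.
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kObjectStartOffsetInPage = 0x880;  // Assumed header size.

constexpr bool IsAtPageStart(uintptr_t addr) {
  return (addr & kPageAlignmentMask) == kObjectStartOffsetInPage;
}

static_assert(IsAtPageStart(3 * kPageSize + kObjectStartOffsetInPage),
              "first object slot of a page");
static_assert(!IsAtPageStart(3 * kPageSize), "page header is not an object");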
// -----------------------------------------------------------------------------
// Old generation code object space.
class CodeSpace : public PagedSpace {
public:
// Creates a code space object. The constructor does not allocate pages
// from the OS.
explicit CodeSpace(Heap* heap)
: PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
SLOW_DCHECK((space).page_low() <= (info).top() && \
(info).top() <= (space).page_high() && \
(info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
// Old space for all map objects
class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
: PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
return (size / Map::kSize) * Map::kSize;
}
}
void SortFreeList();
#ifdef VERIFY_HEAP
void VerifyObject(HeapObject obj) override;
#endif
};
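// A minimal sketch of the rounding used by
// MapSpace::RoundSizeDownToObjectAlignment above: if the object size is a
// power of two, rounding down is a simple mask; otherwise fall back to
// integer division. kObjectSize is a hypothetical stand-in for Map::kSize.
constexpr int kObjectSize = 32;  // Assumed to be a power of two here.

constexpr bool IsPowerOfTwo(int x) { return x > 0 && (x & (x - 1)) == 0; }

constexpr int RoundSizeDownToObjectAlignment(int size) {
  return IsPowerOfTwo(kObjectSize) ? size & ~(kObjectSize - 1)
                                   : (size / kObjectSize) * kObjectSize;
}

static_assert(RoundSizeDownToObjectAlignment(100) == 96, "rounds 100 down");
static_assert(RoundSizeDownToObjectAlignment(64) == 64, "keeps exact multiples");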
// -----------------------------------------------------------------------------
// Off-thread space that is used for folded allocation on a different thread.
class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
public:
explicit OffThreadSpace(Heap* heap)
: LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
LocalSpaceKind::kOffThreadSpace) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
// OffThreadSpace doesn't work with third-party heap.
UNREACHABLE();
#endif
}
protected:
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin) override;
void RefillFreeList() override;
};
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
public:
inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
// Return nullptr when the iterator is done.
inline MemoryChunk* next();
private:
enum State {
kOldSpaceState,
kMapState,
kCodeState,
kLargeObjectState,
kCodeLargeObjectState,
kFinishedState
};
Heap* heap_;
State state_;
PageIterator old_iterator_;
PageIterator code_iterator_;
PageIterator map_iterator_;
LargePageIterator lo_iterator_;
LargePageIterator code_lo_iterator_;
};
} // namespace internal
} // namespace v8
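// A minimal sketch of the state-machine chaining used by
// OldGenerationMemoryChunkIterator above, reduced to two sequences: next()
// drains the first, switches state, drains the second, and then keeps
// reporting exhaustion with nullptr. All names below are hypothetical.
#include <cstddef>
#include <vector>

class ChainedIterator {
 public:
  ChainedIterator(const std::vector<int>* first, const std::vector<int>* second)
      : state_(kFirstState), first_(first), second_(second), index_(0) {}

  // Returns nullptr when the iterator is done.
  const int* next() {
    switch (state_) {
      case kFirstState:
        if (index_ < first_->size()) return &(*first_)[index_++];
        state_ = kSecondState;
        index_ = 0;
        [[fallthrough]];
      case kSecondState:
        if (index_ < second_->size()) return &(*second_)[index_++];
        state_ = kFinishedState;
        [[fallthrough]];
      case kFinishedState:
        return nullptr;
    }
    return nullptr;
  }

 private:
  enum State { kFirstState, kSecondState, kFinishedState };
  State state_;
  const std::vector<int>* first_;
  const std::vector<int>* second_;
  std::size_t index_;
};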
......
......@@ -6,6 +6,7 @@
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
......
......@@ -6,10 +6,10 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"
......
......@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/spaces.h"
#include "src/heap/code-object-registry.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
......