Commit a3d5ad83 authored by Dan Elphick, committed by Commit Bot

[heap] Split out new-spaces.h

Splits out SemiSpace, NewSpace, SemiSpaceObjectIterator and related
classes into new-spaces.h.

Bug: v8:10473, v8:10506
Change-Id: I97ecceaf5df41263cc8ea75ff0018442bfeffa66
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2202903
Auto-Submit: Dan Elphick <delphick@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67831}
parent f81996e2
......@@ -2466,6 +2466,9 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/memory-measurement.h",
"src/heap/memory-reducer.cc",
"src/heap/memory-reducer.h",
"src/heap/new-spaces-inl.h",
"src/heap/new-spaces.cc",
"src/heap/new-spaces.h",
"src/heap/object-stats.cc",
"src/heap/object-stats.h",
"src/heap/objects-visiting-inl.h",
......
......@@ -25,6 +25,7 @@
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
......
......@@ -7,6 +7,7 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_NEW_SPACES_INL_H_
#define V8_HEAP_NEW_SPACES_INL_H_
#include "src/heap/new-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/tagged-impl.h"
#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// SemiSpace
bool SemiSpace::Contains(HeapObject o) const {
MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
if (memory_chunk->IsLargePage()) return false;
return id_ == kToSpace ? memory_chunk->IsToPage()
: memory_chunk->IsFromPage();
}
bool SemiSpace::Contains(Object o) const {
return o.IsHeapObject() && Contains(HeapObject::cast(o));
}
bool SemiSpace::ContainsSlow(Address a) const {
for (const Page* p : *this) {
if (p == MemoryChunk::FromAddress(a)) return true;
}
return false;
}
// --------------------------------------------------------------------------
// NewSpace
bool NewSpace::Contains(Object o) const {
return o.IsHeapObject() && Contains(HeapObject::cast(o));
}
bool NewSpace::Contains(HeapObject o) const {
return MemoryChunk::FromHeapObject(o)->InNewSpace();
}
bool NewSpace::ContainsSlow(Address a) const {
return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}
bool NewSpace::ToSpaceContainsSlow(Address a) const {
return to_space_.ContainsSlow(a);
}
bool NewSpace::ToSpaceContains(Object o) const { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object o) const {
return from_space_.Contains(o);
}
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator
HeapObject SemiSpaceObjectIterator::Next() {
while (current_ != limit_) {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
page = page->next_page();
DCHECK(page);
current_ = page->area_start();
if (current_ == limit_) return HeapObject();
}
HeapObject object = HeapObject::FromAddress(current_);
current_ += object.Size();
if (!object.IsFreeSpaceOrFiller()) {
return object;
}
}
return HeapObject();
}
// -----------------------------------------------------------------------------
// NewSpace
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
Address top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (allocation_info_.limit() - top <
static_cast<uintptr_t>(aligned_size_in_bytes)) {
// See if we can create room.
if (!EnsureAllocation(size_in_bytes, alignment)) {
return AllocationResult::Retry();
}
top = allocation_info_.top();
filler_size = Heap::GetFillToAlign(top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
}
HeapObject obj = HeapObject::FromAddress(top);
allocation_info_.set_top(top + aligned_size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (filler_size > 0) {
obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return obj;
}
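// Worked example of the alignment arithmetic above (illustrative only): on a
// 32-bit host requesting kDoubleAligned memory, if top ends at an address
// that is 4 modulo 8, Heap::GetFillToAlign returns 4, so a 16-byte request
// reserves 20 bytes; the returned object is preceded by a one-word filler and
// starts on the next 8-byte boundary.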
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
Address top = allocation_info_.top();
if (allocation_info_.limit() < top + size_in_bytes) {
// See if we can create room.
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
return AllocationResult::Retry();
}
top = allocation_info_.top();
}
HeapObject obj = HeapObject::FromAddress(top);
allocation_info_.set_top(top + size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return obj;
}
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top() < top_on_previous_step_) {
// Generated code decreased the top() pointer to do folded allocations
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_));
top_on_previous_step_ = top();
}
#ifdef V8_HOST_ARCH_32_BIT
return alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
return AllocateRawUnaligned(size_in_bytes, origin);
#endif
}
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard guard(&mutex_);
return AllocateRaw(size_in_bytes, alignment, origin);
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_NEW_SPACES_INL_H_
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/new-spaces.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces-inl.h"
namespace v8 {
namespace internal {
Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
bool in_to_space = (id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
Page* page = static_cast<Page*>(chunk);
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateLocalTracker();
page->list_node().Initialize();
#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
->ClearLiveness(page);
}
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
return page;
}
bool SemiSpace::EnsureCurrentCapacity() {
if (is_committed()) {
const int expected_pages =
static_cast<int>(current_capacity_ / Page::kPageSize);
MemoryChunk* current_page = first_page();
int actual_pages = 0;
// First iterate through the page list until we have seen expected_pages
// pages, if that many pages exist.
while (current_page != nullptr && actual_pages < expected_pages) {
actual_pages++;
current_page = current_page->list_node().next();
}
// Free all overallocated pages which are behind current_page.
while (current_page) {
MemoryChunk* next_current = current_page->list_node().next();
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
current_page);
current_page = next_current;
}
// Add more pages if we have fewer than expected_pages.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
actual_pages++;
current_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
current_page->SetFlags(first_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
}
}
return true;
}
// -----------------------------------------------------------------------------
// SemiSpace implementation
void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
current_capacity_ = minimum_capacity_;
maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
committed_ = false;
}
void SemiSpace::TearDown() {
// Properly uncommit memory to keep the allocator counters in sync.
if (is_committed()) {
Uncommit();
}
current_capacity_ = maximum_capacity_ = 0;
}
bool SemiSpace::Commit() {
DCHECK(!is_committed());
const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
// Pages in the new spaces can be moved to the old space by the full
// collector. Therefore, they must be initialized with the same FreeList as
// old pages.
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
}
memory_chunk_list_.PushBack(new_page);
}
Reset();
AccountCommitted(current_capacity_);
if (age_mark_ == kNullAddress) {
age_mark_ = first_page()->area_start();
}
committed_ = true;
return true;
}
bool SemiSpace::Uncommit() {
DCHECK(is_committed());
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
}
current_page_ = nullptr;
AccountUncommitted(current_capacity_);
committed_ = false;
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
return true;
}
size_t SemiSpace::CommittedPhysicalMemory() {
if (!is_committed()) return 0;
size_t size = 0;
for (Page* p : *this) {
size += p->CommittedPhysicalMemory();
}
return size;
}
bool SemiSpace::GrowTo(size_t new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
DCHECK(last_page());
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
}
memory_chunk_list_.PushBack(new_page);
marking_state->ClearLiveness(new_page);
// Duplicate the flags that were set on the old page.
new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
}
AccountCommitted(delta);
current_capacity_ = new_capacity;
return true;
}
void SemiSpace::RewindPages(int num_pages) {
DCHECK_GT(num_pages, 0);
DCHECK(last_page());
while (num_pages > 0) {
MemoryChunk* last = last_page();
memory_chunk_list_.Remove(last);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
num_pages--;
}
}
bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
DCHECK_GE(new_capacity, minimum_capacity_);
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, Page::kPageSize));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
RewindPages(delta_pages);
AccountUncommitted(delta);
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
current_capacity_ = new_capacity;
return true;
}
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
if (id_ == kToSpace) {
page->ClearFlag(MemoryChunk::FROM_PAGE);
page->SetFlag(MemoryChunk::TO_PAGE);
page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
page, 0);
} else {
page->SetFlag(MemoryChunk::FROM_PAGE);
page->ClearFlag(MemoryChunk::TO_PAGE);
}
DCHECK(page->InYoungGeneration());
}
}
void SemiSpace::Reset() {
DCHECK(first_page());
DCHECK(last_page());
current_page_ = first_page();
pages_used_ = 0;
}
void SemiSpace::RemovePage(Page* page) {
if (current_page_ == page) {
if (page->prev_page()) {
current_page_ = page->prev_page();
}
}
memory_chunk_list_.Remove(page);
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
}
void SemiSpace::PrependPage(Page* page) {
page->SetFlags(current_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
page->set_owner(this);
memory_chunk_list_.PushFront(page);
pages_used_++;
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
}
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
// We won't be swapping semispaces without data in them.
DCHECK(from->first_page());
DCHECK(to->first_page());
intptr_t saved_to_space_flags = to->current_page()->GetFlags();
// We swap all properties but id_.
std::swap(from->current_capacity_, to->current_capacity_);
std::swap(from->maximum_capacity_, to->maximum_capacity_);
std::swap(from->minimum_capacity_, to->minimum_capacity_);
std::swap(from->age_mark_, to->age_mark_);
std::swap(from->committed_, to->committed_);
std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
std::swap(from->current_page_, to->current_page_);
std::swap(from->external_backing_store_bytes_,
to->external_backing_store_bytes_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
from->FixPagesFlags(0, 0);
}
void SemiSpace::set_age_mark(Address mark) {
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
// Mark all pages up to the one containing mark.
for (Page* p : PageRange(space_start(), mark)) {
p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
}
}
std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
// Use a SemiSpaceObjectIterator (via NewSpace::GetObjectIterator) to iterate
// the to-space.
UNREACHABLE();
}
#ifdef DEBUG
void SemiSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
bool is_from_space = (id_ == kFromSpace);
size_t external_backing_store_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
: MemoryChunk::TO_PAGE));
CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
: MemoryChunk::FROM_PAGE));
CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
if (!is_from_space) {
// The pointers-from-here-are-interesting flag isn't updated dynamically
// on from-space pages, so it might be out of sync with the marking state.
if (page->heap()->incremental_marking()->IsMarking()) {
CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
} else {
CHECK(
!page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
}
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
}
CHECK_IMPLIES(page->list_node().prev(),
page->list_node().prev()->list_node().next() == page);
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
}
}
#endif
#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
// Both addresses must belong to the same semi-space.
Page* page = Page::FromAllocationAreaAddress(start);
Page* end_page = Page::FromAllocationAreaAddress(end);
SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
DCHECK_EQ(space, end_page->owner());
// Start address is before end address, either on same page,
// or end address is on a later page in the linked list of
// semi-space pages.
if (page == end_page) {
DCHECK_LE(start, end);
} else {
while (page != end_page) {
page = page->next_page();
}
DCHECK(page);
}
}
#endif
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator implementation.
SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
Initialize(space->first_allocatable_address(), space->top());
}
void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
}
size_t NewSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
size += from_space_.CommittedPhysicalMemory();
}
return size;
}
// -----------------------------------------------------------------------------
// NewSpace implementation
NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity,
size_t max_semispace_capacity)
: SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace) {
DCHECK(initial_semispace_capacity <= max_semispace_capacity);
to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
if (!to_space_.Commit()) {
V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
}
DCHECK(!from_space_.is_committed()); // No need to use memory yet.
ResetLinearAllocationArea();
}
void NewSpace::TearDown() {
allocation_info_.Reset(kNullAddress, kNullAddress);
to_space_.TearDown();
from_space_.TearDown();
}
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
// Double the semispace size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
size_t new_capacity =
Min(MaximumCapacity(),
static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
if (to_space_.GrowTo(new_capacity)) {
// Only grow from space if we managed to grow to-space.
if (!from_space_.GrowTo(new_capacity)) {
// If we managed to grow to-space but couldn't grow from-space,
// attempt to shrink to-space.
if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
FATAL("inconsistent state");
}
}
}
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::Shrink() {
size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
if (rounded_new_capacity < TotalCapacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
// Only shrink from-space if we managed to shrink to-space.
from_space_.Reset();
if (!from_space_.ShrinkTo(rounded_new_capacity)) {
// If we managed to shrink to-space but couldn't shrink from
// space, attempt to grow to-space again.
if (!to_space_.GrowTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
FATAL("inconsistent state");
}
}
}
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
bool NewSpace::Rebalance() {
// Order here is important to make use of the page pool.
return to_space_.EnsureCurrentCapacity() &&
from_space_.EnsureCurrentCapacity();
}
void NewSpace::UpdateLinearAllocationArea() {
// Make sure there are no unaccounted allocations.
DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
// The order of the following two stores is important.
// See the corresponding loads in ConcurrentMarking::Run.
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
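// Ordering note: background readers (the loads in ConcurrentMarking::Run
// referenced above) are expected to load original_top_ with acquire semantics
// (original_top_acquire() in new-spaces.h) and original_limit_ relaxed, so
// observing the updated top also guarantees visibility of the limit stored
// before it.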
void NewSpace::ResetLinearAllocationArea() {
// Do a step to account for memory allocated so far before resetting.
InlineAllocationStep(top(), top(), kNullAddress, 0);
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (Page* p : to_space_) {
marking_state->ClearLiveness(p);
// Concurrent marking may have local live bytes for this page.
heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
allocation_info_.set_limit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!OldSpace::IsAtPageStart(top));
// Do a step to account for memory allocated on previous page.
InlineAllocationStep(top, top, kNullAddress, 0);
if (!to_space_.AdvancePage()) {
// No more pages left to advance.
return false;
}
// Clear remainder of current page.
Address limit = Page::FromAllocationAreaAddress(top)->area_end();
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
UpdateLinearAllocationArea();
return true;
}
bool NewSpace::AddFreshPageSynchronized() {
base::MutexGuard guard(&mutex_);
return AddFreshPage();
}
bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (old_top + aligned_size_in_bytes > high) {
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
return false;
}
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
}
DCHECK(old_top + aligned_size_in_bytes <= high);
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
// or because incremental marking wants to get a chance to do a step,
// or because the idle scavenge job wants to get a chance to post a task.
// Set the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
Address soon_object = old_top + filler_size;
InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
}
return true;
}
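// Note: the limit is typically below page_high() when allocation observers
// are active (incremental marking, idle scavenge, allocation statistics
// gathering); see the comment on UpdateInlineAllocationLimit in new-spaces.h.
// Lowering the limit forces allocation back into this slow path periodically
// so that InlineAllocationStep can notify the observers before the limit is
// raised again.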
std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify(Isolate* isolate) {
// The allocation pointer should be in the space or at the very end.
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
// There should be objects packed in from the low address up to the
// allocation pointer.
Address current = to_space_.first_page()->area_start();
CHECK_EQ(current, to_space_.space_start());
size_t external_space_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
// The allocation pointer should not be in the middle of an object.
CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
current < top());
HeapObject object = HeapObject::FromAddress(current);
// The first word should be a map, and we expect all map pointers to
// be in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap());
CHECK(!object.IsAbstractCode());
// The object itself should look OK.
object.ObjectVerify(isolate);
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor(heap());
int size = object.Size();
object.IterateBody(map, size, &visitor);
if (object.IsExternalString()) {
ExternalString external_string = ExternalString::cast(object);
size_t size = external_string.ExternalPayloadSize();
external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size = ArrayBufferTracker::Lookup(heap(), array_buffer)
->PerIsolateAccountingLength();
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
current += size;
} else {
// At end of page, switch to next page.
Page* page = Page::FromAllocationAreaAddress(current)->next_page();
current = page->area_start();
}
}
for (int i = 0; i < kNumTypes; i++) {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
i == ExternalBackingStoreType::kArrayBuffer)
continue;
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
}
if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
}
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
CHECK_EQ(to_space_.id(), kToSpace);
from_space_.Verify();
to_space_.Verify();
}
#endif
} // namespace internal
} // namespace v8
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_NEW_SPACES_H_
#define V8_HEAP_NEW_SPACES_H_
#include <atomic>
#include <memory>
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/logging/log.h"
#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
class Heap;
class MemoryChunk;
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
// The mark-compact collector uses the memory of the first page in the from
// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
public:
using iterator = PageIterator;
using const_iterator = ConstPageIterator;
static void Swap(SemiSpace* from, SemiSpace* to);
SemiSpace(Heap* heap, SemiSpaceId semispace)
: Space(heap, NEW_SPACE, new NoFreeList()),
current_capacity_(0),
maximum_capacity_(0),
minimum_capacity_(0),
age_mark_(kNullAddress),
committed_(false),
id_(semispace),
current_page_(nullptr),
pages_used_(0) {}
inline bool Contains(HeapObject o) const;
inline bool Contains(Object o) const;
inline bool ContainsSlow(Address a) const;
void SetUp(size_t initial_capacity, size_t maximum_capacity);
void TearDown();
bool Commit();
bool Uncommit();
bool is_committed() { return committed_; }
// Grow the semispace to the new capacity. The new capacity requested must
// be larger than the current capacity and less than the maximum capacity.
bool GrowTo(size_t new_capacity);
// Shrinks the semispace to the new capacity. The new capacity requested
// must be more than the amount of used memory in the semispace and less
// than the current capacity.
bool ShrinkTo(size_t new_capacity);
bool EnsureCurrentCapacity();
Address space_end() { return memory_chunk_list_.back()->area_end(); }
// Returns the start address of the first page of the space.
Address space_start() {
DCHECK_NE(memory_chunk_list_.front(), nullptr);
return memory_chunk_list_.front()->area_start();
}
Page* current_page() { return current_page_; }
int pages_used() { return pages_used_; }
// Returns the start address of the current page of the space.
Address page_low() { return current_page_->area_start(); }
// Returns one past the end address of the current page of the space.
Address page_high() { return current_page_->area_end(); }
bool AdvancePage() {
Page* next_page = current_page_->next_page();
// We cannot expand if we reached the maximum number of pages already. Note
// that we need to account for the next page already for this check as we
// could potentially fill the whole page after advancing.
const bool reached_max_pages = (pages_used_ + 1) == max_pages();
if (next_page == nullptr || reached_max_pages) {
return false;
}
current_page_ = next_page;
pages_used_++;
return true;
}
// Resets the space to using the first page.
void Reset();
void RemovePage(Page* page);
void PrependPage(Page* page);
Page* InitializePage(MemoryChunk* chunk);
// Age mark accessors.
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
// Returns the current capacity of the semispace.
size_t current_capacity() { return current_capacity_; }
// Returns the maximum capacity of the semispace.
size_t maximum_capacity() { return maximum_capacity_; }
// Returns the initial capacity of the semispace.
size_t minimum_capacity() { return minimum_capacity_; }
SemiSpaceId id() { return id_; }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called:
size_t Size() override { UNREACHABLE(); }
size_t SizeOfObjects() override { return Size(); }
size_t Available() override { UNREACHABLE(); }
Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
const Page* first_page() const {
return reinterpret_cast<const Page*>(Space::first_page());
}
const Page* last_page() const {
return reinterpret_cast<const Page*>(Space::last_page());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
const_iterator begin() const { return const_iterator(first_page()); }
const_iterator end() const { return const_iterator(nullptr); }
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
#ifdef DEBUG
V8_EXPORT_PRIVATE void Print() override;
// Validate a range of addresses in a SemiSpace.
// The "from" address must be on a page prior to the "to" address,
// in the linked page order, or it must be earlier on the same page.
static void AssertValidRange(Address from, Address to);
#else
// Do nothing.
inline static void AssertValidRange(Address from, Address to) {}
#endif
#ifdef VERIFY_HEAP
virtual void Verify();
#endif
private:
void RewindPages(int num_pages);
inline int max_pages() {
return static_cast<int>(current_capacity_ / Page::kPageSize);
}
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
// The currently committed space capacity.
size_t current_capacity_;
// The maximum capacity that can be used by this space. A space cannot grow
// beyond that size.
size_t maximum_capacity_;
// The minimum capacity for the space. A space cannot shrink below this size.
size_t minimum_capacity_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
bool committed_;
SemiSpaceId id_;
Page* current_page_;
int pages_used_;
friend class NewSpace;
friend class SemiSpaceObjectIterator;
};
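// Typical lifecycle, as driven by NewSpace below (a sketch for orientation
// only; SetUp rounds the capacities down to multiples of Page::kPageSize):
//
//   SemiSpace to_space(heap, kToSpace);
//   to_space.SetUp(initial_capacity, maximum_capacity);
//   to_space.Commit();                      // allocate and link the pages
//   to_space.GrowTo(2 * initial_capacity);  // must stay <= maximum_capacity
//   ...
//   to_space.TearDown();                    // uncommits if still committed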
// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceObjectIterator : public ObjectIterator {
public:
// Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceObjectIterator(NewSpace* space);
inline HeapObject Next() override;
private:
void Initialize(Address start, Address end);
// The current iteration point.
Address current_;
// The end of iteration.
Address limit_;
};
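// Usage sketch (illustrative; the usual entry point is
// NewSpace::GetObjectIterator below):
//
//   SemiSpaceObjectIterator it(heap->new_space());
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... visit obj ...
//   }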
// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
class V8_EXPORT_PRIVATE NewSpace
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
using const_iterator = ConstPageIterator;
NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity, size_t max_semispace_capacity);
~NewSpace() override { TearDown(); }
inline bool ContainsSlow(Address a) const;
inline bool Contains(Object o) const;
inline bool Contains(HeapObject o) const;
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();
// Flip the pair of spaces.
void Flip();
// Grow the capacity of the semispaces. Assumes that they are not at
// their maximum capacity.
void Grow();
// Shrink the capacity of the semispaces.
void Shrink();
// Return the allocated bytes in the active semispace.
size_t Size() final {
DCHECK_GE(top(), to_space_.page_low());
return to_space_.pages_used() *
MemoryChunkLayout::AllocatableMemoryInDataPage() +
static_cast<size_t>(top() - to_space_.page_low());
}
size_t SizeOfObjects() final { return Size(); }
// Return the allocatable capacity of a semispace.
size_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
MemoryChunkLayout::AllocatableMemoryInDataPage();
}
// Return the current size of a semispace, allocatable and non-allocatable
// memory.
size_t TotalCapacity() {
DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return to_space_.current_capacity();
}
// Committed memory for NewSpace is the committed memory of both semi-spaces
// combined.
size_t CommittedMemory() final {
return from_space_.CommittedMemory() + to_space_.CommittedMemory();
}
size_t MaximumCommittedMemory() final {
return from_space_.MaximumCommittedMemory() +
to_space_.MaximumCommittedMemory();
}
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() final;
// Return the available bytes without growing.
size_t Available() final {
DCHECK_GE(Capacity(), Size());
return Capacity() - Size();
}
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
type == ExternalBackingStoreType::kArrayBuffer)
return heap()->YoungArrayBufferBytes();
DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
return to_space_.ExternalBackingStoreBytes(type);
}
size_t ExternalBackingStoreBytes() {
size_t result = 0;
for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
result +=
ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
}
return result;
}
size_t AllocatedSinceLastGC() {
const Address age_mark = to_space_.age_mark();
DCHECK_NE(age_mark, kNullAddress);
DCHECK_NE(top(), kNullAddress);
Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
Page* const last_page = Page::FromAllocationAreaAddress(top());
Page* current_page = age_mark_page;
size_t allocated = 0;
if (current_page != last_page) {
DCHECK_EQ(current_page, age_mark_page);
DCHECK_GE(age_mark_page->area_end(), age_mark);
allocated += age_mark_page->area_end() - age_mark;
current_page = current_page->next_page();
} else {
DCHECK_GE(top(), age_mark);
return top() - age_mark;
}
while (current_page != last_page) {
DCHECK_NE(current_page, age_mark_page);
allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
current_page = current_page->next_page();
}
DCHECK_GE(top(), current_page->area_start());
allocated += top() - current_page->area_start();
DCHECK_LE(allocated, Size());
return allocated;
}
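// Worked example for AllocatedSinceLastGC() above (numbers illustrative): if
// the age mark sits 4 KB before the end of its page, exactly one full page
// lies between that page and the page holding top(), and top() is 8 KB past
// that last page's area_start(), the result is
// 4 KB + AllocatableMemoryInDataPage() + 8 KB.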
void MovePageFromSpaceToSpace(Page* page) {
DCHECK(page->IsFromPage());
from_space_.RemovePage(page);
to_space_.PrependPage(page);
}
bool Rebalance();
// Return the maximum capacity of a semispace.
size_t MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
return to_space_.maximum_capacity();
}
bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
// Returns the initial capacity of a semispace.
size_t InitialTotalCapacity() {
DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
return to_space_.minimum_capacity();
}
void ResetOriginalTop() {
DCHECK_GE(top(), original_top_);
DCHECK_LE(top(), original_limit_);
original_top_.store(top(), std::memory_order_release);
}
Address original_top_acquire() {
return original_top_.load(std::memory_order_acquire);
}
Address original_limit_relaxed() {
return original_limit_.load(std::memory_order_relaxed);
}
// Returns the first allocatable address in the active semispace. This may
// be the address where the first object resides.
Address first_allocatable_address() { return to_space_.space_start(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); }
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Reset the allocation pointer to the beginning of the active semispace.
void ResetLinearAllocationArea();
// When inline allocation stepping is active, either because of incremental
// marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
// inline allocation every once in a while. This is done by setting
// allocation_info_.limit to be lower than the actual limit and increasing
// it in steps to guarantee that the observers are notified periodically.
void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
inline bool ToSpaceContainsSlow(Address a) const;
inline bool ToSpaceContains(Object o) const;
inline bool FromSpaceContains(Object o) const;
// Try to switch the active semispace to a new, empty page.
// Returns false if this isn't possible or reasonable (i.e., there
// are no pages, or the current page is already empty), or true
// if successful.
bool AddFreshPage();
bool AddFreshPageSynchronized();
#ifdef VERIFY_HEAP
// Verify the active semispace.
virtual void Verify(Isolate* isolate);
#endif
#ifdef DEBUG
// Print the active semispace.
void Print() override { to_space_.Print(); }
#endif
// Return whether the operation succeeded.
bool CommitFromSpaceIfNeeded() {
if (from_space_.is_committed()) return true;
return from_space_.Commit();
}
bool UncommitFromSpace() {
if (!from_space_.is_committed()) return true;
return from_space_.Uncommit();
}
bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
SemiSpace* active_space() { return &to_space_; }
Page* first_page() { return to_space_.first_page(); }
Page* last_page() { return to_space_.last_page(); }
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
const_iterator begin() const { return to_space_.begin(); }
const_iterator end() const { return to_space_.end(); }
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
SemiSpace& from_space() { return from_space_; }
SemiSpace& to_space() { return to_space_; }
private:
// Update linear allocation area to match the current to-space page.
void UpdateLinearAllocationArea();
base::Mutex mutex_;
// The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks.
std::atomic<Address> original_top_;
std::atomic<Address> original_limit_;
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
VirtualMemory reservation_;
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsInlineAllocation() override { return true; }
friend class SemiSpaceObjectIterator;
};
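// Minimal allocation sketch (illustrative; real callers go through
// Heap::AllocateRaw and its retry machinery, and size_in_bytes is a
// placeholder):
//
//   AllocationResult result =
//       heap->new_space()->AllocateRaw(size_in_bytes, kWordAligned);
//   HeapObject obj;
//   if (!result.To(&obj)) {
//     // Retry() was returned: the caller is expected to collect garbage
//     // (scavenge) and try again.
//   }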
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
SLOW_DCHECK((space).page_low() <= (info).top() && \
(info).top() <= (space).page_high() && \
(info).limit() <= (space).page_high())
} // namespace internal
} // namespace v8
#endif // V8_HEAP_NEW_SPACES_H_
......@@ -22,6 +22,8 @@ namespace internal {
class Heap;
class HeapObject;
class Isolate;
class LocalSpace;
class OffThreadSpace;
class ObjectVisitor;
// -----------------------------------------------------------------------------
......@@ -500,13 +502,6 @@ class CodeSpace : public PagedSpace {
: PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
SLOW_DCHECK((space).page_low() <= (info).top() && \
(info).top() <= (space).page_high() && \
(info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
// Old space for all map objects
......
......@@ -12,9 +12,10 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
#include "src/objects/code-inl.h"
#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
......@@ -42,27 +43,6 @@ PageRange::PageRange(Address start, Address limit)
#endif // DEBUG
}
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator
HeapObject SemiSpaceObjectIterator::Next() {
while (current_ != limit_) {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
page = page->next_page();
DCHECK(page);
current_ = page->area_start();
if (current_ == limit_) return HeapObject();
}
HeapObject object = HeapObject::FromAddress(current_);
current_ += object.Size();
if (!object.IsFreeSpaceOrFiller()) {
return object;
}
}
return HeapObject();
}
void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount) {
base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
......@@ -84,51 +64,6 @@ void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
}
// -----------------------------------------------------------------------------
// SemiSpace
bool SemiSpace::Contains(HeapObject o) const {
MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
if (memory_chunk->IsLargePage()) return false;
return id_ == kToSpace ? memory_chunk->IsToPage()
: memory_chunk->IsFromPage();
}
bool SemiSpace::Contains(Object o) const {
return o.IsHeapObject() && Contains(HeapObject::cast(o));
}
bool SemiSpace::ContainsSlow(Address a) const {
for (const Page* p : *this) {
if (p == MemoryChunk::FromAddress(a)) return true;
}
return false;
}
// --------------------------------------------------------------------------
// NewSpace
bool NewSpace::Contains(Object o) const {
return o.IsHeapObject() && Contains(HeapObject::cast(o));
}
bool NewSpace::Contains(HeapObject o) const {
return MemoryChunk::FromHeapObject(o)->InNewSpace();
}
bool NewSpace::ContainsSlow(Address a) const {
return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}
bool NewSpace::ToSpaceContainsSlow(Address a) const {
return to_space_.ContainsSlow(a);
}
bool NewSpace::ToSpaceContains(Object o) const { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object o) const {
return from_space_.Contains(o);
}
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
......@@ -265,100 +200,6 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
// -----------------------------------------------------------------------------
// NewSpace
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
Address top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (allocation_info_.limit() - top <
static_cast<uintptr_t>(aligned_size_in_bytes)) {
// See if we can create room.
if (!EnsureAllocation(size_in_bytes, alignment)) {
return AllocationResult::Retry();
}
top = allocation_info_.top();
filler_size = Heap::GetFillToAlign(top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
}
HeapObject obj = HeapObject::FromAddress(top);
allocation_info_.set_top(top + aligned_size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (filler_size > 0) {
obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return obj;
}
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
Address top = allocation_info_.top();
if (allocation_info_.limit() < top + size_in_bytes) {
// See if we can create room.
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
return AllocationResult::Retry();
}
top = allocation_info_.top();
}
HeapObject obj = HeapObject::FromAddress(top);
allocation_info_.set_top(top + size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return obj;
}
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (top() < top_on_previous_step_) {
// Generated code decreased the top() pointer to do folded allocations
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
Page::FromAllocationAreaAddress(top_on_previous_step_));
top_on_previous_step_ = top();
}
#ifdef V8_HOST_ARCH_32_BIT
return alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
return AllocateRawUnaligned(size_in_bytes, origin);
#endif
}
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard guard(&mutex_);
return AllocateRaw(size_in_bytes, alignment, origin);
}
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
......
......@@ -419,26 +419,6 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
bool in_to_space = (id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
Page* page = static_cast<Page*>(chunk);
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateLocalTracker();
page->list_node().Initialize();
#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
->ClearLiveness(page);
}
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
return page;
}
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
......@@ -1069,133 +1049,6 @@ void SpaceWithLinearArea::PrintAllocationsOrigins() {
allocations_origins_[2]);
}
// -----------------------------------------------------------------------------
// NewSpace implementation
NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity,
size_t max_semispace_capacity)
: SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace) {
DCHECK(initial_semispace_capacity <= max_semispace_capacity);
to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
if (!to_space_.Commit()) {
V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
}
DCHECK(!from_space_.is_committed()); // No need to use memory yet.
ResetLinearAllocationArea();
}
void NewSpace::TearDown() {
allocation_info_.Reset(kNullAddress, kNullAddress);
to_space_.TearDown();
from_space_.TearDown();
}
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
// Double the semispace size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
size_t new_capacity =
Min(MaximumCapacity(),
static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
if (to_space_.GrowTo(new_capacity)) {
// Only grow from space if we managed to grow to-space.
if (!from_space_.GrowTo(new_capacity)) {
// If we managed to grow to-space but couldn't grow from-space,
// attempt to shrink to-space.
if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
FATAL("inconsistent state");
}
}
}
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::Shrink() {
size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
if (rounded_new_capacity < TotalCapacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
// Only shrink from-space if we managed to shrink to-space.
from_space_.Reset();
if (!from_space_.ShrinkTo(rounded_new_capacity)) {
// If we managed to shrink to-space but couldn't shrink from
// space, attempt to grow to-space again.
if (!to_space_.GrowTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
FATAL("inconsistent state");
}
}
}
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
bool NewSpace::Rebalance() {
// Order here is important to make use of the page pool.
return to_space_.EnsureCurrentCapacity() &&
from_space_.EnsureCurrentCapacity();
}
bool SemiSpace::EnsureCurrentCapacity() {
if (is_committed()) {
const int expected_pages =
static_cast<int>(current_capacity_ / Page::kPageSize);
MemoryChunk* current_page = first_page();
int actual_pages = 0;
// First iterate through the pages list until expected pages if so many
// pages exist.
while (current_page != nullptr && actual_pages < expected_pages) {
actual_pages++;
current_page = current_page->list_node().next();
}
// Free all overallocated pages which are behind current_page.
while (current_page) {
MemoryChunk* next_current = current_page->list_node().next();
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
current_page);
current_page = next_current;
}
// Add more pages if we have less than expected_pages.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
actual_pages++;
current_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
current_page->SetFlags(first_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
}
}
return true;
}
LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
if (IsValid()) {
MakeIterable();
......@@ -1240,102 +1093,6 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
other.allocation_info_.Reset(kNullAddress, kNullAddress);
return *this;
}
void NewSpace::UpdateLinearAllocationArea() {
// Make sure there is no unaccounted allocations.
DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
// The order of the following two stores is important.
// See the corresponding loads in ConcurrentMarking::Run.
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::ResetLinearAllocationArea() {
// Do a step to account for memory allocated so far before resetting.
InlineAllocationStep(top(), top(), kNullAddress, 0);
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (Page* p : to_space_) {
marking_state->ClearLiveness(p);
// Concurrent marking may have local live bytes for this page.
heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
allocation_info_.set_limit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!OldSpace::IsAtPageStart(top));
// Do a step to account for memory allocated on the previous page.
InlineAllocationStep(top, top, kNullAddress, 0);
if (!to_space_.AdvancePage()) {
// No more pages left to advance.
return false;
}
// Clear remainder of current page.
Address limit = Page::FromAllocationAreaAddress(top)->area_end();
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
UpdateLinearAllocationArea();
return true;
}
bool NewSpace::AddFreshPageSynchronized() {
base::MutexGuard guard(&mutex_);
return AddFreshPage();
}
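// Makes sure the linear allocation area can hold size_in_bytes plus any
// alignment filler, advancing to a fresh page when the current one is full
// and notifying allocation observers if the limit had been lowered.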
bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (old_top + aligned_size_in_bytes > high) {
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
return false;
}
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
}
DCHECK(old_top + aligned_size_in_bytes <= high);
if (allocation_info_.limit() < high) {
// The limit has been lowered either because linear allocation was disabled,
// because incremental marking wants a chance to do a step, or because the
// idle scavenge job wants a chance to post a task. Set the new limit
// accordingly.
Address new_top = old_top + aligned_size_in_bytes;
Address soon_object = old_top + filler_size;
InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
}
return true;
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
......@@ -1402,405 +1159,6 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
}
}
std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify(Isolate* isolate) {
// The allocation pointer should be in the space or at the very end.
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
// There should be objects packed in from the low address up to the
// allocation pointer.
Address current = to_space_.first_page()->area_start();
CHECK_EQ(current, to_space_.space_start());
size_t external_space_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
// The allocation pointer should not be in the middle of an object.
CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
current < top());
HeapObject object = HeapObject::FromAddress(current);
// The first word should be a map, and we expect all map pointers to
// be in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap());
CHECK(!object.IsAbstractCode());
// The object itself should look OK.
object.ObjectVerify(isolate);
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor(heap());
int size = object.Size();
object.IterateBody(map, size, &visitor);
if (object.IsExternalString()) {
ExternalString external_string = ExternalString::cast(object);
size_t size = external_string.ExternalPayloadSize();
external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
} else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size = ArrayBufferTracker::Lookup(heap(), array_buffer)
->PerIsolateAccountingLength();
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
current += size;
} else {
// At end of page, switch to next page.
Page* page = Page::FromAllocationAreaAddress(current)->next_page();
current = page->area_start();
}
}
for (int i = 0; i < kNumTypes; i++) {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
i == ExternalBackingStoreType::kArrayBuffer)
continue;
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
}
if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
}
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
CHECK_EQ(to_space_.id(), kToSpace);
from_space_.Verify();
to_space_.Verify();
}
#endif
// -----------------------------------------------------------------------------
// SemiSpace implementation
void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
current_capacity_ = minimum_capacity_;
maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
committed_ = false;
}
void SemiSpace::TearDown() {
// Properly uncommit memory to keep the allocator counters in sync.
if (is_committed()) {
Uncommit();
}
current_capacity_ = maximum_capacity_ = 0;
}
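// Commits the semispace by allocating current_capacity_ / Page::kPageSize
// pooled pages; on failure, the pages added so far are rewound and false is
// returned.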
bool SemiSpace::Commit() {
DCHECK(!is_committed());
const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
// Pages in the new spaces can be moved to the old space by the full
// collector. Therefore, they must be initialized with the same FreeList as
// old pages.
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
}
memory_chunk_list_.PushBack(new_page);
}
Reset();
AccountCommitted(current_capacity_);
if (age_mark_ == kNullAddress) {
age_mark_ = first_page()->area_start();
}
committed_ = true;
return true;
}
bool SemiSpace::Uncommit() {
DCHECK(is_committed());
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
}
current_page_ = nullptr;
AccountUncommitted(current_capacity_);
committed_ = false;
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
return true;
}
size_t SemiSpace::CommittedPhysicalMemory() {
if (!is_committed()) return 0;
size_t size = 0;
for (Page* p : *this) {
size += p->CommittedPhysicalMemory();
}
return size;
}
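// Grows the committed capacity to new_capacity by appending pooled pages that
// copy the flip-related flags from the last existing page.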
bool SemiSpace::GrowTo(size_t new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
DCHECK(last_page());
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
}
memory_chunk_list_.PushBack(new_page);
marking_state->ClearLiveness(new_page);
// Duplicate the flags that were set on the old page.
new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
}
AccountCommitted(delta);
current_capacity_ = new_capacity;
return true;
}
void SemiSpace::RewindPages(int num_pages) {
DCHECK_GT(num_pages, 0);
DCHECK(last_page());
while (num_pages > 0) {
MemoryChunk* last = last_page();
memory_chunk_list_.Remove(last);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
num_pages--;
}
}
bool SemiSpace::ShrinkTo(size_t new_capacity) {
DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
DCHECK_GE(new_capacity, minimum_capacity_);
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const size_t delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, Page::kPageSize));
int delta_pages = static_cast<int>(delta / Page::kPageSize);
RewindPages(delta_pages);
AccountUncommitted(delta);
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
current_capacity_ = new_capacity;
return true;
}
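// Applies the given flags to the masked bit positions on every page, sets
// this space as the owner, and re-tags each page as TO_PAGE or FROM_PAGE
// according to this semispace's id.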
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
if (id_ == kToSpace) {
page->ClearFlag(MemoryChunk::FROM_PAGE);
page->SetFlag(MemoryChunk::TO_PAGE);
page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
page, 0);
} else {
page->SetFlag(MemoryChunk::FROM_PAGE);
page->ClearFlag(MemoryChunk::TO_PAGE);
}
DCHECK(page->InYoungGeneration());
}
}
void SemiSpace::Reset() {
DCHECK(first_page());
DCHECK(last_page());
current_page_ = first_page();
pages_used_ = 0;
}
void SemiSpace::RemovePage(Page* page) {
if (current_page_ == page) {
if (page->prev_page()) {
current_page_ = page->prev_page();
}
}
memory_chunk_list_.Remove(page);
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
}
void SemiSpace::PrependPage(Page* page) {
page->SetFlags(current_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
page->set_owner(this);
memory_chunk_list_.PushFront(page);
pages_used_++;
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
}
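// Swaps everything except id_ between the two semispaces, then fixes up the
// page flags so the TO_PAGE/FROM_PAGE markings match the new roles.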
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
// We won't be swapping semispaces without data in them.
DCHECK(from->first_page());
DCHECK(to->first_page());
intptr_t saved_to_space_flags = to->current_page()->GetFlags();
// We swap all properties but id_.
std::swap(from->current_capacity_, to->current_capacity_);
std::swap(from->maximum_capacity_, to->maximum_capacity_);
std::swap(from->minimum_capacity_, to->minimum_capacity_);
std::swap(from->age_mark_, to->age_mark_);
std::swap(from->committed_, to->committed_);
std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
std::swap(from->current_page_, to->current_page_);
std::swap(from->external_backing_store_bytes_,
to->external_backing_store_bytes_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
from->FixPagesFlags(0, 0);
}
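// Records the age mark and sets NEW_SPACE_BELOW_AGE_MARK on every page from
// the start of the space up to the page containing the mark.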
void SemiSpace::set_age_mark(Address mark) {
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
// Mark all pages up to the one containing mark.
for (Page* p : PageRange(space_start(), mark)) {
p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
}
}
std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
// Use NewSpace::GetObjectIterator(), which wraps a SemiSpaceObjectIterator,
// to iterate the to-space.
UNREACHABLE();
}
#ifdef DEBUG
void SemiSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
bool is_from_space = (id_ == kFromSpace);
size_t external_backing_store_bytes[kNumTypes];
for (int i = 0; i < kNumTypes; i++) {
external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
: MemoryChunk::TO_PAGE));
CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
: MemoryChunk::FROM_PAGE));
CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
if (!is_from_space) {
// The pointers-from-here-are-interesting flag isn't updated dynamically
// on from-space pages, so it might be out of sync with the marking state.
if (page->heap()->incremental_marking()->IsMarking()) {
CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
} else {
CHECK(
!page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
}
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
}
CHECK_IMPLIES(page->list_node().prev(),
page->list_node().prev()->list_node().next() == page);
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
}
}
#endif
#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
// Both addresses must belong to the same semi-space.
Page* page = Page::FromAllocationAreaAddress(start);
Page* end_page = Page::FromAllocationAreaAddress(end);
SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
DCHECK_EQ(space, end_page->owner());
// The start address is before the end address: either both are on the same
// page, or the end address is on a later page in the linked list of
// semi-space pages.
if (page == end_page) {
DCHECK_LE(start, end);
} else {
while (page != end_page) {
page = page->next_page();
}
DCHECK(page);
}
}
#endif
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator implementation.
SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
Initialize(space->first_allocatable_address(), space->top());
}
void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
}
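// Approximates the physically committed memory for the new space: the
// to-space pages, plus the from-space pages while from-space is committed
// (falls back to CommittedMemory() when the OS has no lazy commits).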
size_t NewSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
size += from_space_.CommittedPhysicalMemory();
}
return size;
}
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
......
......@@ -6,8 +6,6 @@
#define V8_HEAP_SPACES_H_
#include <atomic>
#include <list>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
......@@ -46,26 +44,16 @@ class TestCodePageAllocatorScope;
} // namespace heap
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class LinearAllocationArea;
class LocalArrayBufferTracker;
class LocalSpace;
class MemoryAllocator;
class MemoryChunk;
class MemoryChunkLayout;
class OffThreadSpace;
class Page;
class PagedSpace;
class SemiSpace;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;
// -----------------------------------------------------------------------------
// Heap structures:
......@@ -1765,473 +1753,6 @@ class SpaceWithLinearArea : public Space {
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
// The mark-compact collector uses the memory of the first page in the from
// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
public:
using iterator = PageIterator;
using const_iterator = ConstPageIterator;
static void Swap(SemiSpace* from, SemiSpace* to);
SemiSpace(Heap* heap, SemiSpaceId semispace)
: Space(heap, NEW_SPACE, new NoFreeList()),
current_capacity_(0),
maximum_capacity_(0),
minimum_capacity_(0),
age_mark_(kNullAddress),
committed_(false),
id_(semispace),
current_page_(nullptr),
pages_used_(0) {}
inline bool Contains(HeapObject o) const;
inline bool Contains(Object o) const;
inline bool ContainsSlow(Address a) const;
void SetUp(size_t initial_capacity, size_t maximum_capacity);
void TearDown();
bool Commit();
bool Uncommit();
bool is_committed() { return committed_; }
// Grow the semispace to the new capacity. The new capacity requested must
// be larger than the current capacity and at most the maximum capacity.
bool GrowTo(size_t new_capacity);
// Shrinks the semispace to the new capacity. The new capacity requested
// must be more than the amount of used memory in the semispace and less
// than the current capacity.
bool ShrinkTo(size_t new_capacity);
bool EnsureCurrentCapacity();
Address space_end() { return memory_chunk_list_.back()->area_end(); }
// Returns the start address of the first page of the space.
Address space_start() {
DCHECK_NE(memory_chunk_list_.front(), nullptr);
return memory_chunk_list_.front()->area_start();
}
Page* current_page() { return current_page_; }
int pages_used() { return pages_used_; }
// Returns the start address of the current page of the space.
Address page_low() { return current_page_->area_start(); }
// Returns one past the end address of the current page of the space.
Address page_high() { return current_page_->area_end(); }
bool AdvancePage() {
Page* next_page = current_page_->next_page();
// We cannot expand if we reached the maximum number of pages already. Note
// that we need to account for the next page already for this check as we
// could potentially fill the whole page after advancing.
const bool reached_max_pages = (pages_used_ + 1) == max_pages();
if (next_page == nullptr || reached_max_pages) {
return false;
}
current_page_ = next_page;
pages_used_++;
return true;
}
// Resets the space to using the first page.
void Reset();
void RemovePage(Page* page);
void PrependPage(Page* page);
Page* InitializePage(MemoryChunk* chunk);
// Age mark accessors.
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
// Returns the current capacity of the semispace.
size_t current_capacity() { return current_capacity_; }
// Returns the maximum capacity of the semispace.
size_t maximum_capacity() { return maximum_capacity_; }
// Returns the initial capacity of the semispace.
size_t minimum_capacity() { return minimum_capacity_; }
SemiSpaceId id() { return id_; }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
// If we don't define these here, SemiSpace will be abstract. However,
// they should never be called:
size_t Size() override { UNREACHABLE(); }
size_t SizeOfObjects() override { return Size(); }
size_t Available() override { UNREACHABLE(); }
Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
const Page* first_page() const {
return reinterpret_cast<const Page*>(Space::first_page());
}
const Page* last_page() const {
return reinterpret_cast<const Page*>(Space::last_page());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
const_iterator begin() const { return const_iterator(first_page()); }
const_iterator end() const { return const_iterator(nullptr); }
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
#ifdef DEBUG
V8_EXPORT_PRIVATE void Print() override;
// Validate a range of addresses in a SemiSpace.
// The "from" address must be on a page prior to the "to" address,
// in the linked page order, or it must be earlier on the same page.
static void AssertValidRange(Address from, Address to);
#else
// Do nothing.
inline static void AssertValidRange(Address from, Address to) {}
#endif
#ifdef VERIFY_HEAP
virtual void Verify();
#endif
private:
void RewindPages(int num_pages);
inline int max_pages() {
return static_cast<int>(current_capacity_ / Page::kPageSize);
}
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
// The currently committed space capacity.
size_t current_capacity_;
// The maximum capacity that can be used by this space. A space cannot grow
// beyond that size.
size_t maximum_capacity_;
// The minimum capacity for the space. A space cannot shrink below this size.
size_t minimum_capacity_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
bool committed_;
SemiSpaceId id_;
Page* current_page_;
int pages_used_;
friend class NewSpace;
friend class SemiSpaceObjectIterator;
};
// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceObjectIterator : public ObjectIterator {
public:
// Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceObjectIterator(NewSpace* space);
inline HeapObject Next() override;
private:
void Initialize(Address start, Address end);
// The current iteration point.
Address current_;
// The end of iteration.
Address limit_;
};
// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
class V8_EXPORT_PRIVATE NewSpace
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
using iterator = PageIterator;
using const_iterator = ConstPageIterator;
NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity, size_t max_semispace_capacity);
~NewSpace() override { TearDown(); }
inline bool ContainsSlow(Address a) const;
inline bool Contains(Object o) const;
inline bool Contains(HeapObject o) const;
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();
// Flip the pair of spaces.
void Flip();
// Grow the capacity of the semispaces. Assumes that they are not at
// their maximum capacity.
void Grow();
// Shrink the capacity of the semispaces.
void Shrink();
// Return the allocated bytes in the active semispace.
size_t Size() final {
DCHECK_GE(top(), to_space_.page_low());
return to_space_.pages_used() *
MemoryChunkLayout::AllocatableMemoryInDataPage() +
static_cast<size_t>(top() - to_space_.page_low());
}
size_t SizeOfObjects() final { return Size(); }
// Return the allocatable capacity of a semispace.
size_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
MemoryChunkLayout::AllocatableMemoryInDataPage();
}
// Return the current size of a semispace, including both allocatable and
// non-allocatable memory.
size_t TotalCapacity() {
DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return to_space_.current_capacity();
}
// Committed memory for NewSpace is the committed memory of both semi-spaces
// combined.
size_t CommittedMemory() final {
return from_space_.CommittedMemory() + to_space_.CommittedMemory();
}
size_t MaximumCommittedMemory() final {
return from_space_.MaximumCommittedMemory() +
to_space_.MaximumCommittedMemory();
}
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() final;
// Return the available bytes without growing.
size_t Available() final {
DCHECK_GE(Capacity(), Size());
return Capacity() - Size();
}
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
type == ExternalBackingStoreType::kArrayBuffer)
return heap()->YoungArrayBufferBytes();
DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
return to_space_.ExternalBackingStoreBytes(type);
}
size_t ExternalBackingStoreBytes() {
size_t result = 0;
for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
result +=
ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
}
return result;
}
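// Bytes allocated since the last GC: the tail of the age-mark page, every
// full page in between, and the used prefix of the page containing top().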
size_t AllocatedSinceLastGC() {
const Address age_mark = to_space_.age_mark();
DCHECK_NE(age_mark, kNullAddress);
DCHECK_NE(top(), kNullAddress);
Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
Page* const last_page = Page::FromAllocationAreaAddress(top());
Page* current_page = age_mark_page;
size_t allocated = 0;
if (current_page != last_page) {
DCHECK_EQ(current_page, age_mark_page);
DCHECK_GE(age_mark_page->area_end(), age_mark);
allocated += age_mark_page->area_end() - age_mark;
current_page = current_page->next_page();
} else {
DCHECK_GE(top(), age_mark);
return top() - age_mark;
}
while (current_page != last_page) {
DCHECK_NE(current_page, age_mark_page);
allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
current_page = current_page->next_page();
}
DCHECK_GE(top(), current_page->area_start());
allocated += top() - current_page->area_start();
DCHECK_LE(allocated, Size());
return allocated;
}
void MovePageFromSpaceToSpace(Page* page) {
DCHECK(page->IsFromPage());
from_space_.RemovePage(page);
to_space_.PrependPage(page);
}
bool Rebalance();
// Return the maximum capacity of a semispace.
size_t MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
return to_space_.maximum_capacity();
}
bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
// Returns the initial capacity of a semispace.
size_t InitialTotalCapacity() {
DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
return to_space_.minimum_capacity();
}
void ResetOriginalTop() {
DCHECK_GE(top(), original_top_);
DCHECK_LE(top(), original_limit_);
original_top_.store(top(), std::memory_order_release);
}
Address original_top_acquire() {
return original_top_.load(std::memory_order_acquire);
}
Address original_limit_relaxed() {
return original_limit_.load(std::memory_order_relaxed);
}
// Return the first allocatable address in the active semispace. This may be
// the address where the first object resides.
Address first_allocatable_address() { return to_space_.space_start(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); }
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Reset the allocation pointer to the beginning of the active semispace.
void ResetLinearAllocationArea();
// When inline allocation stepping is active, either because of incremental
// marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
// inline allocation every once in a while. This is done by setting
// allocation_info_.limit to be lower than the actual limit and increasing
// it in steps to guarantee that the observers are notified periodically.
void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
inline bool ToSpaceContainsSlow(Address a) const;
inline bool ToSpaceContains(Object o) const;
inline bool FromSpaceContains(Object o) const;
// Try to switch the active semispace to a new, empty page.
// Returns false if this isn't possible or reasonable (i.e., there
// are no pages, or the current page is already empty), or true
// if successful.
bool AddFreshPage();
bool AddFreshPageSynchronized();
#ifdef VERIFY_HEAP
// Verify the active semispace.
virtual void Verify(Isolate* isolate);
#endif
#ifdef DEBUG
// Print the active semispace.
void Print() override { to_space_.Print(); }
#endif
// Return whether the operation succeeded.
bool CommitFromSpaceIfNeeded() {
if (from_space_.is_committed()) return true;
return from_space_.Commit();
}
bool UncommitFromSpace() {
if (!from_space_.is_committed()) return true;
return from_space_.Uncommit();
}
bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
SemiSpace* active_space() { return &to_space_; }
Page* first_page() { return to_space_.first_page(); }
Page* last_page() { return to_space_.last_page(); }
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
const_iterator begin() const { return to_space_.begin(); }
const_iterator end() const { return to_space_.end(); }
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
SemiSpace& from_space() { return from_space_; }
SemiSpace& to_space() { return to_space_; }
private:
// Update linear allocation area to match the current to-space page.
void UpdateLinearAllocationArea();
base::Mutex mutex_;
// The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks.
std::atomic<Address> original_top_;
std::atomic<Address> original_limit_;
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
VirtualMemory reservation_;
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsInlineAllocation() override { return true; }
friend class SemiSpaceObjectIterator;
};
class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
public:
explicit PauseAllocationObserversScope(Heap* heap);
......