Commit ce570aa5 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Implement PagedNewSpace

See a description of the different classes and how they integrate in
https://docs.google.com/document/d/1wNj_akGSh6RBMC2RvH8HIlSUqkjWrpGkxI_BTZ-zdXE/edit#
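
In short, this CL layers the existing NewSpace interface over a paged backing
store. A minimal standalone sketch of the composition (stub types only; the
real classes are in src/heap/new-spaces.h in the diff below):

#include <cstddef>

// Stub bases standing in for the real V8 classes (sketch, not CL code).
struct PagedSpaceBase { /* paged backing store, shared with old space */ };
struct NewSpace { /* young-generation allocation interface */ };

// A paged space with NEW_SPACE identity; capacity is tracked in whole pages.
struct PagedSpaceForNewSpace : PagedSpaceBase {
  size_t current_capacity_ = 0;
  size_t target_capacity_ = 0;  // adjusted by Grow()/Shrink() between GCs
};

// The NewSpace API on top, delegating storage to the paged space it owns.
struct PagedNewSpace : NewSpace {
  PagedSpaceForNewSpace paged_space_;
};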

Bug: v8:12612
Change-Id: I0f2141f4ada5c964e985d109133902172d1ab605
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3641178
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81872}
parent 49cbac30
......@@ -179,9 +179,9 @@ class BasicMemoryChunk {
void ClearFlag(Flag flag) {
main_thread_flags_ = main_thread_flags_.without(flag);
}
void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
// Set or clear multiple flags at a time. `mask` indicates which flags
// should be replaced with new `flags`.
void SetFlags(MainThreadFlags flags, MainThreadFlags mask = kAllFlagsMask) {
main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
}
......
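For intuition, the masked update above is a bitwise select: bits inside `mask`
come from `flags`, bits outside it keep their previous value. A standalone
sketch with plain integers (values hypothetical):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t old_flags = 0b1010;
  uint32_t flags = 0b0101;
  uint32_t mask = 0b0011;  // only the low two bits may change
  uint32_t updated = (old_flags & ~mask) | (flags & mask);
  assert(updated == 0b1001);  // high bits kept, low bits replaced
  return 0;
}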
......@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/tagged-impl.h"
......@@ -98,6 +99,20 @@ V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
return true;
}
// -----------------------------------------------------------------------------
// PagedSpaceForNewSpace
V8_INLINE bool PagedSpaceForNewSpace::EnsureAllocation(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
int* out_max_aligned_size) {
if (!PagedSpaceBase::EnsureAllocation(size_in_bytes, alignment, origin,
out_max_aligned_size)) {
return false;
}
allocated_linear_areas_ += limit() - top();
return true;
}
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator
......
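The counter incremented here pairs with the decrement in
PagedSpaceForNewSpace::FreeLinearAllocationArea later in this CL: the full LAB
size is added when an area is handed out and the unused tail is subtracted
when it is freed, so the counter converges to bytes actually bump-allocated.
A toy model (sizes hypothetical):

#include <cassert>
#include <cstddef>

int main() {
  size_t allocated_linear_areas = 0;
  size_t top = 0, limit = 4096;            // a fresh 4 KB linear area
  allocated_linear_areas += limit - top;   // EnsureAllocation: +4096
  top += 1024;                             // bump-allocate 1 KB of objects
  allocated_linear_areas -= limit - top;   // FreeLinearAllocationArea: -3072
  assert(allocated_linear_areas == 1024);  // net: bytes actually used
  return 0;
}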
......@@ -4,7 +4,9 @@
#include "src/heap/new-spaces.h"
#include "paged-spaces.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
......@@ -462,20 +464,6 @@ void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
#endif
}
void NewSpace::MakeLinearAllocationAreaIterable() {
Address to_top = top();
Page* page = Page::FromAddress(to_top - kTaggedSize);
if (page->Contains(to_top)) {
int remaining_in_page = static_cast<int>(page->area_end() - to_top);
heap_->CreateFillerObjectAt(to_top, remaining_in_page);
}
}
void NewSpace::FreeLinearAllocationArea() {
MakeLinearAllocationAreaIterable();
UpdateInlineAllocationLimit(0);
}
#if DEBUG
void NewSpace::VerifyTop() const {
SpaceWithLinearArea::VerifyTop();
......@@ -571,6 +559,7 @@ void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
#endif // VERIFY_HEAP
void NewSpace::PromotePageToOldSpace(Page* page) {
DCHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
DCHECK(page->InYoungGeneration());
RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
......@@ -751,6 +740,11 @@ bool SemiSpaceNewSpace::AddParkedAllocationBuffer(
return false;
}
void SemiSpaceNewSpace::FreeLinearAllocationArea() {
MakeLinearAllocationAreaIterable();
UpdateInlineAllocationLimit(0);
}
#if DEBUG
void SemiSpaceNewSpace::VerifyTop() const {
NewSpace::VerifyTop();
......@@ -880,5 +874,171 @@ bool SemiSpaceNewSpace::IsPromotionCandidate(const MemoryChunk* page) const {
return !page->Contains(age_mark());
}
void SemiSpaceNewSpace::MakeLinearAllocationAreaIterable() {
Address to_top = top();
Page* page = Page::FromAddress(to_top - kTaggedSize);
if (page->Contains(to_top)) {
int remaining_in_page = static_cast<int>(page->area_end() - to_top);
heap_->CreateFillerObjectAt(to_top, remaining_in_page);
}
}
// -----------------------------------------------------------------------------
// PagedSpaceForNewSpace implementation
PagedSpaceForNewSpace::PagedSpaceForNewSpace(
Heap* heap, size_t initial_capacity, size_t max_capacity,
AllocationCounter* allocation_counter,
LinearAllocationArea* allocation_info,
LinearAreaOriginalData& linear_area_original_data)
: PagedSpaceBase(heap, NEW_SPACE, NOT_EXECUTABLE,
FreeList::CreateFreeList(), allocation_counter,
allocation_info, linear_area_original_data,
CompactionSpaceKind::kNone),
initial_capacity_(RoundDown(initial_capacity, Page::kPageSize)),
max_capacity_(RoundDown(max_capacity, Page::kPageSize)),
target_capacity_(initial_capacity_) {
DCHECK_LE(initial_capacity_, max_capacity_);
// Preallocate pages for the initial capacity but don't allocate a linear
// allocation area yet.
CHECK(EnsureCurrentCapacity());
}
Page* PagedSpaceForNewSpace::InitializePage(MemoryChunk* chunk) {
DCHECK_EQ(identity(), NEW_SPACE);
Page* page = static_cast<Page*>(chunk);
DCHECK_EQ(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocationStatistics();
page->SetFlags(Page::TO_PAGE);
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
->ClearLiveness(page);
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
page->list_node().Initialize();
page->InitializationMemoryFence();
return page;
}
void PagedSpaceForNewSpace::Grow() {
heap()->safepoint()->AssertActive();
// Double the space size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
target_capacity_ =
std::min(MaximumCapacity(),
RoundUp(static_cast<size_t>(FLAG_semi_space_growth_factor) *
TotalCapacity(),
Page::kPageSize));
CHECK(EnsureCurrentCapacity());
}
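A standalone model of the target computation above (RoundUpTo and the sample
numbers are illustrative; FLAG_semi_space_growth_factor defaults to 2):

#include <algorithm>
#include <cstddef>

size_t RoundUpTo(size_t value, size_t granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

size_t NextTargetCapacity(size_t total_capacity, size_t max_capacity,
                          size_t growth_factor, size_t page_size) {
  // Mirrors Grow(): scale, page-align, clamp to the maximum capacity.
  return std::min(max_capacity,
                  RoundUpTo(growth_factor * total_capacity, page_size));
}
// E.g. 1 MB total, factor 2, 256 KB pages, 8 MB max => 2 MB target.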
void PagedSpaceForNewSpace::Shrink() {
target_capacity_ =
RoundUp(std::max(initial_capacity_, 2 * Size()), Page::kPageSize);
if (target_capacity_ < current_capacity_) {
// Try to shrink by freeing empty pages.
for (Page* page = first_page();
page != last_page() && (current_capacity_ > target_capacity_);) {
Page* current_page = page;
page = page->next_page();
if (current_page->allocated_bytes() == 0) {
ReleasePage(current_page);
}
}
}
// Shrinking to target capacity may not have been possible.
target_capacity_ = current_capacity_;
}
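For intuition, a worked shrink target (numbers hypothetical):

// initial_capacity_ = 1 MB, Size() = 1.5 MB allocated, 256 KB pages:
//   target_capacity_ = RoundUp(max(1 MB, 3 MB), 256 KB) = 3 MB
// Pages with allocated_bytes() == 0 are then released until capacity
// reaches the target; the final assignment resets target_capacity_ to
// whatever capacity was actually reachable.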
void PagedSpaceForNewSpace::EvacuatePrologue() { FreeLinearAllocationArea(); }
void PagedSpaceForNewSpace::UpdateInlineAllocationLimit(size_t size_in_bytes) {
PagedSpaceBase::UpdateInlineAllocationLimit(size_in_bytes);
}
size_t PagedSpaceForNewSpace::AddPage(Page* page) {
current_capacity_ += Page::kPageSize;
DCHECK_LE(current_capacity_, target_capacity_);
return PagedSpaceBase::AddPage(page);
}
void PagedSpaceForNewSpace::RemovePage(Page* page) {
DCHECK_LE(Page::kPageSize, current_capacity_);
current_capacity_ -= Page::kPageSize;
PagedSpaceBase::RemovePage(page);
}
void PagedSpaceForNewSpace::ReleasePage(Page* page) {
DCHECK_LE(Page::kPageSize, current_capacity_);
current_capacity_ -= Page::kPageSize;
PagedSpaceBase::ReleasePage(page);
}
bool PagedSpaceForNewSpace::AddFreshPage() {
if (TotalCapacity() >= MaximumCapacity()) return false;
return TryExpandImpl();
}
bool PagedSpaceForNewSpace::EnsureCurrentCapacity() {
while (current_capacity_ < target_capacity_) {
if (!TryExpandImpl()) return false;
}
DCHECK_EQ(current_capacity_, target_capacity_);
return true;
}
void PagedSpaceForNewSpace::FreeLinearAllocationArea() {
size_t remaining_allocation_area_size = limit() - top();
DCHECK_GE(allocated_linear_areas_, remaining_allocation_area_size);
allocated_linear_areas_ -= remaining_allocation_area_size;
PagedSpaceBase::FreeLinearAllocationArea();
}
#ifdef VERIFY_HEAP
void PagedSpaceForNewSpace::Verify(Isolate* isolate,
ObjectVisitor* visitor) const {
PagedSpaceBase::Verify(isolate, visitor);
DCHECK_EQ(current_capacity_, target_capacity_);
DCHECK_EQ(current_capacity_, Page::kPageSize * CountTotalPages());
}
#endif // VERIFY_HEAP
// -----------------------------------------------------------------------------
// PagedNewSpace implementation
PagedNewSpace::PagedNewSpace(Heap* heap, size_t initial_capacity,
size_t max_capacity,
LinearAllocationArea* allocation_info)
: NewSpace(heap, allocation_info),
paged_space_(heap, initial_capacity, max_capacity, &allocation_counter_,
allocation_info_, linear_area_original_data_) {}
PagedNewSpace::~PagedNewSpace() {
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
allocation_info_->Reset(kNullAddress, kNullAddress);
paged_space_.TearDown();
}
#ifdef VERIFY_HEAP
void PagedNewSpace::Verify(Isolate* isolate) const {
const Page* first_page = paged_space_.first_page();
VerifyImpl(isolate, first_page, first_page->area_start());
// Check paged-spaces.
VerifyPointersVisitor visitor(heap());
paged_space_.Verify(isolate, &visitor);
}
#endif // VERIFY_HEAP
} // namespace internal
} // namespace v8
......@@ -90,7 +90,7 @@ V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
if (!is_compaction_space()) {
if ((identity() != NEW_SPACE) && !is_compaction_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
// running.
......
......@@ -6,8 +6,10 @@
#include <atomic>
#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/allocation-observer.h"
......@@ -139,10 +141,8 @@ void PagedSpaceBase::TearDown() {
void PagedSpaceBase::RefillFreeList() {
// Any PagedSpace might invoke RefillFreeList. With PagedNewSpace, new space
// may refill as well, so assert the supported identities instead of filtering.
if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
identity() != MAP_SPACE) {
return;
}
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
identity() == MAP_SPACE || identity() == NEW_SPACE);
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
......@@ -162,7 +162,7 @@ void PagedSpaceBase::RefillFreeList() {
// during compaction.
if (is_compaction_space()) {
DCHECK_NE(this, p->owner());
PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
PagedSpaceBase* owner = reinterpret_cast<PagedSpaceBase*>(p->owner());
base::MutexGuard guard(owner->mutex());
owner->RefineAllocatedBytesAfterSweeping(p);
owner->RemovePage(p);
......@@ -183,6 +183,7 @@ void PagedSpaceBase::RefillFreeList() {
void PagedSpaceBase::MergeCompactionSpace(CompactionSpace* other) {
base::MutexGuard guard(mutex());
DCHECK_NE(NEW_SPACE, identity());
DCHECK(identity() == other->identity());
// Unmerged fields:
......@@ -302,8 +303,11 @@ Page* PagedSpaceBase::RemovePageSafe(int size_in_bytes) {
}
size_t PagedSpaceBase::AddPage(Page* page) {
DCHECK_NOT_NULL(page);
CHECK(page->SweepingDone());
page->set_owner(this);
DCHECK_IMPLIES(identity() == NEW_SPACE, page->IsFlagSet(Page::TO_PAGE));
DCHECK_IMPLIES(identity() != NEW_SPACE, !page->IsFlagSet(Page::TO_PAGE));
memory_chunk_list_.PushBack(page);
AccountCommitted(page->size());
IncreaseCapacity(page->area_size());
......@@ -318,6 +322,7 @@ size_t PagedSpaceBase::AddPage(Page* page) {
void PagedSpaceBase::RemovePage(Page* page) {
CHECK(page->SweepingDone());
DCHECK_IMPLIES(identity() == NEW_SPACE, page->IsFlagSet(Page::TO_PAGE));
memory_chunk_list_.Remove(page);
UnlinkFreeListCategories(page);
DecreaseAllocatedBytes(page->allocated_bytes(), page);
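The implications added to AddPage and RemovePage encode one invariant; spelled
out as a reader's note (DCHECK_IMPLIES(p, q) asserts !p || q):

// The AddPage pair is equivalent to:
//   page->IsFlagSet(Page::TO_PAGE) == (identity() == NEW_SPACE)
// i.e. TO_PAGE is set on exactly the pages owned by new space.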
......@@ -367,7 +372,7 @@ void PagedSpaceBase::ShrinkImmortalImmovablePages() {
}
}
Page* PagedSpaceBase::Expand() {
Page* PagedSpaceBase::TryExpandImpl() {
Page* page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, this, executable());
if (page == nullptr) return nullptr;
......@@ -378,8 +383,9 @@ Page* PagedSpaceBase::Expand() {
return page;
}
base::Optional<std::pair<Address, size_t>> PagedSpaceBase::ExpandBackground(
base::Optional<std::pair<Address, size_t>> PagedSpaceBase::TryExpandBackground(
size_t size_in_bytes) {
DCHECK_NE(NEW_SPACE, identity());
Page* page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::AllocationMode::kRegular, this, executable());
if (page == nullptr) return {};
......@@ -407,7 +413,7 @@ int PagedSpaceBase::CountTotalPages() const {
void PagedSpaceBase::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != kNullAddress && top != limit &&
if (top != kNullAddress && top != limit && identity() != NEW_SPACE &&
heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
}
......@@ -429,7 +435,8 @@ void PagedSpaceBase::DecreaseLimit(Address new_limit) {
SetTopAndLimit(top(), new_limit);
Free(new_limit, old_limit - new_limit,
SpaceAccountingMode::kSpaceAccounted);
if (heap()->incremental_marking()->black_allocation()) {
if (heap()->incremental_marking()->black_allocation() &&
identity() != NEW_SPACE) {
Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
old_limit);
}
......@@ -527,6 +534,8 @@ void PagedSpaceBase::ReleasePage(Page* page) {
page));
DCHECK_EQ(page->owner(), this);
DCHECK_IMPLIES(identity() == NEW_SPACE, page->IsFlagSet(Page::TO_PAGE));
free_list_->EvictFreeListItems(page);
if (Page::FromAllocationAreaAddress(allocation_info_->top()) == page) {
......@@ -675,7 +684,7 @@ PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
result = ExpandBackground(max_size_in_bytes);
result = TryExpandBackground(max_size_in_bytes);
if (result) return result;
}
......@@ -929,8 +938,9 @@ bool PagedSpaceBase::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
return RawRefillLabMain(size_in_bytes, origin);
}
Page* CompactionSpace::Expand() {
Page* page = PagedSpaceBase::Expand();
Page* CompactionSpace::TryExpandImpl() {
DCHECK_NE(NEW_SPACE, identity());
Page* page = PagedSpaceBase::TryExpandImpl();
new_pages_.push_back(page);
return page;
}
......@@ -941,9 +951,10 @@ bool CompactionSpace::RefillLabMain(int size_in_bytes,
}
bool PagedSpaceBase::TryExpand(int size_in_bytes, AllocationOrigin origin) {
Page* page = Expand();
DCHECK_NE(NEW_SPACE, identity());
Page* page = TryExpandImpl();
if (!page) return false;
if (!is_compaction_space()) {
if (!is_compaction_space() && identity() != NEW_SPACE) {
heap()->NotifyOldGenerationExpansion(identity(), page);
}
DCHECK((CountTotalPages() > 1) ||
......@@ -960,6 +971,12 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
if (TryAllocationFromFreeListMain(size_in_bytes, origin)) return true;
if (identity() == NEW_SPACE) {
// New space must not allocate fresh pages when it runs out of space, and it
// is not currently swept either, so fail the refill here.
return false;
}
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
if (collector->sweeping_in_progress()) {
......@@ -978,9 +995,10 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
}
if (is_compaction_space()) {
DCHECK_NE(NEW_SPACE, identity());
// The main thread may have acquired all swept pages. Try to steal from
// it. This can only happen during young generation evacuation.
PagedSpace* main_space = heap()->paged_space(identity());
PagedSpaceBase* main_space = heap()->paged_space(identity());
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
......@@ -1013,6 +1031,10 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
int max_pages, int size_in_bytes,
AllocationOrigin origin) {
// TODO(v8:12612): New space is not currently swept, so new space allocation
// should not contribute to sweeping. Revisit this once sweeping for young gen
// is implemented.
DCHECK_NE(NEW_SPACE, identity());
// Cleanup invalidated old-to-new refs for compaction space in the
// final atomic pause.
Sweeper::SweepingMode sweeping_mode =
......
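Taking the hunks in this file together, the main-thread refill slow path now
falls through in this order (reader's summary, not CL code):

// RawRefillLabMain, after this CL:
//   1. TryAllocationFromFreeListMain  -- reuse already-swept free-list memory
//   2. identity() == NEW_SPACE        -- fail: new space is not swept and
//                                        must not allocate fresh pages here
//   3. ContributeToSweepingMain       -- help sweeping, then retry (asserted
//                                        to never run for NEW_SPACE)
//   4. compaction spaces may steal a swept page from the main space
//   5. TryExpand                      -- allocate a fresh page (also asserted
//                                        to never run for NEW_SPACE)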
......@@ -6,6 +6,7 @@
#define V8_HEAP_PAGED_SPACES_H_
#include <atomic>
#include <limits>
#include <memory>
#include <utility>
......@@ -29,6 +30,7 @@ class Heap;
class HeapObject;
class Isolate;
class ObjectVisitor;
class PagedSpaceBase;
// -----------------------------------------------------------------------------
// Heap object iterator in paged spaces.
......@@ -213,12 +215,12 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
Page* InitializePage(MemoryChunk* chunk) override;
void ReleasePage(Page* page);
virtual void ReleasePage(Page* page);
// Adds the page to this space and returns the number of bytes added to the
// free list of the space.
size_t AddPage(Page* page);
void RemovePage(Page* page);
virtual size_t AddPage(Page* page);
virtual void RemovePage(Page* page);
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
......@@ -293,6 +295,13 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
return reinterpret_cast<const Page*>(memory_chunk_list_.front());
}
Page* last_page() override {
return reinterpret_cast<Page*>(memory_chunk_list_.back());
}
const Page* last_page() const override {
return reinterpret_cast<const Page*>(memory_chunk_list_.back());
}
iterator begin() { return iterator(first_page()); }
iterator end() { return iterator(nullptr); }
......@@ -330,12 +339,13 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit);
void DecreaseLimit(Address new_limit);
void UpdateInlineAllocationLimit(size_t min_size) override;
bool SupportsAllocationObserver() const override {
return !is_compaction_space();
}
protected:
void UpdateInlineAllocationLimit(size_t min_size) override;
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() const { return true; }
......@@ -353,17 +363,17 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
// Expands the space by allocating a page. Returns nullptr if it cannot
// allocate a page from the OS, or if the hard heap size limit has been hit.
virtual Page* Expand();
virtual Page* TryExpandImpl();
// Expands the space by a single page from a background thread and allocates
// a memory area of the given size in it. If successful the method returns
// the address and size of the area.
base::Optional<std::pair<Address, size_t>> ExpandBackground(
base::Optional<std::pair<Address, size_t>> TryExpandBackground(
size_t size_in_bytes);
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) final;
int* out_max_aligned_size) override;
V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin);
......@@ -454,9 +464,9 @@ class V8_EXPORT_PRIVATE CompactionSpace final : public PagedSpace {
V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
AllocationOrigin origin) override;
Page* Expand() override;
Page* TryExpandImpl() final;
// The space is temporary and not included in any snapshots.
bool snapshotable() const override { return false; }
bool snapshotable() const final { return false; }
// Pages that were allocated in this local space and need to be merged
// to the main space.
std::vector<Page*> new_pages_;
......
......@@ -169,6 +169,7 @@ size_t Page::ShrinkToHighWaterMark() {
}
void Page::CreateBlackArea(Address start, Address end) {
DCHECK_NE(NEW_SPACE, owner_identity());
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
......@@ -180,6 +181,7 @@ void Page::CreateBlackArea(Address start, Address end) {
}
void Page::CreateBlackAreaBackground(Address start, Address end) {
DCHECK_NE(NEW_SPACE, owner_identity());
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
......@@ -193,6 +195,7 @@ void Page::CreateBlackAreaBackground(Address start, Address end) {
}
void Page::DestroyBlackArea(Address start, Address end) {
DCHECK_NE(NEW_SPACE, owner_identity());
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
......@@ -204,6 +207,7 @@ void Page::DestroyBlackArea(Address start, Address end) {
}
void Page::DestroyBlackAreaBackground(Address start, Address end) {
DCHECK_NE(NEW_SPACE, owner_identity());
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
......
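These four assertions mirror the identity() != NEW_SPACE guards added to
SetLinearAllocationArea and DecreaseLimit above: black allocation is an
incremental-marking mechanism for old-generation linear areas, so reaching any
black-area helper with a new-space page would indicate a bug.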
......@@ -66,7 +66,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
{
HandleScope scope2(isolate);
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
auto compaction_page_handles = heap::CreatePadding(
heap,
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
......@@ -136,7 +136,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
HandleScope scope2(isolate);
// Fill another page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
auto compaction_page_handles = heap::CreatePadding(
heap,
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
......@@ -151,7 +151,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
// Add another page that is filled with {num_objects} objects of size
// {object_size}.
HandleScope scope3(isolate);
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
const int num_objects = 3;
std::vector<Handle<FixedArray>> page_to_fill_handles =
heap::CreatePadding(heap, object_size * num_objects,
......@@ -218,7 +218,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
HandleScope scope2(isolate);
// Fill another page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
auto compaction_page_handles = heap::CreatePadding(
heap,
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
......@@ -250,7 +250,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
// Add another page that is filled with {num_objects} objects of size
// {object_size}.
HandleScope scope3(isolate);
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
const int num_objects = 3;
std::vector<Handle<FixedArray>> page_to_fill_handles =
heap::CreatePadding(heap, object_size * num_objects,
......@@ -308,7 +308,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
HandleScope temporary_scope(isolate);
// Fill a fresh page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
std::vector<Handle<FixedArray>> compaction_page_handles =
heap::CreatePadding(
heap,
......@@ -329,7 +329,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
// Add another page that is filled with {num_objects} objects of size
// {object_size}.
HandleScope scope3(isolate);
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
const int num_objects = 2;
int used_memory = object_size * num_objects;
std::vector<Handle<FixedArray>> page_to_fill_handles =
......@@ -404,7 +404,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
HandleScope temporary_scope(isolate);
// Fill another page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
auto compaction_page_handles = heap::CreatePadding(
heap,
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
......@@ -431,7 +431,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
// Add another page that is filled with {num_objects} objects of size
// {object_size}.
HandleScope scope3(isolate);
CHECK(heap->old_space()->Expand());
CHECK(heap->old_space()->TryExpandImpl());
const int num_objects = 2;
int used_memory = object_size * num_objects;
std::vector<Handle<FixedArray>> page_to_fill_handles =
......
......@@ -35,6 +35,7 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/common/globals.h"
#include "src/heap/allocation-result.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
......@@ -315,6 +316,30 @@ TEST(SemiSpaceNewSpace) {
memory_allocator->unmapper()->EnsureUnmappingCompleted();
}
TEST(PagedNewSpace) {
if (FLAG_single_generation) return;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
LinearAllocationArea allocation_info;
std::unique_ptr<PagedNewSpace> new_space = std::make_unique<PagedNewSpace>(
heap, CcTest::heap()->InitialSemiSpaceSize(),
CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
CHECK(new_space->MaximumCapacity());
AllocationResult allocation_result;
while (!(allocation_result = new_space->AllocateRaw(kMaxRegularHeapObjectSize,
kTaggedAligned))
.IsFailure()) {
CHECK(new_space->Contains(allocation_result.ToObjectChecked()));
}
new_space.reset();
memory_allocator->unmapper()->EnsureUnmappingCompleted();
}
TEST(OldSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
......