Commit b415cd7c authored by Omer Katz, committed by V8 LUCI CQ

[heap] Extend and rename NewSpace base class

NewSpace is renamed to SemiSpaceNewSpace and NewSpaceBase is renamed to
NewSpace (the upcoming PagedSpace-based new space implementation will be
named PagedNewSpace).

Most use cases are updated to use the base class rather than the concrete
semi-space-based implementation. To that end, the base class is extended
with additional virtual methods that delegate to the concrete class (a
simplified sketch of the resulting hierarchy follows).
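
For orientation, a minimal sketch of the resulting hierarchy. Class and
method names are taken from this CL; the base classes, signatures and
bodies are simplified assumptions, not the real declarations in
src/heap/new-spaces.h:

  #include <cstddef>  // size_t

  // Simplified sketch, not the real declarations (actual base classes,
  // constructors and most members are elided; bodies are placeholders).
  class NewSpace {                                   // formerly NewSpaceBase
   public:
    virtual ~NewSpace() = default;
    // Hooks made virtual so most callers can stay implementation-agnostic.
    virtual size_t ExternalBackingStoreOverallBytes() const = 0;
    // ... further virtual methods delegating to the concrete class ...
  };

  class SemiSpaceNewSpace final : public NewSpace {  // formerly NewSpace
   public:
    // Downcast helper used where a callsite must know the concrete
    // implementation; assumed here to be a plain static_cast.
    static SemiSpaceNewSpace* From(NewSpace* space) {
      return static_cast<SemiSpaceNewSpace*>(space);
    }
    size_t ExternalBackingStoreOverallBytes() const final { return 0; }
    // Semi-space-specific API (Flip, age_mark, from_space, ...) stays here.
  };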

This CL follows these guidelines:
(*) If a callsite should know the exact new space implementation in use,
we cast to the concrete class there. This is the case, for example, for
callsites in scavenger.*.
(*) If a method is called from outside the heap implementation or should
be present regardless of the concrete implementation, that method is
made virtual.
(*) Other cases are usually methods that are specific to one concrete
implementation, where the concrete implementation is not known at the
callsite and there is no clean way to abstract the method. In such cases
we cast to the concrete SemiSpaceNewSpace implementation for now and will
revisit these callsites once PagedNewSpace exists (see the sketch after
this list).
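
For illustration, a hedged sketch of the two callsite patterns above,
assuming the usual V8 heap headers are available. The helper functions are
hypothetical, while new_space(), ExternalBackingStoreOverallBytes(),
SemiSpaceNewSpace::From(), Flip() and ResetLinearAllocationArea() all
appear in the diff below:

  // (1) Implementation-agnostic callsite: goes through a virtual method on
  //     the NewSpace base class.
  size_t NewSpaceBackingStoreBytes(Heap* heap) {  // hypothetical helper
    return heap->new_space()->ExternalBackingStoreOverallBytes();
  }

  // (2) Callsite that must know the concrete semi-space implementation,
  //     e.g. because it needs Flip(); cast explicitly for now.
  void FlipForScavenge(Heap* heap) {  // hypothetical helper
    SemiSpaceNewSpace* semi_space_new_space =
        SemiSpaceNewSpace::From(heap->new_space());
    semi_space_new_space->Flip();
    semi_space_new_space->ResetLinearAllocationArea();
  }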

Bug: v8:12612
Change-Id: I7b85626774ce0d785b0257bf8d32b9f50eeaf292
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3625975
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80482}
parent a6805703
......@@ -323,13 +323,6 @@ Heap* Heap::FromWritableHeapObject(HeapObject obj) {
return heap;
}
bool Heap::ShouldBePromoted(Address old_address) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
}
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
DCHECK(IsAligned(byte_size, kTaggedSize));
CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
......
......@@ -39,35 +39,37 @@ void HeapLayoutTracer::GCEpiloguePrintHeapLayout(v8::Isolate* isolate,
// static
void HeapLayoutTracer::PrintBasicMemoryChunk(std::ostream& os,
BasicMemoryChunk* chunk,
const BasicMemoryChunk& chunk,
const char* owner_name) {
os << "{owner:" << owner_name << ","
<< "address:" << chunk << ","
<< "size:" << chunk->size() << ","
<< "allocated_bytes:" << chunk->allocated_bytes() << ","
<< "wasted_memory:" << chunk->wasted_memory() << "}" << std::endl;
<< "address:" << &chunk << ","
<< "size:" << chunk.size() << ","
<< "allocated_bytes:" << chunk.allocated_bytes() << ","
<< "wasted_memory:" << chunk.wasted_memory() << "}" << std::endl;
}
// static
void HeapLayoutTracer::PrintHeapLayout(std::ostream& os, Heap* heap) {
for (PageIterator it = heap->new_space()->to_space().begin();
it != heap->new_space()->to_space().end(); ++it) {
PrintBasicMemoryChunk(os, *it, "to_space");
const SemiSpaceNewSpace* semi_space_new_space =
SemiSpaceNewSpace::From(heap->new_space());
for (ConstPageIterator it = semi_space_new_space->to_space().begin();
it != semi_space_new_space->to_space().end(); ++it) {
PrintBasicMemoryChunk(os, **it, "to_space");
}
for (PageIterator it = heap->new_space()->from_space().begin();
it != heap->new_space()->from_space().end(); ++it) {
PrintBasicMemoryChunk(os, *it, "from_space");
for (ConstPageIterator it = semi_space_new_space->from_space().begin();
it != semi_space_new_space->from_space().end(); ++it) {
PrintBasicMemoryChunk(os, **it, "from_space");
}
OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
PrintBasicMemoryChunk(os, chunk, chunk->owner()->name());
PrintBasicMemoryChunk(os, *chunk, chunk->owner()->name());
}
for (ReadOnlyPage* page : heap->read_only_space()->pages()) {
PrintBasicMemoryChunk(os, page, "ro_space");
PrintBasicMemoryChunk(os, *page, "ro_space");
}
}
} // namespace internal
......
......@@ -24,7 +24,8 @@ class HeapLayoutTracer : AllStatic {
v8::GCCallbackFlags flags, void* data);
private:
static void PrintBasicMemoryChunk(std::ostream& os, BasicMemoryChunk* chunk,
static void PrintBasicMemoryChunk(std::ostream& os,
const BasicMemoryChunk& chunk,
const char* owner_name);
static void PrintHeapLayout(std::ostream& os, Heap* heap);
};
......
......@@ -49,6 +49,7 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-allocator.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap-layout-tracer.h"
#include "src/heap/heap-write-barrier-inl.h"
......@@ -1015,7 +1016,10 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
deferred_counters_[feature]++;
}
bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
void Heap::UncommitFromSpace() {
DCHECK_NOT_NULL(new_space_);
SemiSpaceNewSpace::From(new_space_)->UncommitFromSpace();
}
void Heap::GarbageCollectionPrologue(
GarbageCollectionReason gc_reason,
......@@ -2159,11 +2163,7 @@ void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
void Heap::EnsureFromSpaceIsCommitted() {
if (!new_space_) return;
if (new_space_->CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
// Memory is exhausted and we will die.
FatalProcessOutOfMemory("Committing semi space failed.");
SemiSpaceNewSpace::From(new_space_)->CommitFromSpaceIfNeeded();
}
bool Heap::CollectionRequested() {
......@@ -2699,22 +2699,25 @@ void Heap::EvacuateYoungGeneration() {
DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
}
SemiSpaceNewSpace* semi_space_new_space =
SemiSpaceNewSpace::From(new_space());
// Move pages from new->old generation.
PageRange range(new_space()->first_allocatable_address(), new_space()->top());
PageRange range(semi_space_new_space->first_allocatable_address(),
semi_space_new_space->top());
for (auto it = range.begin(); it != range.end();) {
Page* p = (*++it)->prev_page();
new_space()->from_space().RemovePage(p);
semi_space_new_space->from_space().RemovePage(p);
Page::ConvertNewToOld(p);
if (incremental_marking()->IsMarking())
mark_compact_collector()->RecordLiveSlotsOnPage(p);
}
// Reset new space.
if (!new_space()->Rebalance()) {
if (!semi_space_new_space->Rebalance()) {
FatalProcessOutOfMemory("NewSpace::Rebalance");
}
new_space()->ResetLinearAllocationArea();
new_space()->set_age_mark(new_space()->top());
semi_space_new_space->ResetLinearAllocationArea();
semi_space_new_space->set_age_mark(semi_space_new_space->top());
for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
LargePage* page = *it;
......@@ -2783,8 +2786,12 @@ void Heap::Scavenge() {
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space()->Flip();
new_space()->ResetLinearAllocationArea();
{
SemiSpaceNewSpace* semi_space_new_space =
SemiSpaceNewSpace::From(new_space());
semi_space_new_space->Flip();
semi_space_new_space->ResetLinearAllocationArea();
}
// We also flip the young generation large object space. All large objects
// will be in the from space.
......@@ -3232,7 +3239,7 @@ void* Heap::AllocateExternalBackingStore(
const std::function<void*(size_t)>& allocate, size_t byte_length) {
if (!always_allocate() && new_space()) {
size_t new_space_backing_store_bytes =
new_space()->ExternalBackingStoreBytes();
new_space()->ExternalBackingStoreOverallBytes();
if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
new_space_backing_store_bytes >= byte_length) {
// Performing a young generation GC amortizes over the allocated backing
......@@ -4853,8 +4860,11 @@ void Heap::VerifyCommittedPhysicalMemory() {
#endif // DEBUG
void Heap::ZapFromSpace() {
if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
if (!new_space_) return;
SemiSpaceNewSpace* semi_space_new_space = SemiSpaceNewSpace::From(new_space_);
if (!semi_space_new_space->IsFromSpaceCommitted()) return;
for (Page* page :
PageRange(semi_space_new_space->from_space().first_page(), nullptr)) {
memory_allocator()->ZapBlock(page->area_start(),
page->HighWaterMark() - page->area_start(),
ZapValue());
......@@ -5831,7 +5841,7 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
DCHECK_NOT_NULL(read_only_space_);
const bool has_young_gen = !FLAG_single_generation && !IsShared();
if (has_young_gen) {
space_[NEW_SPACE] = new_space_ = new NewSpace(
space_[NEW_SPACE] = new_space_ = new SemiSpaceNewSpace(
this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
max_semi_space_size_, new_allocation_info);
space_[NEW_LO_SPACE] = new_lo_space_ =
......
......@@ -737,10 +737,6 @@ class Heap {
void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
// An object should be promoted if the object has survived a
// scavenge operation.
inline bool ShouldBePromoted(Address old_address);
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
inline int NextScriptId();
......@@ -1818,7 +1814,7 @@ class Heap {
void EnsureFromSpaceIsCommitted();
// Uncommit unused semi space.
V8_EXPORT_PRIVATE bool UncommitFromSpace();
V8_EXPORT_PRIVATE void UncommitFromSpace();
// Fill in bogus values in from space
void ZapFromSpace();
......
......@@ -40,6 +40,7 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/new-spaces.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/parallel-work-item.h"
......@@ -1860,7 +1861,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
return true;
}
if (heap_->ShouldBePromoted(object.address()) &&
if (heap_->new_space()->ShouldBePromoted(object.address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
promoted_size_ += size;
return true;
......@@ -1948,11 +1949,14 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
static void Move(Page* page) {
switch (mode) {
case NEW_TO_NEW:
page->heap()->new_space()->MovePageFromSpaceToSpace(page);
SemiSpaceNewSpace::From(page->heap()->new_space())
->MovePageFromSpaceToSpace(page);
page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
break;
case NEW_TO_OLD: {
page->heap()->new_space()->from_space().RemovePage(page);
SemiSpaceNewSpace::From(page->heap()->new_space())
->from_space()
.RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
DCHECK(!new_page->InYoungGeneration());
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
......@@ -3616,7 +3620,7 @@ void MarkCompactCollector::EvacuatePrologue() {
PageRange(new_space->first_allocatable_address(), new_space->top())) {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
SemiSpaceNewSpace::From(new_space)->Flip();
new_space->ResetLinearAllocationArea();
DCHECK_EQ(new_space->Size(), 0);
......@@ -3640,7 +3644,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
// New space.
if (heap()->new_space()) {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
SemiSpaceNewSpace::From(heap()->new_space())
->set_age_mark(heap()->new_space()->top());
DCHECK_EQ(0, heap()->new_space()->Size());
}
......@@ -3814,7 +3819,8 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
chunk->Contains(heap()->new_space()->age_mark()),
chunk->Contains(
SemiSpaceNewSpace::From(heap()->new_space())->age_mark()),
saved_live_bytes, evacuation_time,
chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
......@@ -4030,7 +4036,8 @@ bool ShouldMovePage(Page* p, intptr_t live_bytes,
AlwaysPromoteYoung always_promote_young) {
Heap* heap = p->heap();
const bool reduce_memory = heap->ShouldReduceMemory();
const Address age_mark = heap->new_space()->age_mark();
const Address age_mark =
SemiSpaceNewSpace::From(heap->new_space())->age_mark();
return !reduce_memory && !p->NeverEvacuate() &&
(live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
(always_promote_young == AlwaysPromoteYoung::kYes ||
......@@ -4270,7 +4277,7 @@ void MarkCompactCollector::Evacuate() {
if (heap()->new_space()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->Rebalance()) {
if (!SemiSpaceNewSpace::From(heap()->new_space())->Rebalance()) {
heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
}
}
......@@ -5543,8 +5550,10 @@ void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
for (Page* p :
PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
for (Page* p : PageRange(SemiSpaceNewSpace::From(heap()->new_space())
->from_space()
.first_page(),
nullptr)) {
DCHECK_EQ(promoted_pages_.end(),
std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
non_atomic_marking_state()->ClearLiveness(p);
......@@ -5703,7 +5712,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
SemiSpaceNewSpace::From(new_space)->Flip();
new_space->ResetLinearAllocationArea();
heap()->new_lo_space()->Flip();
......@@ -5711,7 +5720,8 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
}
void MinorMarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
SemiSpaceNewSpace::From(heap()->new_space())
->set_age_mark(heap()->new_space()->top());
// Give pages that are queued to be freed back to the OS.
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
......@@ -6096,7 +6106,7 @@ void MinorMarkCompactCollector::Evacuate() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->Rebalance()) {
if (!SemiSpaceNewSpace::From(heap()->new_space())->Rebalance()) {
heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
}
}
......
......@@ -38,35 +38,28 @@ bool SemiSpace::ContainsSlow(Address a) const {
}
// --------------------------------------------------------------------------
// NewSpaceBase
// NewSpace
bool NewSpaceBase::Contains(Object o) const {
bool NewSpace::Contains(Object o) const {
return o.IsHeapObject() && Contains(HeapObject::cast(o));
}
bool NewSpaceBase::Contains(HeapObject o) const {
bool NewSpace::Contains(HeapObject o) const {
return BasicMemoryChunk::FromHeapObject(o)->InNewSpace();
}
V8_WARN_UNUSED_RESULT inline AllocationResult
NewSpaceBase::AllocateRawSynchronized(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard guard(&mutex_);
return AllocateRaw(size_in_bytes, alignment, origin);
}
// -----------------------------------------------------------------------------
// NewSpace
bool NewSpace::ContainsSlow(Address a) const {
return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}
// SemiSpaceNewSpace
V8_INLINE bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
int* out_max_aligned_size) {
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
VerifyTop();
......
(Diffs for two files are collapsed and not shown here.)
......@@ -8,6 +8,7 @@
#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces.h"
#include "src/heap/scavenger.h"
#include "src/objects/map.h"
#include "src/objects/objects-inl.h"
......@@ -245,7 +246,8 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(
SLOW_DCHECK(static_cast<size_t>(object_size) <=
MemoryChunkLayout::AllocatableMemoryInDataPage());
if (!heap()->ShouldBePromoted(object.address())) {
if (!SemiSpaceNewSpace::From(heap()->new_space())
->ShouldBePromoted(object.address())) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
......
......@@ -404,11 +404,14 @@ void ScavengerCollector::CollectGarbage() {
}
}
SemiSpaceNewSpace* semi_space_new_space =
SemiSpaceNewSpace::From(heap_->new_space());
if (FLAG_concurrent_marking) {
// Ensure that concurrent marker does not track pages that are
// going to be unmapped.
for (Page* p :
PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
PageRange(semi_space_new_space->from_space().first_page(), nullptr)) {
heap_->concurrent_marking()->ClearMemoryChunkData(p);
}
}
......@@ -416,7 +419,7 @@ void ScavengerCollector::CollectGarbage() {
ProcessWeakReferences(&ephemeron_table_list);
// Set age mark.
heap_->new_space_->set_age_mark(heap_->new_space()->top());
semi_space_new_space->set_age_mark(semi_space_new_space->top());
// Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
......@@ -532,7 +535,10 @@ void ScavengerCollector::MergeSurvivingNewLargeObjects(
int ScavengerCollector::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
static_cast<int>(
SemiSpaceNewSpace::From(heap_->new_space())->TotalCapacity()) /
MB +
1;
static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
int tasks = std::max(
1, std::min({num_scavenge_tasks, kMaxScavengerTasks, num_cores}));
......
......@@ -910,6 +910,16 @@ int FixedArrayLenFromSize(int size) {
FixedArray::kMaxRegularLength});
}
int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) {
Address top = space->top();
if ((top & kPageAlignmentMask) == 0) {
// `top` pointing to the start of a page signifies that there is no room
// left on the current page.
return 0;
}
return static_cast<int>(Page::FromAddress(space->top())->area_end() - top);
}
void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
DCHECK(!FLAG_single_generation);
PauseAllocationObserversScope pause_observers(heap);
......@@ -919,8 +929,7 @@ void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
// the current allocation pointer.
DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining =
static_cast<int>(space->to_space().page_high() - space->top());
int space_remaining = GetSpaceRemainingOnCurrentPage(space);
while (space_remaining > 0) {
int length = FixedArrayLenFromSize(space_remaining);
if (length > 0) {
......
......@@ -5,6 +5,7 @@
#include "test/cctest/heap/heap-utils.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
......@@ -134,6 +135,18 @@ bool FillCurrentPage(v8::internal::NewSpace* space,
return heap::FillCurrentPageButNBytes(space, 0, out_handles);
}
namespace {
int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) {
Address top = space->top();
if ((top & kPageAlignmentMask) == 0) {
// `top` pointing to the start of a page signifies that there is no room
// left on the current page.
return 0;
}
return static_cast<int>(Page::FromAddress(space->top())->area_end() - top);
}
} // namespace
bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
std::vector<Handle<FixedArray>>* out_handles) {
PauseAllocationObserversScope pause_observers(space->heap());
......@@ -142,8 +155,7 @@ bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
// the current allocation pointer.
DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
space->limit() == space->top());
int space_remaining =
static_cast<int>(space->to_space().page_high() - space->top());
int space_remaining = GetSpaceRemainingOnCurrentPage(space);
CHECK(space_remaining >= extra_bytes);
int new_linear_size = space_remaining - extra_bytes;
if (new_linear_size == 0) return false;
......
......@@ -78,7 +78,8 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
CHECK_GT(handles.size(), 0u);
Page* const to_be_promoted_page = FindLastPageInNewSpace(handles);
CHECK_NOT_NULL(to_be_promoted_page);
CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
CHECK(!to_be_promoted_page->Contains(
SemiSpaceNewSpace::From(heap->new_space())->age_mark()));
// To perform a sanity check on live bytes we need to mark the heap.
heap::SimulateIncrementalMarking(heap, true);
// Sanity check that the page meets the requirements for promotion.
......
......@@ -289,7 +289,7 @@ TEST(ComputeDiscardMemoryAreas) {
CHECK_EQ(memory_area.size(), page_size * 2);
}
TEST(NewSpace) {
TEST(SemiSpaceNewSpace) {
if (FLAG_single_generation) return;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
......@@ -297,10 +297,11 @@ TEST(NewSpace) {
MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
LinearAllocationArea allocation_info;
std::unique_ptr<NewSpace> new_space = std::make_unique<NewSpace>(
heap, memory_allocator->data_page_allocator(),
CcTest::heap()->InitialSemiSpaceSize(),
CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
std::unique_ptr<SemiSpaceNewSpace> new_space =
std::make_unique<SemiSpaceNewSpace>(
heap, memory_allocator->data_page_allocator(),
CcTest::heap()->InitialSemiSpaceSize(),
CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
CHECK(new_space->MaximumCapacity());
while (new_space->Available() >= kMaxRegularHeapObjectSize) {
......@@ -313,7 +314,6 @@ TEST(NewSpace) {
memory_allocator->unmapper()->EnsureUnmappingCompleted();
}
TEST(OldSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
......