Commit b415cd7c authored by Omer Katz, committed by V8 LUCI CQ

[heap] Extend and rename NewSpace base class

NewSpace is renamed to SemiSpaceNewSpace and NewSpaceBase is renamed to
NewSpace (the upcoming PagedSpace-based new space implementation will be
named PagedNewSpace).

Most use cases are updated to use the base class rather than the concrete
semi-space-based implementation. To that end, the base class is extended
with additional virtual methods that delegate to the concrete class.
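In rough terms, the split now looks like this (a simplified sketch of the
pattern, not the literal declarations in src/heap/new-spaces.h):

  class NewSpace {
   public:
    // Virtual hooks added so most callers can stay implementation-agnostic.
    virtual size_t ExternalBackingStoreOverallBytes() const = 0;
    virtual bool ShouldBePromoted(Address address) const = 0;
    // ...
  };

  class SemiSpaceNewSpace final : public NewSpace {
   public:
    // Downcast helper for callsites that need the semi-space-only API.
    static SemiSpaceNewSpace* From(NewSpace* space) {
      return static_cast<SemiSpaceNewSpace*>(space);
    }
    size_t ExternalBackingStoreOverallBytes() const final;
    bool ShouldBePromoted(Address address) const final;
    // Semi-space-specific API, e.g. Flip(), Rebalance(), set_age_mark().
  };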

This CL follows these guidelines (see the sketches after the list):
(*) If a callsite must know the exact new space implementation in use, we
cast to the concrete class. This is the case, for example, for callsites
in scavenger.*.
(*) If a method is called from outside the heap implementation or should
be present regardless of the concrete implementation, that method is
made virtual.
(*) Other cases are usually methods that are specific to a concrete
implementation, where the concrete implementation is not known at the
callsite and there is no clean way to abstract the method. In such cases
we cast to the concrete SemiSpaceNewSpace implementation for now and will
revisit these cases once PagedNewSpace exists.
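Illustrative fragments of the three cases, adapted from callsites touched
by this CL (surrounding code is hypothetical):

  // (1) The callsite knows the implementation (e.g. the scavenger): cast.
  if (!SemiSpaceNewSpace::From(heap()->new_space())
           ->ShouldBePromoted(object.address())) {
    // Try a semi-space copy first.
  }

  // (2) Implementation-agnostic caller: go through the virtual base class.
  size_t bytes = heap()->new_space()->ExternalBackingStoreOverallBytes();

  // (3) Semi-space-specific method with no clean abstraction yet: cast for
  //     now and revisit once PagedNewSpace exists.
  if (!SemiSpaceNewSpace::From(heap()->new_space())->Rebalance()) {
    heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
  }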

Bug: v8:12612
Change-Id: I7b85626774ce0d785b0257bf8d32b9f50eeaf292
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3625975
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80482}
parent a6805703
@@ -323,13 +323,6 @@ Heap* Heap::FromWritableHeapObject(HeapObject obj) {
   return heap;
 }
 
-bool Heap::ShouldBePromoted(Address old_address) {
-  Page* page = Page::FromAddress(old_address);
-  Address age_mark = new_space_->age_mark();
-  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
-         (!page->ContainsLimit(age_mark) || old_address < age_mark);
-}
-
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
   DCHECK(IsAligned(byte_size, kTaggedSize));
   CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
...
@@ -39,35 +39,37 @@ void HeapLayoutTracer::GCEpiloguePrintHeapLayout(v8::Isolate* isolate,
 
 // static
 void HeapLayoutTracer::PrintBasicMemoryChunk(std::ostream& os,
-                                             BasicMemoryChunk* chunk,
+                                             const BasicMemoryChunk& chunk,
                                              const char* owner_name) {
   os << "{owner:" << owner_name << ","
-     << "address:" << chunk << ","
-     << "size:" << chunk->size() << ","
-     << "allocated_bytes:" << chunk->allocated_bytes() << ","
-     << "wasted_memory:" << chunk->wasted_memory() << "}" << std::endl;
+     << "address:" << &chunk << ","
+     << "size:" << chunk.size() << ","
+     << "allocated_bytes:" << chunk.allocated_bytes() << ","
+     << "wasted_memory:" << chunk.wasted_memory() << "}" << std::endl;
 }
 
 // static
 void HeapLayoutTracer::PrintHeapLayout(std::ostream& os, Heap* heap) {
-  for (PageIterator it = heap->new_space()->to_space().begin();
-       it != heap->new_space()->to_space().end(); ++it) {
-    PrintBasicMemoryChunk(os, *it, "to_space");
+  const SemiSpaceNewSpace* semi_space_new_space =
+      SemiSpaceNewSpace::From(heap->new_space());
+  for (ConstPageIterator it = semi_space_new_space->to_space().begin();
+       it != semi_space_new_space->to_space().end(); ++it) {
+    PrintBasicMemoryChunk(os, **it, "to_space");
   }
-  for (PageIterator it = heap->new_space()->from_space().begin();
-       it != heap->new_space()->from_space().end(); ++it) {
-    PrintBasicMemoryChunk(os, *it, "from_space");
+  for (ConstPageIterator it = semi_space_new_space->from_space().begin();
+       it != semi_space_new_space->from_space().end(); ++it) {
+    PrintBasicMemoryChunk(os, **it, "from_space");
   }
   OldGenerationMemoryChunkIterator it(heap);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != nullptr) {
-    PrintBasicMemoryChunk(os, chunk, chunk->owner()->name());
+    PrintBasicMemoryChunk(os, *chunk, chunk->owner()->name());
   }
   for (ReadOnlyPage* page : heap->read_only_space()->pages()) {
-    PrintBasicMemoryChunk(os, page, "ro_space");
+    PrintBasicMemoryChunk(os, *page, "ro_space");
   }
 }
 
 }  // namespace internal
...
@@ -24,7 +24,8 @@ class HeapLayoutTracer : AllStatic {
                                         v8::GCCallbackFlags flags, void* data);
 
  private:
-  static void PrintBasicMemoryChunk(std::ostream& os, BasicMemoryChunk* chunk,
+  static void PrintBasicMemoryChunk(std::ostream& os,
+                                    const BasicMemoryChunk& chunk,
                                     const char* owner_name);
   static void PrintHeapLayout(std::ostream& os, Heap* heap);
 };
...
@@ -49,6 +49,7 @@
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer-inl.h"
 #include "src/heap/gc-tracer.h"
+#include "src/heap/heap-allocator.h"
 #include "src/heap/heap-controller.h"
 #include "src/heap/heap-layout-tracer.h"
 #include "src/heap/heap-write-barrier-inl.h"
@@ -1015,7 +1016,10 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
   deferred_counters_[feature]++;
 }
 
-bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
+void Heap::UncommitFromSpace() {
+  DCHECK_NOT_NULL(new_space_);
+  SemiSpaceNewSpace::From(new_space_)->UncommitFromSpace();
+}
 
 void Heap::GarbageCollectionPrologue(
     GarbageCollectionReason gc_reason,
@@ -2159,11 +2163,7 @@ void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
 
 void Heap::EnsureFromSpaceIsCommitted() {
   if (!new_space_) return;
-  if (new_space_->CommitFromSpaceIfNeeded()) return;
-
-  // Committing memory to from space failed.
-  // Memory is exhausted and we will die.
-  FatalProcessOutOfMemory("Committing semi space failed.");
+  SemiSpaceNewSpace::From(new_space_)->CommitFromSpaceIfNeeded();
 }
 
 bool Heap::CollectionRequested() {
@@ -2699,22 +2699,25 @@ void Heap::EvacuateYoungGeneration() {
     DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
   }
 
+  SemiSpaceNewSpace* semi_space_new_space =
+      SemiSpaceNewSpace::From(new_space());
+
   // Move pages from new->old generation.
-  PageRange range(new_space()->first_allocatable_address(), new_space()->top());
+  PageRange range(semi_space_new_space->first_allocatable_address(),
+                  semi_space_new_space->top());
   for (auto it = range.begin(); it != range.end();) {
     Page* p = (*++it)->prev_page();
-    new_space()->from_space().RemovePage(p);
+    semi_space_new_space->from_space().RemovePage(p);
     Page::ConvertNewToOld(p);
     if (incremental_marking()->IsMarking())
       mark_compact_collector()->RecordLiveSlotsOnPage(p);
   }
 
   // Reset new space.
-  if (!new_space()->Rebalance()) {
+  if (!semi_space_new_space->Rebalance()) {
     FatalProcessOutOfMemory("NewSpace::Rebalance");
   }
-  new_space()->ResetLinearAllocationArea();
-  new_space()->set_age_mark(new_space()->top());
+  semi_space_new_space->ResetLinearAllocationArea();
+  semi_space_new_space->set_age_mark(semi_space_new_space->top());
 
   for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
     LargePage* page = *it;
@@ -2783,8 +2786,12 @@ void Heap::Scavenge() {
 
   // Flip the semispaces. After flipping, to space is empty, from space has
   // live objects.
-  new_space()->Flip();
-  new_space()->ResetLinearAllocationArea();
+  {
+    SemiSpaceNewSpace* semi_space_new_space =
+        SemiSpaceNewSpace::From(new_space());
+    semi_space_new_space->Flip();
+    semi_space_new_space->ResetLinearAllocationArea();
+  }
 
   // We also flip the young generation large object space. All large objects
   // will be in the from space.
@@ -3232,7 +3239,7 @@ void* Heap::AllocateExternalBackingStore(
     const std::function<void*(size_t)>& allocate, size_t byte_length) {
   if (!always_allocate() && new_space()) {
     size_t new_space_backing_store_bytes =
-        new_space()->ExternalBackingStoreBytes();
+        new_space()->ExternalBackingStoreOverallBytes();
     if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
         new_space_backing_store_bytes >= byte_length) {
       // Performing a young generation GC amortizes over the allocated backing
@@ -4853,8 +4860,11 @@ void Heap::VerifyCommittedPhysicalMemory() {
 #endif  // DEBUG
 
 void Heap::ZapFromSpace() {
-  if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
-  for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
+  if (!new_space_) return;
+  SemiSpaceNewSpace* semi_space_new_space = SemiSpaceNewSpace::From(new_space_);
+  if (!semi_space_new_space->IsFromSpaceCommitted()) return;
+  for (Page* page :
+       PageRange(semi_space_new_space->from_space().first_page(), nullptr)) {
     memory_allocator()->ZapBlock(page->area_start(),
                                  page->HighWaterMark() - page->area_start(),
                                  ZapValue());
@@ -5831,7 +5841,7 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
   DCHECK_NOT_NULL(read_only_space_);
   const bool has_young_gen = !FLAG_single_generation && !IsShared();
   if (has_young_gen) {
-    space_[NEW_SPACE] = new_space_ = new NewSpace(
+    space_[NEW_SPACE] = new_space_ = new SemiSpaceNewSpace(
         this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
         max_semi_space_size_, new_allocation_info);
     space_[NEW_LO_SPACE] = new_lo_space_ =
...
@@ -737,10 +737,6 @@ class Heap {
 
   void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
 
-  // An object should be promoted if the object has survived a
-  // scavenge operation.
-  inline bool ShouldBePromoted(Address old_address);
-
   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
 
   inline int NextScriptId();
@@ -1818,7 +1814,7 @@ class Heap {
   void EnsureFromSpaceIsCommitted();
 
   // Uncommit unused semi space.
-  V8_EXPORT_PRIVATE bool UncommitFromSpace();
+  V8_EXPORT_PRIVATE void UncommitFromSpace();
 
   // Fill in bogus values in from space
   void ZapFromSpace();
...
@@ -40,6 +40,7 @@
 #include "src/heap/memory-chunk.h"
 #include "src/heap/memory-measurement-inl.h"
 #include "src/heap/memory-measurement.h"
+#include "src/heap/new-spaces.h"
 #include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/parallel-work-item.h"
@@ -1860,7 +1861,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
       return true;
     }
 
-    if (heap_->ShouldBePromoted(object.address()) &&
+    if (heap_->new_space()->ShouldBePromoted(object.address()) &&
         TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
       promoted_size_ += size;
       return true;
@@ -1948,11 +1949,14 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
   static void Move(Page* page) {
     switch (mode) {
       case NEW_TO_NEW:
-        page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+        SemiSpaceNewSpace::From(page->heap()->new_space())
+            ->MovePageFromSpaceToSpace(page);
        page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
        break;
      case NEW_TO_OLD: {
-        page->heap()->new_space()->from_space().RemovePage(page);
+        SemiSpaceNewSpace::From(page->heap()->new_space())
+            ->from_space()
+            .RemovePage(page);
        Page* new_page = Page::ConvertNewToOld(page);
        DCHECK(!new_page->InYoungGeneration());
        new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
@@ -3616,7 +3620,7 @@ void MarkCompactCollector::EvacuatePrologue() {
          PageRange(new_space->first_allocatable_address(), new_space->top())) {
       new_space_evacuation_pages_.push_back(p);
     }
-    new_space->Flip();
+    SemiSpaceNewSpace::From(new_space)->Flip();
     new_space->ResetLinearAllocationArea();
     DCHECK_EQ(new_space->Size(), 0);
@@ -3640,7 +3644,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
 
   // New space.
   if (heap()->new_space()) {
-    heap()->new_space()->set_age_mark(heap()->new_space()->top());
+    SemiSpaceNewSpace::From(heap()->new_space())
+        ->set_age_mark(heap()->new_space()->top());
     DCHECK_EQ(0, heap()->new_space()->Size());
   }
@@ -3814,7 +3819,8 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
         chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
             chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
         chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
-        chunk->Contains(heap()->new_space()->age_mark()),
+        chunk->Contains(
+            SemiSpaceNewSpace::From(heap()->new_space())->age_mark()),
         saved_live_bytes, evacuation_time,
         chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
   }
@@ -4030,7 +4036,8 @@ bool ShouldMovePage(Page* p, intptr_t live_bytes,
                     AlwaysPromoteYoung always_promote_young) {
   Heap* heap = p->heap();
   const bool reduce_memory = heap->ShouldReduceMemory();
-  const Address age_mark = heap->new_space()->age_mark();
+  const Address age_mark =
+      SemiSpaceNewSpace::From(heap->new_space())->age_mark();
   return !reduce_memory && !p->NeverEvacuate() &&
          (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
          (always_promote_young == AlwaysPromoteYoung::kYes ||
@@ -4270,7 +4277,7 @@ void MarkCompactCollector::Evacuate() {
 
   if (heap()->new_space()) {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
-    if (!heap()->new_space()->Rebalance()) {
+    if (!SemiSpaceNewSpace::From(heap()->new_space())->Rebalance()) {
       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
     }
   }
@@ -5543,8 +5550,10 @@ void MinorMarkCompactCollector::CollectGarbage() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
-    for (Page* p :
-         PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
+    for (Page* p : PageRange(SemiSpaceNewSpace::From(heap()->new_space())
+                                 ->from_space()
+                                 .first_page(),
+                             nullptr)) {
       DCHECK_EQ(promoted_pages_.end(),
                 std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
       non_atomic_marking_state()->ClearLiveness(p);
@@ -5703,7 +5712,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
     new_space_evacuation_pages_.push_back(p);
   }
 
-  new_space->Flip();
+  SemiSpaceNewSpace::From(new_space)->Flip();
   new_space->ResetLinearAllocationArea();
 
   heap()->new_lo_space()->Flip();
@@ -5711,7 +5720,8 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
 }
 
 void MinorMarkCompactCollector::EvacuateEpilogue() {
-  heap()->new_space()->set_age_mark(heap()->new_space()->top());
+  SemiSpaceNewSpace::From(heap()->new_space())
+      ->set_age_mark(heap()->new_space()->top());
   // Give pages that are queued to be freed back to the OS.
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 }
@@ -6096,7 +6106,7 @@ void MinorMarkCompactCollector::Evacuate() {
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
-    if (!heap()->new_space()->Rebalance()) {
+    if (!SemiSpaceNewSpace::From(heap()->new_space())->Rebalance()) {
       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
     }
   }
...
@@ -38,35 +38,28 @@ bool SemiSpace::ContainsSlow(Address a) const {
 }
 
 // --------------------------------------------------------------------------
-// NewSpaceBase
+// NewSpace
 
-bool NewSpaceBase::Contains(Object o) const {
+bool NewSpace::Contains(Object o) const {
   return o.IsHeapObject() && Contains(HeapObject::cast(o));
 }
 
-bool NewSpaceBase::Contains(HeapObject o) const {
+bool NewSpace::Contains(HeapObject o) const {
   return BasicMemoryChunk::FromHeapObject(o)->InNewSpace();
 }
 
-V8_WARN_UNUSED_RESULT inline AllocationResult
-NewSpaceBase::AllocateRawSynchronized(int size_in_bytes,
-                                      AllocationAlignment alignment,
-                                      AllocationOrigin origin) {
+V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
   base::MutexGuard guard(&mutex_);
   return AllocateRaw(size_in_bytes, alignment, origin);
 }
 
 // -----------------------------------------------------------------------------
-// NewSpace
+// SemiSpaceNewSpace
 
-bool NewSpace::ContainsSlow(Address a) const {
-  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
-}
-
-V8_INLINE bool NewSpace::EnsureAllocation(int size_in_bytes,
-                                          AllocationAlignment alignment,
-                                          AllocationOrigin origin,
-                                          int* out_max_aligned_size) {
+V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
+    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
+    int* out_max_aligned_size) {
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 #if DEBUG
   VerifyTop();
...
@@ -8,6 +8,7 @@
 #include "src/heap/evacuation-allocator-inl.h"
 #include "src/heap/incremental-marking-inl.h"
 #include "src/heap/memory-chunk.h"
+#include "src/heap/new-spaces.h"
 #include "src/heap/scavenger.h"
 #include "src/objects/map.h"
 #include "src/objects/objects-inl.h"
@@ -245,7 +246,8 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(
   SLOW_DCHECK(static_cast<size_t>(object_size) <=
               MemoryChunkLayout::AllocatableMemoryInDataPage());
 
-  if (!heap()->ShouldBePromoted(object.address())) {
+  if (!SemiSpaceNewSpace::From(heap()->new_space())
+           ->ShouldBePromoted(object.address())) {
     // A semi-space copy may fail due to fragmentation. In that case, we
     // try to promote the object.
     result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
...
@@ -404,11 +404,14 @@ void ScavengerCollector::CollectGarbage() {
     }
   }
 
+  SemiSpaceNewSpace* semi_space_new_space =
+      SemiSpaceNewSpace::From(heap_->new_space());
+
   if (FLAG_concurrent_marking) {
     // Ensure that concurrent marker does not track pages that are
     // going to be unmapped.
     for (Page* p :
-         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
+         PageRange(semi_space_new_space->from_space().first_page(), nullptr)) {
       heap_->concurrent_marking()->ClearMemoryChunkData(p);
     }
   }
@@ -416,7 +419,7 @@ void ScavengerCollector::CollectGarbage() {
   ProcessWeakReferences(&ephemeron_table_list);
 
   // Set age mark.
-  heap_->new_space_->set_age_mark(heap_->new_space()->top());
+  semi_space_new_space->set_age_mark(semi_space_new_space->top());
 
   // Since we promote all surviving large objects immediatelly, all remaining
   // large objects must be dead.
@@ -532,7 +535,10 @@ void ScavengerCollector::MergeSurvivingNewLargeObjects(
 int ScavengerCollector::NumberOfScavengeTasks() {
   if (!FLAG_parallel_scavenge) return 1;
   const int num_scavenge_tasks =
-      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
+      static_cast<int>(
+          SemiSpaceNewSpace::From(heap_->new_space())->TotalCapacity()) /
+          MB +
+      1;
   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
   int tasks = std::max(
       1, std::min({num_scavenge_tasks, kMaxScavengerTasks, num_cores}));
...
@@ -910,6 +910,16 @@ int FixedArrayLenFromSize(int size) {
                    FixedArray::kMaxRegularLength});
 }
 
+int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) {
+  Address top = space->top();
+  if ((top & kPageAlignmentMask) == 0) {
+    // `top` pointing to the start of a page signifies that there is no room
+    // left on the current page.
+    return 0;
+  }
+  return static_cast<int>(Page::FromAddress(space->top())->area_end() - top);
+}
+
 void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
   DCHECK(!FLAG_single_generation);
   PauseAllocationObserversScope pause_observers(heap);
@@ -919,8 +929,7 @@ void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
   // the current allocation pointer.
   DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
                  space->limit() == space->top());
-  int space_remaining =
-      static_cast<int>(space->to_space().page_high() - space->top());
+  int space_remaining = GetSpaceRemainingOnCurrentPage(space);
   while (space_remaining > 0) {
     int length = FixedArrayLenFromSize(space_remaining);
     if (length > 0) {
...
@@ -5,6 +5,7 @@
 #include "test/cctest/heap/heap-utils.h"
 
 #include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
 #include "src/execution/isolate.h"
 #include "src/heap/factory.h"
 #include "src/heap/heap-inl.h"
@@ -134,6 +135,18 @@ bool FillCurrentPage(v8::internal::NewSpace* space,
   return heap::FillCurrentPageButNBytes(space, 0, out_handles);
 }
 
+namespace {
+int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) {
+  Address top = space->top();
+  if ((top & kPageAlignmentMask) == 0) {
+    // `top` pointing to the start of a page signifies that there is no room
+    // left on the current page.
+    return 0;
+  }
+  return static_cast<int>(Page::FromAddress(space->top())->area_end() - top);
+}
+}  // namespace
+
 bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
                               std::vector<Handle<FixedArray>>* out_handles) {
   PauseAllocationObserversScope pause_observers(space->heap());
@@ -142,8 +155,7 @@ bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
   // the current allocation pointer.
   DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
                  space->limit() == space->top());
-  int space_remaining =
-      static_cast<int>(space->to_space().page_high() - space->top());
+  int space_remaining = GetSpaceRemainingOnCurrentPage(space);
   CHECK(space_remaining >= extra_bytes);
   int new_linear_size = space_remaining - extra_bytes;
   if (new_linear_size == 0) return false;
...
@@ -78,7 +78,8 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
     CHECK_GT(handles.size(), 0u);
     Page* const to_be_promoted_page = FindLastPageInNewSpace(handles);
     CHECK_NOT_NULL(to_be_promoted_page);
-    CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
+    CHECK(!to_be_promoted_page->Contains(
+        SemiSpaceNewSpace::From(heap->new_space())->age_mark()));
     // To perform a sanity check on live bytes we need to mark the heap.
     heap::SimulateIncrementalMarking(heap, true);
     // Sanity check that the page meets the requirements for promotion.
...
@@ -289,7 +289,7 @@ TEST(ComputeDiscardMemoryAreas) {
   CHECK_EQ(memory_area.size(), page_size * 2);
 }
 
-TEST(NewSpace) {
+TEST(SemiSpaceNewSpace) {
   if (FLAG_single_generation) return;
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
@@ -297,10 +297,11 @@ TEST(NewSpace) {
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
   LinearAllocationArea allocation_info;
 
-  std::unique_ptr<NewSpace> new_space = std::make_unique<NewSpace>(
-      heap, memory_allocator->data_page_allocator(),
-      CcTest::heap()->InitialSemiSpaceSize(),
-      CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
+  std::unique_ptr<SemiSpaceNewSpace> new_space =
+      std::make_unique<SemiSpaceNewSpace>(
+          heap, memory_allocator->data_page_allocator(),
+          CcTest::heap()->InitialSemiSpaceSize(),
+          CcTest::heap()->InitialSemiSpaceSize(), &allocation_info);
   CHECK(new_space->MaximumCapacity());
 
   while (new_space->Available() >= kMaxRegularHeapObjectSize) {
@@ -313,7 +314,6 @@ TEST(NewSpace) {
   memory_allocator->unmapper()->EnsureUnmappingCompleted();
 }
 
 TEST(OldSpace) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
...