Commit a02eac53 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Limit the capacity of the young large object space

For the initial implementation we simply keep the capacity of
the young large object space in sync with the capacity of the
new space. The only subtlety is that we always allow at least
one large object, regardless of its size, so it may exceed the
capacity of the space.

This also fixes setting of the large page flags for incremental
marking.

Bug: chromium:852420
Change-Id: I12a9d4a7350464ca291710917ecad782ae73b8e1
Reviewed-on: https://chromium-review.googlesource.com/c/1456092
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59422}
parent 2f2f0724
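
Before the diff, a minimal standalone sketch of the capacity rule described in the message above, using invented names (the real logic lives in NewLargeObjectSpace in the diff below): the capacity tracks the new space but never drops below what is already allocated, and the first large object is always admitted regardless of its size.

#include <algorithm>
#include <cstddef>

// Hypothetical illustration only; not the V8 implementation.
class YoungLargeObjectSpaceSketch {
 public:
  explicit YoungLargeObjectSpaceSketch(size_t capacity) : capacity_(capacity) {}

  // Called whenever the new space grows or shrinks: the capacity follows the
  // new space but never drops below the bytes already allocated.
  void SetCapacity(size_t capacity) {
    capacity_ = std::max(capacity, size_of_objects_);
  }

  size_t Available() const { return capacity_ - size_of_objects_; }

  // Returns false when the allocation should be retried after a scavenge.
  bool TryAllocate(size_t object_size) {
    // The first object is always admitted, even if it is larger than the
    // capacity; the capacity is then raised so Available() stays well defined.
    if (size_of_objects_ > 0 && object_size > Available()) return false;
    size_of_objects_ += object_size;
    capacity_ = std::max(capacity_, size_of_objects_);
    return true;
  }

 private:
  size_t capacity_;
  size_t size_of_objects_ = 0;
};
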
@@ -1176,6 +1176,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
new_lo_space_->SetCapacity(new_space_->Capacity());
UncommitFromSpace();
EagerlyFreeExternalMemory();
@@ -1945,6 +1946,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
new_space_->Grow();
survived_since_last_expansion_ = 0;
}
new_lo_space()->SetCapacity(new_space()->Capacity());
}
void Heap::EvacuateYoungGeneration() {
@@ -2924,6 +2926,7 @@ void Heap::ReduceNewSpaceSize() {
((allocation_throughput != 0) &&
(allocation_throughput < kLowAllocationThroughput))) {
new_space_->Shrink();
new_lo_space_->SetCapacity(new_space_->Capacity());
UncommitFromSpace();
}
}
@@ -4466,7 +4469,8 @@ void Heap::SetUp() {
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
space_[NEW_LO_SPACE] = new_lo_space_ =
new NewLargeObjectSpace(this, new_space_->Capacity());
space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -2025,6 +2025,7 @@ class Heap {
friend class MarkCompactCollector;
friend class MarkCompactCollectorBase;
friend class MinorMarkCompactCollector;
friend class NewLargeObjectSpace;
friend class NewSpace;
friend class ObjectStatsCollector;
friend class Page;
@@ -209,6 +209,11 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
for (LargePage* p : *heap_->new_lo_space()) {
p->SetYoungGenerationPageFlags(false);
DCHECK(p->IsLargePage());
}
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(false);
}
@@ -239,6 +244,11 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->code_space());
ActivateIncrementalWriteBarrier(heap_->new_space());
for (LargePage* p : *heap_->new_lo_space()) {
p->SetYoungGenerationPageFlags(true);
DCHECK(p->IsLargePage());
}
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(true);
}
@@ -326,7 +326,7 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObjectReference::Update(slot, target);
object->map_slot().Release_Store(
MapWord::FromForwardingAddress(target).ToMap());
return Heap::InToPage(target) ? KEEP_SLOT : REMOVE_SLOT;
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
@@ -245,6 +245,8 @@ void ScavengerCollector::CollectGarbage() {
// Finalize parallel scavenging.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);
DCHECK(surviving_new_large_objects_.empty());
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i]->Finalize();
delete scavengers[i];
@@ -3653,7 +3653,8 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
object->IsFreeSpace() || object->IsFeedbackMetadata() ||
object->IsContext() ||
object->IsUncompiledDataWithoutPreparseData() ||
object->IsPreparseData())) {
object->IsPreparseData()) &&
!FLAG_young_generation_large_objects) {
FATAL("Found invalid Object (instance_type=%i) in large object space.",
object->map()->instance_type());
}
@@ -3736,25 +3737,40 @@ void Page::Print() {
#endif // DEBUG
NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, NEW_LO_SPACE), pending_object_(0) {}
NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
: LargeObjectSpace(heap, NEW_LO_SPACE),
pending_object_(0),
capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
// TODO(hpayer): Add heap growing strategy here.
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
return AllocationResult::Retry(identity());
}
// Allocation for the first object must succeed independently of the capacity.
if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
return AllocationResult::Retry(identity());
}
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
if (page == nullptr) return AllocationResult::Retry(identity());
// The size of the first object may exceed the capacity.
capacity_ = Max(capacity_, SizeOfObjects());
HeapObject result = page->GetObject();
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->SetFlag(MemoryChunk::TO_PAGE);
pending_object_.store(result->address(), std::memory_order_relaxed);
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
return result;
}
size_t NewLargeObjectSpace::Available() {
// TODO(hpayer): Update as soon as we have a growing strategy.
return 0;
}
size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
void NewLargeObjectSpace::Flip() {
for (LargePage* chunk = first_page(); chunk != nullptr;
@@ -3778,6 +3794,10 @@ void NewLargeObjectSpace::FreeAllObjects() {
objects_size_ = 0;
}
void NewLargeObjectSpace::SetCapacity(size_t capacity) {
capacity_ = Max(capacity, SizeOfObjects());
}
CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, CODE_LO_SPACE),
chunk_map_(kInitialChunkMapCapacity) {}
@@ -3080,7 +3080,7 @@ class LargeObjectSpace : public Space {
class NewLargeObjectSpace : public LargeObjectSpace {
public:
explicit NewLargeObjectSpace(Heap* heap);
NewLargeObjectSpace(Heap* heap, size_t capacity);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
@@ -3091,6 +3091,8 @@ class NewLargeObjectSpace : public LargeObjectSpace {
void FreeAllObjects();
void SetCapacity(size_t capacity);
// The last allocated object that is not guaranteed to be initialized when
// the concurrent marker visits it.
Address pending_object() {
@@ -3101,6 +3103,7 @@ class NewLargeObjectSpace : public LargeObjectSpace {
private:
std::atomic<Address> pending_object_;
size_t capacity_;
};
class CodeLargeObjectSpace : public LargeObjectSpace {
@@ -19017,16 +19017,9 @@ TEST(GetHeapSpaceStatistics) {
v8::HeapSpaceStatistics space_statistics;
isolate->GetHeapSpaceStatistics(&space_statistics, i);
CHECK_NOT_NULL(space_statistics.space_name());
if (strcmp(space_statistics.space_name(), "new_large_object_space") == 0 ||
strcmp(space_statistics.space_name(), "code_large_object_space") == 0) {
continue;
}
CHECK_GT(space_statistics.space_size(), 0u);
total_size += space_statistics.space_size();
CHECK_GT(space_statistics.space_used_size(), 0u);
total_used_size += space_statistics.space_used_size();
total_available_size += space_statistics.space_available_size();
CHECK_GT(space_statistics.physical_space_size(), 0u);
total_physical_size += space_statistics.physical_space_size();
}
total_available_size += CcTest::heap()->memory_allocator()->Available();