Commit a02eac53 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Limit the capacity of the young large object space

For the initial implementation we simply keep the capacity of
the young large object space in sync with the capacity of the
new space. The only subtlety is that we always allow at least
one large object, regardless of its size, so a single object
may exceed the capacity of the space.
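
The rule is small enough to sketch in isolation. The class
below is a simplified illustration only (hypothetical names,
not the actual V8 types); SetCapacity(), Available(), and the
first-object exemption correspond to the hunks below:

  // Simplified sketch; "YoungLargeObjectSpaceSketch" is a
  // hypothetical stand-in for NewLargeObjectSpace.
  #include <algorithm>
  #include <cstddef>

  class YoungLargeObjectSpaceSketch {
   public:
    explicit YoungLargeObjectSpaceSketch(size_t capacity)
        : capacity_(capacity) {}

    // Kept in sync with the new space capacity by the heap.
    void SetCapacity(size_t capacity) {
      // Never drop below what is already allocated.
      capacity_ = std::max(capacity, size_);
    }

    size_t Available() const { return capacity_ - size_; }

    // The first large object is always admitted, whatever its
    // size; afterwards the object must fit into the remaining
    // capacity or the caller retries after a GC.
    bool CanAllocate(size_t object_size) const {
      return size_ == 0 || object_size <= Available();
    }

   private:
    size_t capacity_;
    size_t size_ = 0;
  };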

This also fixes the setting of large page flags during
incremental marking.

Bug: chromium:852420
Change-Id: I12a9d4a7350464ca291710917ecad782ae73b8e1
Reviewed-on: https://chromium-review.googlesource.com/c/1456092
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59422}
parent 2f2f0724
@@ -1176,6 +1176,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
   set_current_gc_flags(kNoGCFlags);
   new_space_->Shrink();
+  new_lo_space_->SetCapacity(new_space_->Capacity());
   UncommitFromSpace();
   EagerlyFreeExternalMemory();
@@ -1945,6 +1946,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
     new_space_->Grow();
     survived_since_last_expansion_ = 0;
   }
+  new_lo_space()->SetCapacity(new_space()->Capacity());
 }

 void Heap::EvacuateYoungGeneration() {
@@ -2924,6 +2926,7 @@ void Heap::ReduceNewSpaceSize() {
       ((allocation_throughput != 0) &&
        (allocation_throughput < kLowAllocationThroughput))) {
     new_space_->Shrink();
+    new_lo_space_->SetCapacity(new_space_->Capacity());
     UncommitFromSpace();
   }
 }
@@ -4466,7 +4469,8 @@ void Heap::SetUp() {
   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
-  space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
+  space_[NEW_LO_SPACE] = new_lo_space_ =
+      new NewLargeObjectSpace(this, new_space_->Capacity());
   space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);

   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
...
@@ -2025,6 +2025,7 @@ class Heap {
   friend class MarkCompactCollector;
   friend class MarkCompactCollectorBase;
   friend class MinorMarkCompactCollector;
+  friend class NewLargeObjectSpace;
   friend class NewSpace;
   friend class ObjectStatsCollector;
   friend class Page;
...
@@ -209,6 +209,11 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
   DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

+  for (LargePage* p : *heap_->new_lo_space()) {
+    p->SetYoungGenerationPageFlags(false);
+    DCHECK(p->IsLargePage());
+  }
+
   for (LargePage* p : *heap_->lo_space()) {
     p->SetOldGenerationPageFlags(false);
   }
@@ -239,6 +244,11 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
   ActivateIncrementalWriteBarrier(heap_->code_space());
   ActivateIncrementalWriteBarrier(heap_->new_space());

+  for (LargePage* p : *heap_->new_lo_space()) {
+    p->SetYoungGenerationPageFlags(true);
+    DCHECK(p->IsLargePage());
+  }
+
   for (LargePage* p : *heap_->lo_space()) {
     p->SetOldGenerationPageFlags(true);
   }
...
@@ -326,7 +326,7 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
     HeapObjectReference::Update(slot, target);
     object->map_slot().Release_Store(
         MapWord::FromForwardingAddress(target).ToMap());
-    return Heap::InToPage(target) ? KEEP_SLOT : REMOVE_SLOT;
+    return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
   }
   Map map = first_word.ToMap();
   SlotCallbackResult result =
...
@@ -245,6 +245,8 @@ void ScavengerCollector::CollectGarbage() {
     // Finalize parallel scavenging.
     TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);

+    DCHECK(surviving_new_large_objects_.empty());
+
     for (int i = 0; i < num_scavenge_tasks; i++) {
       scavengers[i]->Finalize();
       delete scavengers[i];
...
@@ -3653,7 +3653,8 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
           object->IsFreeSpace() || object->IsFeedbackMetadata() ||
           object->IsContext() ||
           object->IsUncompiledDataWithoutPreparseData() ||
-          object->IsPreparseData())) {
+          object->IsPreparseData()) &&
+        !FLAG_young_generation_large_objects) {
       FATAL("Found invalid Object (instance_type=%i) in large object space.",
             object->map()->instance_type());
     }
@@ -3736,25 +3737,40 @@ void Page::Print() {
 #endif  // DEBUG

-NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
-    : LargeObjectSpace(heap, NEW_LO_SPACE), pending_object_(0) {}
+NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
+    : LargeObjectSpace(heap, NEW_LO_SPACE),
+      pending_object_(0),
+      capacity_(capacity) {}

 AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
-  // TODO(hpayer): Add heap growing strategy here.
+  // Do not allocate more objects if promoting the existing object would exceed
+  // the old generation capacity.
+  if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
+    return AllocationResult::Retry(identity());
+  }
+  // Allocation for the first object must succeed independent from the capacity.
+  if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
+    return AllocationResult::Retry(identity());
+  }
   LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
   if (page == nullptr) return AllocationResult::Retry(identity());
+
+  // The size of the first object may exceed the capacity.
+  capacity_ = Max(capacity_, SizeOfObjects());
+
   HeapObject result = page->GetObject();
   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   page->SetFlag(MemoryChunk::TO_PAGE);
   pending_object_.store(result->address(), std::memory_order_relaxed);
   page->InitializationMemoryFence();
+  DCHECK(page->IsLargePage());
+  DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
   return result;
 }

-size_t NewLargeObjectSpace::Available() {
-  // TODO(hpayer): Update as soon as we have a growing strategy.
-  return 0;
-}
+size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }

 void NewLargeObjectSpace::Flip() {
   for (LargePage* chunk = first_page(); chunk != nullptr;
@@ -3778,6 +3794,10 @@ void NewLargeObjectSpace::FreeAllObjects() {
   objects_size_ = 0;
 }

+void NewLargeObjectSpace::SetCapacity(size_t capacity) {
+  capacity_ = Max(capacity, SizeOfObjects());
+}
+
 CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
     : LargeObjectSpace(heap, CODE_LO_SPACE),
       chunk_map_(kInitialChunkMapCapacity) {}
...
@@ -3080,7 +3080,7 @@ class LargeObjectSpace : public Space {

 class NewLargeObjectSpace : public LargeObjectSpace {
  public:
-  explicit NewLargeObjectSpace(Heap* heap);
+  NewLargeObjectSpace(Heap* heap, size_t capacity);

   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);

@@ -3091,6 +3091,8 @@ class NewLargeObjectSpace : public LargeObjectSpace {
   void FreeAllObjects();

+  void SetCapacity(size_t capacity);
+
   // The last allocated object that is not guaranteed to be initialized when
   // the concurrent marker visits it.
   Address pending_object() {

@@ -3101,6 +3103,7 @@ class NewLargeObjectSpace : public LargeObjectSpace {
  private:
   std::atomic<Address> pending_object_;
+  size_t capacity_;
 };

 class CodeLargeObjectSpace : public LargeObjectSpace {
...
@@ -19017,16 +19017,9 @@ TEST(GetHeapSpaceStatistics) {
     v8::HeapSpaceStatistics space_statistics;
     isolate->GetHeapSpaceStatistics(&space_statistics, i);
     CHECK_NOT_NULL(space_statistics.space_name());
-    if (strcmp(space_statistics.space_name(), "new_large_object_space") == 0 ||
-        strcmp(space_statistics.space_name(), "code_large_object_space") == 0) {
-      continue;
-    }
-    CHECK_GT(space_statistics.space_size(), 0u);
     total_size += space_statistics.space_size();
-    CHECK_GT(space_statistics.space_used_size(), 0u);
     total_used_size += space_statistics.space_used_size();
     total_available_size += space_statistics.space_available_size();
-    CHECK_GT(space_statistics.physical_space_size(), 0u);
     total_physical_size += space_statistics.physical_space_size();
   }
   total_available_size += CcTest::heap()->memory_allocator()->Available();