Commit 17dd1051 authored by Hannes Payer, committed by Commit Bot

Reland "[heap] Make objects allocated in new_lo_space look like young generation objects."

Bug: chromium:852420
Change-Id: I6edaa7c3e0a07eb69fa497fdeddeacf082cdadc8
Reviewed-on: https://chromium-review.googlesource.com/1126109
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54213}
parent 0ca0e093
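
For context: the POINTERS_TO_HERE_ARE_INTERESTING / POINTERS_FROM_HERE_ARE_INTERESTING flags that this change moves onto MemoryChunk are what the generational/incremental write barrier consults. A minimal sketch of that check (simplified illustration only; the helper name is made up here and this is not the actual V8 barrier code):

    // Sketch: a store of `value` into a slot on `host` only needs to be
    // recorded if the host page may hold interesting outgoing pointers and
    // the value's page is interesting to point at (e.g. a young page).
    bool MayNeedSlotRecording(MemoryChunk* host, MemoryChunk* value) {
      return host->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) &&
             value->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    }

Giving new_lo_space pages young-generation flags (see the spaces.cc hunks below) therefore makes old-to-new pointers into large young objects visible to the barrier like any other old-to-new pointer.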
@@ -184,7 +184,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
   } else if (LO_SPACE == space) {
     DCHECK(large_object);
     if (FLAG_young_generation_large_objects) {
-      allocation = new_lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+      allocation = new_lo_space_->AllocateRaw(size_in_bytes);
     } else {
       allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
     }
@@ -43,7 +43,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
   // Ensure that the new object is marked black.
   HeapObject* object = HeapObject::FromAddress(addr);
   if (incremental_marking_.marking_state()->IsWhite(object) &&
-      !heap->InNewSpace(object)) {
+      !(heap->InNewSpace(object) || heap->new_lo_space()->Contains(object))) {
     if (heap->lo_space()->Contains(object)) {
       incremental_marking_.marking_state()->WhiteToBlack(object);
     } else {
@@ -229,33 +229,10 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
   Heap* heap_;
 };
 
-void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
-                                              bool is_marking) {
-  if (is_marking) {
-    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  } else {
-    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  }
-}
-
-void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
-                                              bool is_marking) {
-  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-  if (is_marking) {
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  } else {
-    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  }
-}
-
 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     PagedSpace* space) {
   for (Page* p : *space) {
-    SetOldSpacePageFlags(p, false);
+    p->SetOldGenerationPageFlags(false);
   }
 }
@@ -263,7 +240,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     NewSpace* space) {
   for (Page* p : *space) {
-    SetNewSpacePageFlags(p, false);
+    p->SetYoungGenerationPageFlags(false);
   }
 }
@@ -274,22 +251,22 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
   DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
-  for (LargePage* lop : *heap_->lo_space()) {
-    SetOldSpacePageFlags(lop, false);
+  for (LargePage* p : *heap_->lo_space()) {
+    p->SetOldGenerationPageFlags(false);
   }
 }
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
   for (Page* p : *space) {
-    SetOldSpacePageFlags(p, true);
+    p->SetOldGenerationPageFlags(true);
   }
 }
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
   for (Page* p : *space) {
-    SetNewSpacePageFlags(p, true);
+    p->SetYoungGenerationPageFlags(true);
   }
 }
@@ -300,8 +277,8 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
   ActivateIncrementalWriteBarrier(heap_->code_space());
   ActivateIncrementalWriteBarrier(heap_->new_space());
-  for (LargePage* lop : *heap_->lo_space()) {
-    SetOldSpacePageFlags(lop, true);
+  for (LargePage* p : *heap_->lo_space()) {
+    p->SetOldGenerationPageFlags(true);
   }
 }
@@ -226,14 +226,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   // the concurrent marker.
   void MarkBlackAndPush(HeapObject* obj);
 
-  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
-    SetOldSpacePageFlags(chunk, IsMarking());
-  }
-
-  inline void SetNewSpacePageFlags(Page* chunk) {
-    SetNewSpacePageFlags(chunk, IsMarking());
-  }
-
   bool IsCompacting() { return IsMarking() && is_compacting_; }
 
   void ActivateGeneratedStub(Code* stub);
@@ -279,10 +271,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
     IncrementalMarking& incremental_marking_;
   };
 
-  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking);
-  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
-
   void StartMarking();
 
   void StartBlackAllocation();
@@ -678,7 +678,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
   DCHECK_GE(Page::kAllocatableMemory, page->area_size());
   // Make sure that categories are initialized before freeing the area.
   page->ResetAllocatedBytes();
-  heap()->incremental_marking()->SetOldSpacePageFlags(page);
+  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   page->AllocateFreeListCategories();
   page->InitializeFreeListCategories();
   page->list_node().Initialize();
@@ -694,7 +694,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
   Page* page = static_cast<Page*>(chunk);
-  heap()->incremental_marking()->SetNewSpacePageFlags(page);
+  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   page->AllocateLocalTracker();
   page->list_node().Initialize();
 #ifdef ENABLE_MINOR_MC
@@ -716,13 +716,11 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
     STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
     FATAL("Code page is too large.");
   }
-  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
 
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
 
   LargePage* page = static_cast<LargePage*>(chunk);
   page->list_node().Initialize();
-  page->InitializationMemoryFence();
   return page;
 }
@@ -905,6 +903,25 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
   return chunk;
 }
 
+void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
+  if (is_marking) {
+    SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
+void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
+  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+  if (is_marking) {
+    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
 void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
 
 void Page::AllocateLocalTracker() {
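
For quick reference, the flag states the two setters above produce (TO = POINTERS_TO_HERE_ARE_INTERESTING, FROM = POINTERS_FROM_HERE_ARE_INTERESTING), read directly from the code:

    // SetOldGenerationPageFlags:   is_marking -> TO set,     FROM set
    //                              otherwise  -> TO cleared, FROM set
    // SetYoungGenerationPageFlags: is_marking -> TO set,     FROM set
    //                              otherwise  -> TO set,     FROM cleared
    //
    // Old-generation pages always keep FROM set (stores on them may create
    // old-to-new pointers); young-generation pages always keep TO set
    // (pointers into them are always of interest to the scavenger).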
@@ -3353,7 +3370,6 @@ void LargeObjectSpace::TearDown() {
   }
 }
 
-
 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                                Executability executable) {
   // Check if we want to force a GC before growing the old space further.
@@ -3363,9 +3379,28 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     return AllocationResult::Retry(identity());
   }
 
+  LargePage* page = AllocateLargePage(object_size, executable);
+  if (page == nullptr) return AllocationResult::Retry(identity());
+  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+  HeapObject* object = page->GetObject();
+  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+      heap()->GCFlagsForIncrementalMarking(),
+      kGCCallbackScheduleIdleGarbageCollection);
+  if (heap()->incremental_marking()->black_allocation()) {
+    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
+  }
+  DCHECK_IMPLIES(
+      heap()->incremental_marking()->black_allocation(),
+      heap()->incremental_marking()->marking_state()->IsBlack(object));
+  page->InitializationMemoryFence();
+  return object;
+}
+
+LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
+                                               Executability executable) {
   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
-  if (page == nullptr) return AllocationResult::Retry(identity());
+  if (page == nullptr) return nullptr;
 
   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
 
   size_ += static_cast<int>(page->size());
@@ -3385,20 +3420,10 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
         ReadOnlyRoots(heap()).fixed_array_map();
     reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
   }
-  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
-      heap()->GCFlagsForIncrementalMarking(),
-      kGCCallbackScheduleIdleGarbageCollection);
   heap()->CreateFillerObjectAt(object->address(), object_size,
                                ClearRecordedSlots::kNo);
-  if (heap()->incremental_marking()->black_allocation()) {
-    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
-  }
   AllocationStep(object_size, object->address(), object_size);
-  DCHECK_IMPLIES(
-      heap()->incremental_marking()->black_allocation(),
-      heap()->incremental_marking()->marking_state()->IsBlack(object));
-  return object;
+  return page;
 }
@@ -3651,6 +3676,16 @@ void Page::Print() {
 NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
     : LargeObjectSpace(heap, NEW_LO_SPACE) {}
 
+AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
+  // TODO(hpayer): Add heap growing strategy here.
+  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
+  if (page == nullptr) return AllocationResult::Retry(identity());
+  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+  page->SetFlag(MemoryChunk::IN_TO_SPACE);
+  page->InitializationMemoryFence();
+  return page->GetObject();
+}
+
 size_t NewLargeObjectSpace::Available() {
   // TODO(hpayer): Update as soon as we have a growing strategy.
   return 0;
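
A minimal usage sketch of the new allocation path (hypothetical caller code, not part of this change; it assumes FLAG_young_generation_large_objects is enabled and mirrors the Heap::AllocateRaw hunk above and the test added below):

    // Hypothetical illustration of allocating a large young-generation object.
    AllocationResult result = heap->new_lo_space()->AllocateRaw(object_size);
    HeapObject* obj;
    if (result.To(&obj)) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      // The backing large page now carries young-generation page flags.
      DCHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
    }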
@@ -420,6 +420,9 @@ class MemoryChunk {
                                        ~kAlignmentMask);
   }
 
+  void SetOldGenerationPageFlags(bool is_marking);
+  void SetYoungGenerationPageFlags(bool is_marking);
+
   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
 
   static inline void UpdateHighWaterMark(Address mark) {
@@ -2953,8 +2956,6 @@ class LargeObjectSpace : public Space {
     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   }
 
-  // Shared implementation of AllocateRaw, AllocateRawCode and
-  // AllocateRawFixedArray.
   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                      Executability executable);
@@ -3021,6 +3022,9 @@ class LargeObjectSpace : public Space {
   void Print() override;
 #endif
 
+ protected:
+  LargePage* AllocateLargePage(int object_size, Executability executable);
+
  private:
   size_t size_;        // allocated bytes
   int page_count_;     // number of chunks
@@ -3040,6 +3044,8 @@ class NewLargeObjectSpace : public LargeObjectSpace {
  public:
   explicit NewLargeObjectSpace(Heap* heap);
 
+  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
+
   // Available bytes for objects in this space.
   size_t Available() override;
 };
@@ -5671,6 +5671,7 @@ TEST(YoungGenerationLargeObjectAllocation) {
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
   MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
   CHECK(chunk->owner()->identity() == NEW_LO_SPACE);
+  CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
 }
 
 TEST(UncommitUnusedLargeObjectMemory) {