Commit e438d5d4 authored by Hannes Payer, committed by Commit Bot

[heap] Make objects allocated in new_lo_space look like young generation objects.

The scavenger still does not handle young generation large objects correctly;
support for that will be added in a follow-up CL.

Bug: chromium:852420
Change-Id: I2587509d6e7f329aeff3db246d949bb30b3a91a5
Reviewed-on: https://chromium-review.googlesource.com/1124477
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54197}
parent ba4301b4
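For context: in this CL, "looking like a young generation object" comes down to the flags set on the page that owns the object. A minimal sketch of the state a NEW_LO_SPACE page ends up in after allocation, using the MemoryChunk flag names from the diff below (this is an illustration, not additional code from the CL):

// Young-generation pages keep POINTERS_TO_HERE_ARE_INTERESTING set
// unconditionally, so old-to-new stores whose target lives on this page are
// recorded, and only toggle POINTERS_FROM_HERE_ARE_INTERESTING with
// incremental marking.
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
// IN_TO_SPACE additionally tags the page itself as young-generation
// to-space, which is what the new cctest at the end of the diff checks.
page->SetFlag(MemoryChunk::IN_TO_SPACE);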
@@ -184,7 +184,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
} else if (LO_SPACE == space) {
DCHECK(large_object);
if (FLAG_young_generation_large_objects) {
allocation = new_lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
allocation = new_lo_space_->AllocateRaw(size_in_bytes);
} else {
allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
}
......
@@ -43,7 +43,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
// Ensure that the new object is marked black.
HeapObject* object = HeapObject::FromAddress(addr);
if (incremental_marking_.marking_state()->IsWhite(object) &&
!heap->InNewSpace(object)) {
!(heap->InNewSpace(object) || heap->new_lo_space()->Contains(object))) {
if (heap->lo_space()->Contains(object)) {
incremental_marking_.marking_state()->WhiteToBlack(object);
} else {
@@ -229,33 +229,10 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
Heap* heap_;
};
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
bool is_marking) {
if (is_marking) {
chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
} else {
chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
}
}
void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
bool is_marking) {
chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
if (is_marking) {
chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
} else {
chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
}
}
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
PagedSpace* space) {
for (Page* p : *space) {
SetOldSpacePageFlags(p, false);
p->SetOldGenerationPageFlags(false);
}
}
@@ -263,7 +240,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
NewSpace* space) {
for (Page* p : *space) {
SetNewSpacePageFlags(p, false);
p->SetYoungGenerationPageFlags(false);
}
}
@@ -274,22 +251,22 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
for (LargePage* lop : *heap_->lo_space()) {
SetOldSpacePageFlags(lop, false);
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(false);
}
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
for (Page* p : *space) {
SetOldSpacePageFlags(p, true);
p->SetOldGenerationPageFlags(true);
}
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
for (Page* p : *space) {
SetNewSpacePageFlags(p, true);
p->SetYoungGenerationPageFlags(true);
}
}
@@ -300,8 +277,8 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->code_space());
ActivateIncrementalWriteBarrier(heap_->new_space());
for (LargePage* lop : *heap_->lo_space()) {
SetOldSpacePageFlags(lop, true);
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(true);
}
}
......
@@ -226,14 +226,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// the concurrent marker.
void MarkBlackAndPush(HeapObject* obj);
inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
SetOldSpacePageFlags(chunk, IsMarking());
}
inline void SetNewSpacePageFlags(Page* chunk) {
SetNewSpacePageFlags(chunk, IsMarking());
}
bool IsCompacting() { return IsMarking() && is_compacting_; }
void ActivateGeneratedStub(Code* stub);
@@ -279,10 +271,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
IncrementalMarking& incremental_marking_;
};
static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking);
static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
void StartMarking();
void StartBlackAllocation();
......
@@ -678,7 +678,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
DCHECK_GE(Page::kAllocatableMemory, page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocatedBytes();
heap()->incremental_marking()->SetOldSpacePageFlags(page);
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
page->list_node().Initialize();
@@ -694,7 +694,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
Page* page = static_cast<Page*>(chunk);
heap()->incremental_marking()->SetNewSpacePageFlags(page);
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateLocalTracker();
page->list_node().Initialize();
#ifdef ENABLE_MINOR_MC
@@ -716,7 +716,6 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
@@ -905,6 +904,25 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
return chunk;
}
void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
if (is_marking) {
SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
} else {
ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
}
}
void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
if (is_marking) {
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
} else {
ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
}
}
void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
void Page::AllocateLocalTracker() {
@@ -3353,7 +3371,6 @@ void LargeObjectSpace::TearDown() {
}
}
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
@@ -3363,9 +3380,27 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
return AllocationResult::Retry(identity());
}
LargePage* page = AllocateLargePage(object_size, executable);
if (page == nullptr) return AllocationResult::Retry(identity());
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
HeapObject* object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
return object;
}
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
Executability executable) {
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
if (page == nullptr) return AllocationResult::Retry(identity());
if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
size_ += static_cast<int>(page->size());
@@ -3385,20 +3420,10 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->fixed_array_map();
reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
}
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
AllocationStep(object_size, object->address(), object_size);
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
return object;
return page;
}
@@ -3651,6 +3676,15 @@ void Page::Print() {
NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, NEW_LO_SPACE) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
// TODO(hpayer): Add heap growing strategy here.
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
if (page == nullptr) return AllocationResult::Retry(identity());
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->SetFlag(MemoryChunk::IN_TO_SPACE);
return page->GetObject();
}
size_t NewLargeObjectSpace::Available() {
// TODO(hpayer): Update as soon as we have a growing strategy.
return 0;
......
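The two MemoryChunk helpers added above differ only in how they treat POINTERS_TO_HERE_ARE_INTERESTING. A rough sketch of why, assuming the usual flag-based barrier check; the helper name below is illustrative and not part of the CL:

// The generational/incremental write barrier only records a slot when the
// source page advertises outgoing pointers as interesting and the target
// page advertises incoming pointers as interesting.
bool BarrierWouldRecordSlot(MemoryChunk* source, MemoryChunk* target) {
  return source->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) &&
         target->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
}
// Old-generation pages clear POINTERS_TO_HERE_ARE_INTERESTING outside of
// marking, while young-generation pages, now including new_lo_space pages,
// keep it set so that old-to-young stores are always recorded.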
@@ -420,6 +420,9 @@ class MemoryChunk {
~kAlignmentMask);
}
void SetOldGenerationPageFlags(bool is_marking);
void SetYoungGenerationPageFlags(bool is_marking);
static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
static inline void UpdateHighWaterMark(Address mark) {
@@ -2953,8 +2956,6 @@ class LargeObjectSpace : public Space {
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
// Shared implementation of AllocateRaw, AllocateRawCode and
// AllocateRawFixedArray.
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
@@ -3021,6 +3022,9 @@ class LargeObjectSpace : public Space {
void Print() override;
#endif
protected:
LargePage* AllocateLargePage(int object_size, Executability executable);
private:
size_t size_; // allocated bytes
int page_count_; // number of chunks
@@ -3040,6 +3044,8 @@ class NewLargeObjectSpace : public LargeObjectSpace {
public:
explicit NewLargeObjectSpace(Heap* heap);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
// Available bytes for objects in this space.
size_t Available() override;
};
......
@@ -5666,6 +5666,7 @@ TEST(YoungGenerationLargeObjectAllocation) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
CHECK(chunk->owner()->identity() == NEW_LO_SPACE);
CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
}
TEST(UncommitUnusedLargeObjectMemory) {
......
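The added CHECKs amount to classifying an object as a young-generation large object. A small sketch built only from the calls used in the test above; the helper name is made up for illustration:

// Given any HeapObject, its MemoryChunk exposes both the owning space's
// identity and the IN_TO_SPACE flag that this CL now sets on new_lo_space
// pages.
bool IsYoungLargeObject(HeapObject* object) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  return chunk->owner()->identity() == NEW_LO_SPACE &&
         chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE);
}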