Commit 12b47ec6 authored by Yang Guo, committed by Commit Bot

Revert "[heap] Make objects allocated in new_lo_space look like young generation objects."

This reverts commit e438d5d4.

Reason for revert: TSAN failures - https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Linux64%20TSAN/21357

Original change's description:
> [heap] Make objects allocated in new_lo_space look like young generation objects.
> 
> The scavenger still does not handle young generation large objects correctly.
> This will be added in a follow-up CL.
> 
> Bug: chromium:852420
> Change-Id: I2587509d6e7f329aeff3db246d949bb30b3a91a5
> Reviewed-on: https://chromium-review.googlesource.com/1124477
> Commit-Queue: Hannes Payer <hpayer@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#54197}

TBR=ulan@chromium.org,hpayer@chromium.org

Change-Id: Ief44cfd841278f2d53d51f3a21c03b70b8025a7a
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:852420
Reviewed-on: https://chromium-review.googlesource.com/1125979
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54205}
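
For orientation before the diff below: old-generation pages keep POINTERS_FROM_HERE_ARE_INTERESTING set and advertise POINTERS_TO_HERE_ARE_INTERESTING only while incremental marking runs, whereas young-generation pages always advertise POINTERS_TO_HERE_ARE_INTERESTING; the reverted CL additionally tagged new_lo_space pages with IN_TO_SPACE. The following is a minimal, self-contained sketch of that flag behaviour using simplified stand-in types, not the real V8 classes; the authoritative code is in the diff that follows.

// Simplified stand-in types for illustration only; not the real V8 API.
#include <cstdint>
#include <cstdio>

enum Flag : uint32_t {
  POINTERS_TO_HERE_ARE_INTERESTING = 1u << 0,
  POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 1,
  IN_TO_SPACE = 1u << 2,
};

struct Chunk {
  uint32_t flags = 0;
  void Set(Flag f) { flags |= f; }
  void Clear(Flag f) { flags &= ~f; }
  bool IsSet(Flag f) const { return (flags & f) != 0; }
};

// Old-generation pages: pointers *from* the page are always interesting
// (old-to-new stores must be recorded); pointers *to* it matter only while
// incremental marking is active.
void SetOldGenerationFlags(Chunk* chunk, bool is_marking) {
  if (is_marking) {
    chunk->Set(POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->Set(POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->Clear(POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->Set(POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}

// Young-generation pages: pointers *to* the page are always interesting;
// pointers *from* it matter only while marking.
void SetYoungGenerationFlags(Chunk* chunk, bool is_marking) {
  chunk->Set(POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->Set(POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->Clear(POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}

int main() {
  // What the reverted CL did for a large page allocated in new_lo_space:
  // tag it with young-generation flags and place it in to-space.
  Chunk page;
  SetYoungGenerationFlags(&page, /*is_marking=*/false);
  page.Set(IN_TO_SPACE);
  std::printf("to-here interesting: %d, in to-space: %d\n",
              page.IsSet(POINTERS_TO_HERE_ARE_INTERESTING),
              page.IsSet(IN_TO_SPACE));
  return 0;
}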
parent 345bb904
@@ -184,7 +184,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
   } else if (LO_SPACE == space) {
     DCHECK(large_object);
     if (FLAG_young_generation_large_objects) {
-      allocation = new_lo_space_->AllocateRaw(size_in_bytes);
+      allocation = new_lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
     } else {
       allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
     }
@@ -43,7 +43,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
   // Ensure that the new object is marked black.
   HeapObject* object = HeapObject::FromAddress(addr);
   if (incremental_marking_.marking_state()->IsWhite(object) &&
-      !(heap->InNewSpace(object) || heap->new_lo_space()->Contains(object))) {
+      !heap->InNewSpace(object)) {
     if (heap->lo_space()->Contains(object)) {
       incremental_marking_.marking_state()->WhiteToBlack(object);
     } else {
@@ -229,10 +229,33 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
   Heap* heap_;
 };
 
+void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
+                                              bool is_marking) {
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
+void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
+                                              bool is_marking) {
+  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     PagedSpace* space) {
   for (Page* p : *space) {
-    p->SetOldGenerationPageFlags(false);
+    SetOldSpacePageFlags(p, false);
   }
 }
@@ -240,7 +263,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     NewSpace* space) {
   for (Page* p : *space) {
-    p->SetYoungGenerationPageFlags(false);
+    SetNewSpacePageFlags(p, false);
   }
 }
 
@@ -251,22 +274,22 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
   DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
 
-  for (LargePage* p : *heap_->lo_space()) {
-    p->SetOldGenerationPageFlags(false);
+  for (LargePage* lop : *heap_->lo_space()) {
+    SetOldSpacePageFlags(lop, false);
   }
 }
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
   for (Page* p : *space) {
-    p->SetOldGenerationPageFlags(true);
+    SetOldSpacePageFlags(p, true);
   }
 }
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
   for (Page* p : *space) {
-    p->SetYoungGenerationPageFlags(true);
+    SetNewSpacePageFlags(p, true);
   }
 }
@@ -277,8 +300,8 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
   ActivateIncrementalWriteBarrier(heap_->code_space());
   ActivateIncrementalWriteBarrier(heap_->new_space());
 
-  for (LargePage* p : *heap_->lo_space()) {
-    p->SetOldGenerationPageFlags(true);
+  for (LargePage* lop : *heap_->lo_space()) {
+    SetOldSpacePageFlags(lop, true);
   }
 }
@@ -226,6 +226,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   // the concurrent marker.
   void MarkBlackAndPush(HeapObject* obj);
 
+  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
+    SetOldSpacePageFlags(chunk, IsMarking());
+  }
+
+  inline void SetNewSpacePageFlags(Page* chunk) {
+    SetNewSpacePageFlags(chunk, IsMarking());
+  }
+
   bool IsCompacting() { return IsMarking() && is_compacting_; }
 
   void ActivateGeneratedStub(Code* stub);
@@ -271,6 +279,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
     IncrementalMarking& incremental_marking_;
   };
 
+  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking);
+  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
+
   void StartMarking();
 
   void StartBlackAllocation();
@@ -678,7 +678,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
   DCHECK_GE(Page::kAllocatableMemory, page->area_size());
   // Make sure that categories are initialized before freeing the area.
   page->ResetAllocatedBytes();
-  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+  heap()->incremental_marking()->SetOldSpacePageFlags(page);
   page->AllocateFreeListCategories();
   page->InitializeFreeListCategories();
   page->list_node().Initialize();
@@ -694,7 +694,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
   Page* page = static_cast<Page*>(chunk);
-  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+  heap()->incremental_marking()->SetNewSpacePageFlags(page);
   page->AllocateLocalTracker();
   page->list_node().Initialize();
 #ifdef ENABLE_MINOR_MC
@@ -716,6 +716,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
     STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
     FATAL("Code page is too large.");
   }
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
 
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
@@ -904,25 +905,6 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
   return chunk;
 }
 
-void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
-  if (is_marking) {
-    SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  } else {
-    ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  }
-}
-
-void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
-  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-  if (is_marking) {
-    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  } else {
-    ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  }
-}
-
 void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
 
 void Page::AllocateLocalTracker() {
@@ -3371,6 +3353,7 @@ void LargeObjectSpace::TearDown() {
   }
 }
 
 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                                Executability executable) {
   // Check if we want to force a GC before growing the old space further.
@@ -3380,27 +3363,9 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     return AllocationResult::Retry(identity());
   }
 
-  LargePage* page = AllocateLargePage(object_size, executable);
-  if (page == nullptr) return AllocationResult::Retry(identity());
-  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
-  HeapObject* object = page->GetObject();
-  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
-      heap()->GCFlagsForIncrementalMarking(),
-      kGCCallbackScheduleIdleGarbageCollection);
-  if (heap()->incremental_marking()->black_allocation()) {
-    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
-  }
-  DCHECK_IMPLIES(
-      heap()->incremental_marking()->black_allocation(),
-      heap()->incremental_marking()->marking_state()->IsBlack(object));
-  return object;
-}
-
-LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
-                                               Executability executable) {
   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
-  if (page == nullptr) return nullptr;
+  if (page == nullptr) return AllocationResult::Retry(identity());
 
   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
 
   size_ += static_cast<int>(page->size());
@@ -3420,10 +3385,20 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
         heap()->fixed_array_map();
     reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
   }
 
+  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+      heap()->GCFlagsForIncrementalMarking(),
+      kGCCallbackScheduleIdleGarbageCollection);
   heap()->CreateFillerObjectAt(object->address(), object_size,
                                ClearRecordedSlots::kNo);
+  if (heap()->incremental_marking()->black_allocation()) {
+    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
+  }
   AllocationStep(object_size, object->address(), object_size);
-  return page;
+  DCHECK_IMPLIES(
+      heap()->incremental_marking()->black_allocation(),
+      heap()->incremental_marking()->marking_state()->IsBlack(object));
+  return object;
 }
@@ -3676,15 +3651,6 @@ void Page::Print() {
 NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
     : LargeObjectSpace(heap, NEW_LO_SPACE) {}
 
-AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
-  // TODO(hpayer): Add heap growing strategy here.
-  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
-  if (page == nullptr) return AllocationResult::Retry(identity());
-  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
-  page->SetFlag(MemoryChunk::IN_TO_SPACE);
-  return page->GetObject();
-}
-
 size_t NewLargeObjectSpace::Available() {
   // TODO(hpayer): Update as soon as we have a growing strategy.
   return 0;
@@ -420,9 +420,6 @@ class MemoryChunk {
                                      ~kAlignmentMask);
   }
 
-  void SetOldGenerationPageFlags(bool is_marking);
-  void SetYoungGenerationPageFlags(bool is_marking);
-
   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
 
   static inline void UpdateHighWaterMark(Address mark) {
@@ -2956,6 +2953,8 @@ class LargeObjectSpace : public Space {
     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   }
 
+  // Shared implementation of AllocateRaw, AllocateRawCode and
+  // AllocateRawFixedArray.
   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                      Executability executable);
@@ -3022,9 +3021,6 @@ class LargeObjectSpace : public Space {
   void Print() override;
 #endif
 
- protected:
-  LargePage* AllocateLargePage(int object_size, Executability executable);
-
  private:
   size_t size_;       // allocated bytes
   int page_count_;    // number of chunks
@@ -3044,8 +3040,6 @@ class NewLargeObjectSpace : public LargeObjectSpace {
  public:
   explicit NewLargeObjectSpace(Heap* heap);
 
-  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
-
   // Available bytes for objects in this space.
   size_t Available() override;
 };
@@ -5671,7 +5671,6 @@ TEST(YoungGenerationLargeObjectAllocation) {
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
   MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
   CHECK(chunk->owner()->identity() == NEW_LO_SPACE);
-  CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
 }
 
 TEST(UncommitUnusedLargeObjectMemory) {