Commit 13ddba2a authored by Dominik Inführ, committed by Commit Bot

[heap] Add flag to promote young objects in MC

Add FLAG_always_promote_young_mc which, when enabled, promotes all
young objects during a full GC. This flag guarantees that the young
generation and the sweeping remembered set are empty after a full GC.
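
For orientation, the evacuation decision the flag changes can be modeled
in a few lines of self-contained C++. This is an illustrative sketch only:
the type and function names below are not V8's, and the real visitor also
records pretenuring feedback and treats a failed promotion as OOM (see the
EvacuateNewSpaceVisitor change in this CL).

  // Stand-alone model: where a live young object ends up during a full GC
  // with and without always_promote_young_mc. Names are illustrative.
  #include <cassert>

  enum Space { NEW_SPACE, OLD_SPACE };

  struct YoungObject {
    bool past_age_mark;  // Survived a previous GC; normally promoted.
  };

  Space EvacuationTarget(const YoungObject& object,
                         bool always_promote_young_mc) {
    // With the flag, every live young object is promoted, so new space is
    // empty once evacuation finishes.
    if (always_promote_young_mc) return OLD_SPACE;
    // Default behavior: promote only objects past the age mark and copy
    // the rest within the semispaces.
    return object.past_age_mark ? OLD_SPACE : NEW_SPACE;
  }

  int main() {
    YoungObject fresh{false}, survivor{true};
    assert(EvacuationTarget(fresh, false) == NEW_SPACE);
    assert(EvacuationTarget(survivor, false) == OLD_SPACE);
    assert(EvacuationTarget(fresh, true) == OLD_SPACE);
    assert(EvacuationTarget(survivor, true) == OLD_SPACE);
  }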

This CL also makes use of the fact that the sweeping remembered set is
then empty: an object is registered as invalidated only when old-to-new
slots were recorded on its page.
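
A similarly hedged sketch of that shortcut (again with made-up names; in
the actual change it is MemoryChunk::InvalidateRecordedSlots that skips
registering the object for OLD_TO_NEW when the page has no such slot set):

  // Stand-alone model: registration for the old-to-new remembered set is
  // skipped when the flag holds and the page recorded no old-to-new slots.
  #include <cassert>

  struct PageModel {
    bool has_old_to_new_slots = false;
    int registered_invalidations = 0;

    void InvalidateObject(bool always_promote_young_mc) {
      if (!always_promote_young_mc || has_old_to_new_slots) {
        ++registered_invalidations;
      }
    }
  };

  int main() {
    PageModel page;  // No old-to-new slots recorded on this page.
    page.InvalidateObject(/*always_promote_young_mc=*/true);
    assert(page.registered_invalidations == 0);  // Registration skipped.
    page.InvalidateObject(/*always_promote_young_mc=*/false);
    assert(page.registered_invalidations == 1);  // Conservative path.
  }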

Bug: chromium:1014943
Change-Id: Idfb13dfbe76bad5ec8b485a60bebc30531aec649
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1863201
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64348}
parent efba2827
@@ -354,6 +354,8 @@ DEFINE_BOOL(assert_types, false,
 DEFINE_BOOL(allocation_site_pretenuring, true,
             "pretenure with allocation sites")
 DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
+DEFINE_BOOL(always_promote_young_mc, false,
+            "always promote young objects during mark-compact")
 DEFINE_INT(page_promotion_threshold, 70,
            "min percentage of live bytes on a page to enable fast evacuation")
 DEFINE_BOOL(trace_pretenuring, false,
...
@@ -1348,24 +1348,42 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
   explicit EvacuateNewSpaceVisitor(
       Heap* heap, LocalAllocator* local_allocator,
       RecordMigratedSlotVisitor* record_visitor,
-      Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
+      Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
+      bool always_promote_young)
       : EvacuateVisitorBase(heap, local_allocator, record_visitor),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         promoted_size_(0),
         semispace_copied_size_(0),
         local_pretenuring_feedback_(local_pretenuring_feedback),
-        is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
+        is_incremental_marking_(heap->incremental_marking()->IsMarking()),
+        always_promote_young_(always_promote_young) {}

   inline bool Visit(HeapObject object, int size) override {
     if (TryEvacuateWithoutCopy(object)) return true;
     HeapObject target_object;
+
+    if (always_promote_young_) {
+      heap_->UpdateAllocationSite(object.map(), object,
+                                  local_pretenuring_feedback_);
+
+      if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
+        heap_->FatalProcessOutOfMemory(
+            "MarkCompactCollector: young object promotion failed");
+      }
+
+      promoted_size_ += size;
+      return true;
+    }
+
     if (heap_->ShouldBePromoted(object.address()) &&
         TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
       promoted_size_ += size;
       return true;
     }
     heap_->UpdateAllocationSite(object.map(), object,
                                 local_pretenuring_feedback_);
     HeapObject target;
     AllocationSpace space = AllocateTargetObject(object, size, &target);
     MigrateObject(HeapObject::cast(target), object, size, space);
@@ -1427,6 +1445,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
   intptr_t semispace_copied_size_;
   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
   bool is_incremental_marking_;
+  bool always_promote_young_;
 };

 template <PageEvacuationMode mode>
@@ -2663,6 +2682,8 @@ void MarkCompactCollector::EvacuatePrologue() {
   new_space->Flip();
   new_space->ResetLinearAllocationArea();
+  DCHECK_EQ(new_space->Size(), 0);

   heap()->new_lo_space()->Flip();
   heap()->new_lo_space()->ResetPendingObject();
@@ -2677,6 +2698,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
   aborted_evacuation_candidates_.clear();
   // New space.
   heap()->new_space()->set_age_mark(heap()->new_space()->top());
+  DCHECK_IMPLIES(FLAG_always_promote_young_mc,
+                 heap()->new_space()->Size() == 0);
   // Deallocate unmarked large objects.
   heap()->lo_space()->FreeUnmarkedObjects();
   heap()->code_lo_space()->FreeUnmarkedObjects();
@@ -2724,12 +2747,13 @@ class Evacuator : public Malloced {
     return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
   }

-  Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
+  Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
+            bool always_promote_young)
       : heap_(heap),
         local_allocator_(heap_),
         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(heap_, &local_allocator_, record_visitor,
-                           &local_pretenuring_feedback_),
+                           &local_pretenuring_feedback_, always_promote_young),
         new_to_new_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
         new_to_old_page_visitor_(heap_, record_visitor,
@@ -2834,7 +2858,8 @@ void Evacuator::Finalize() {
 class FullEvacuator : public Evacuator {
  public:
   explicit FullEvacuator(MarkCompactCollector* collector)
-      : Evacuator(collector->heap(), &record_visitor_),
+      : Evacuator(collector->heap(), &record_visitor_,
+                  FLAG_always_promote_young_mc),
         record_visitor_(collector, &ephemeron_remembered_set_),
         collector_(collector) {}
@@ -3024,7 +3049,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
     live_bytes += live_bytes_on_page;
     if (ShouldMovePage(page, live_bytes_on_page)) {
-      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) ||
+          FLAG_always_promote_young_mc) {
         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
         DCHECK_EQ(heap()->old_space(), page->owner());
         // The move added page->allocated_bytes to the old space, but we are
@@ -3342,11 +3368,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
  public:
   explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
                                      MemoryChunk* chunk,
-                                     RememberedSetUpdatingMode updating_mode)
+                                     RememberedSetUpdatingMode updating_mode,
+                                     bool always_promote_young)
       : heap_(heap),
         marking_state_(marking_state),
         chunk_(chunk),
-        updating_mode_(updating_mode) {}
+        updating_mode_(updating_mode),
+        always_promote_young_(always_promote_young) {}
   ~RememberedSetUpdatingItem() override = default;

   void Process() override {
@@ -3413,24 +3441,36 @@ class RememberedSetUpdatingItem : public UpdatingItem {
   void UpdateUntypedPointers() {
     if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
-      RememberedSet<OLD_TO_NEW>::Iterate(
+      int slots = RememberedSet<OLD_TO_NEW>::Iterate(
           chunk_,
           [this, &filter](MaybeObjectSlot slot) {
             if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
             return CheckAndUpdateOldToNewSlot(slot);
           },
           SlotSet::FREE_EMPTY_BUCKETS);
+      DCHECK_IMPLIES(always_promote_young_, slots == 0);
+      if (slots == 0) {
+        chunk_->ReleaseSlotSet<OLD_TO_NEW>();
+      }
     }

     if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
-      RememberedSetSweeping::Iterate(
+      int slots = RememberedSetSweeping::Iterate(
           chunk_,
           [this, &filter](MaybeObjectSlot slot) {
             if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
             return CheckAndUpdateOldToNewSlot(slot);
           },
           SlotSet::FREE_EMPTY_BUCKETS);
+      DCHECK_IMPLIES(always_promote_young_, slots == 0);
+      if (slots == 0) {
+        chunk_->ReleaseSweepingSlotSet();
+      }
     }

     if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
@@ -3492,6 +3532,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
   MarkingState* marking_state_;
   MemoryChunk* chunk_;
   RememberedSetUpdatingMode updating_mode_;
+  bool always_promote_young_;
 };

 UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
@@ -3503,7 +3544,8 @@ UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
 UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
   return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
-      heap(), non_atomic_marking_state(), chunk, updating_mode);
+      heap(), non_atomic_marking_state(), chunk, updating_mode,
+      FLAG_always_promote_young_mc);
 }

 // Update array buffers on a page that has been evacuated by copying objects.
@@ -4545,7 +4587,7 @@ UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
 UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
   return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
-      heap(), non_atomic_marking_state(), chunk, updating_mode);
+      heap(), non_atomic_marking_state(), chunk, updating_mode, false);
 }

 class MarkingItem;
@@ -4849,7 +4891,7 @@ namespace {
 class YoungGenerationEvacuator : public Evacuator {
  public:
   explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
-      : Evacuator(collector->heap(), &record_visitor_),
+      : Evacuator(collector->heap(), &record_visitor_, false),
         record_visitor_(collector->heap()->mark_compact_collector()),
         collector_(collector) {}
...
@@ -31,15 +31,17 @@ class RememberedSetOperations {
   }

   template <typename Callback>
-  static void Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
+  static int Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
                      SlotSet::EmptyBucketMode mode) {
+    int number_slots = 0;
     if (slots != nullptr) {
       size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
       for (size_t page = 0; page < pages; page++) {
-        slots[page].Iterate(chunk->address() + page * Page::kPageSize, callback,
-                            mode);
+        number_slots += slots[page].Iterate(
+            chunk->address() + page * Page::kPageSize, callback, mode);
       }
     }
+    return number_slots;
   }

   static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
@@ -172,10 +174,10 @@ class RememberedSet : public AllStatic {
   // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
   // threads concurrently inserting slots.
   template <typename Callback>
-  static void Iterate(MemoryChunk* chunk, Callback callback,
+  static int Iterate(MemoryChunk* chunk, Callback callback,
                      SlotSet::EmptyBucketMode mode) {
     SlotSet* slots = chunk->slot_set<type>();
-    RememberedSetOperations::Iterate(slots, chunk, callback, mode);
+    return RememberedSetOperations::Iterate(slots, chunk, callback, mode);
   }

   static void FreeEmptyBuckets(MemoryChunk* chunk) {
@@ -378,10 +380,10 @@ class RememberedSetSweeping {
   // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
   // threads concurrently inserting slots.
   template <typename Callback>
-  static void Iterate(MemoryChunk* chunk, Callback callback,
+  static int Iterate(MemoryChunk* chunk, Callback callback,
                      SlotSet::EmptyBucketMode mode) {
     SlotSet* slots = chunk->sweeping_slot_set();
-    RememberedSetOperations::Iterate(slots, chunk, callback, mode);
+    return RememberedSetOperations::Iterate(slots, chunk, callback, mode);
   }
 };
...
@@ -866,16 +866,16 @@ void Page::MoveOldToNewRememberedSetForSweeping() {
 void Page::MergeOldToNewRememberedSets() {
   if (sweeping_slot_set_ == nullptr) return;

-  RememberedSet<OLD_TO_NEW>::Iterate(
-      this,
-      [this](MaybeObjectSlot slot) {
-        Address address = slot.address();
-        RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
-        return KEEP_SLOT;
-      },
-      SlotSet::KEEP_EMPTY_BUCKETS);
-
   if (slot_set_[OLD_TO_NEW]) {
+    RememberedSet<OLD_TO_NEW>::Iterate(
+        this,
+        [this](MaybeObjectSlot slot) {
+          Address address = slot.address();
+          RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
+          return KEEP_SLOT;
+        },
+        SlotSet::KEEP_EMPTY_BUCKETS);
+
     ReleaseSlotSet<OLD_TO_NEW>();
   }
@@ -1404,7 +1404,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
   }

   ReleaseSlotSet<OLD_TO_NEW>();
-  ReleaseSlotSet(&sweeping_slot_set_);
+  ReleaseSweepingSlotSet();
   ReleaseSlotSet<OLD_TO_OLD>();
   ReleaseTypedSlotSet<OLD_TO_NEW>();
   ReleaseTypedSlotSet<OLD_TO_OLD>();
@@ -1463,6 +1463,10 @@ void MemoryChunk::ReleaseSlotSet() {
   ReleaseSlotSet(&slot_set_[type]);
 }

+void MemoryChunk::ReleaseSweepingSlotSet() {
+  ReleaseSlotSet(&sweeping_slot_set_);
+}
+
 void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
   if (*slot_set) {
     delete[] * slot_set;
@@ -1552,7 +1556,8 @@ void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
     RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
   }

-  RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+  if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
+    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
 }

 template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
...
@@ -723,6 +723,7 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseSlotSet();
   void ReleaseSlotSet(SlotSet** slot_set);
+  void ReleaseSweepingSlotSet();
   template <RememberedSetType type>
   TypedSlotSet* AllocateTypedSlotSet();
   // Not safe to be called concurrently.
...
@@ -10,53 +10,53 @@
 // Tests that should have access to private methods of {v8::internal::Heap}.
 // Those tests need to be defined using HEAP_TEST(Name) { ... }.
 #define HEAP_TEST_METHODS(V) \
   V(CompactionFullAbortedPage) \
   V(CompactionPartiallyAbortedPage) \
   V(CompactionPartiallyAbortedPageIntraAbortedPointers) \
   V(CompactionPartiallyAbortedPageWithInvalidatedSlots) \
-  V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
+  V(CompactionPartiallyAbortedPageWithRememberedSetEntries) \
   V(CompactionSpaceDivideMultiplePages) \
   V(CompactionSpaceDivideSinglePage) \
   V(InvalidatedSlotsAfterTrimming) \
   V(InvalidatedSlotsAllInvalidatedRanges) \
   V(InvalidatedSlotsCleanupEachObject) \
   V(InvalidatedSlotsCleanupFull) \
   V(InvalidatedSlotsCleanupRightTrim) \
   V(InvalidatedSlotsCleanupOverlapRight) \
   V(InvalidatedSlotsEvacuationCandidate) \
   V(InvalidatedSlotsNoInvalidatedRanges) \
   V(InvalidatedSlotsResetObjectRegression) \
   V(InvalidatedSlotsRightTrimFixedArray) \
   V(InvalidatedSlotsRightTrimLargeFixedArray) \
   V(InvalidatedSlotsLeftTrimFixedArray) \
   V(InvalidatedSlotsFastToSlow) \
   V(InvalidatedSlotsSomeInvalidatedRanges) \
   V(TestNewSpaceRefsInCopiedCode) \
   V(GCFlags) \
   V(MarkCompactCollector) \
   V(MarkCompactEpochCounter) \
   V(MemoryReducerActivationForSmallHeaps) \
   V(NoPromotion) \
   V(NumberStringCacheSize) \
   V(ObjectGroups) \
   V(Promotion) \
   V(Regression39128) \
   V(ResetWeakHandle) \
   V(StressHandles) \
   V(TestMemoryReducerSampleJsCalls) \
   V(TestSizeOfObjects) \
   V(Regress5831) \
   V(Regress538257) \
   V(Regress587004) \
   V(Regress589413) \
   V(Regress658718) \
   V(Regress670675) \
   V(Regress777177) \
   V(Regress779503) \
   V(Regress791582) \
   V(Regress845060) \
   V(RegressMissingWriteBarrierInAllocate) \
   V(WriteBarriersInCopyJSObject)

 #define HEAP_TEST(Name) \
...
@@ -335,13 +335,12 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
   }
 }

-HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
-  if (FLAG_never_compact) return;
+HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
+  if (FLAG_never_compact || FLAG_always_promote_young_mc) return;
   // Test the scenario where we reach OOM during compaction and parts of the
   // page have already been migrated to a new one. Objects on the aborted page
   // are linked together and the very first object on the aborted page points
-  // into new space. The test verifies that the store buffer entries are
+  // into new space. The test verifies that the remembered set entries are
   // properly cleared and rebuilt after aborting a page. Failing to do so can
   // result in other objects being allocated in the free space where their
   // payload looks like a valid new space pointer.
@@ -452,7 +451,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
         .ToHandleChecked();
   } while (Page::FromHeapObject(*string) != to_be_aborted_page);

-  // If store buffer entries are not properly filtered/reset for aborted
+  // If remembered set entries are not properly filtered/reset for aborted
   // pages we have now a broken address at an object slot in old space and
   // the following scavenge will crash.
   CcTest::CollectGarbage(NEW_SPACE);
...
@@ -2561,8 +2561,7 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
       ReadOnlyRoots(CcTest::heap()).empty_property_array());
 }

-TEST(OptimizedPretenuringdoubleArrayLiterals) {
+TEST(OptimizedPretenuringDoubleArrayLiterals) {
   FLAG_allow_natives_syntax = true;
   FLAG_expose_gc = true;
   CcTest::InitializeVM();
@@ -2572,7 +2571,7 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
     return;
   v8::HandleScope scope(CcTest::isolate());

-  // Grow new space unitl maximum capacity reached.
+  // Grow new space until maximum capacity reached.
   while (!CcTest::heap()->new_space()->IsAtMaximumCapacity()) {
     CcTest::heap()->new_space()->Grow();
   }
@@ -2603,7 +2602,6 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
   CHECK(CcTest::heap()->InOldSpace(*o));
 }

-
 TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
   FLAG_allow_natives_syntax = true;
   FLAG_expose_gc = true;
@@ -5398,19 +5396,32 @@ HEAP_TEST(Regress589413) {
   // Fill the new space with byte arrays with elements looking like pointers.
   const int M = 256;
   ByteArray byte_array;
+  Page* young_page = nullptr;
   while (AllocateByteArrayForTest(heap, M, AllocationType::kYoung)
              .To(&byte_array)) {
+    // Only allocate objects on one young page as a rough estimate on
+    // how much memory can be promoted into the old generation.
+    // Otherwise we would crash when forcing promotion of all young
+    // live objects.
+    if (!young_page) young_page = Page::FromHeapObject(byte_array);
+    if (Page::FromHeapObject(byte_array) != young_page) break;
+
     for (int j = 0; j < M; j++) {
       byte_array.set(j, 0x31);
     }
     // Add the array in root set.
     handle(byte_array, isolate);
   }

-  // Make sure the byte arrays will be promoted on the next GC.
-  CcTest::CollectGarbage(NEW_SPACE);
-  // This number is close to large free list category threshold.
-  const int N = 0x3EEE;
   {
+    // Ensure that incremental marking is not started unexpectedly.
+    AlwaysAllocateScope always_allocate(isolate);
+
+    // Make sure the byte arrays will be promoted on the next GC.
+    CcTest::CollectGarbage(NEW_SPACE);
+    // This number is close to large free list category threshold.
+    const int N = 0x3EEE;
     std::vector<FixedArray> arrays;
     std::set<Page*> pages;
     FixedArray array;
@@ -5423,7 +5434,7 @@ HEAP_TEST(Regress589413) {
       // Add the array in root set.
       handle(array, isolate);
     }
-    // Expand and full one complete page with fixed arrays.
+    // Expand and fill one complete page with fixed arrays.
     heap->set_force_oom(false);
     while (
         AllocateFixedArrayForTest(heap, N, AllocationType::kOld).To(&array)) {
@@ -5437,7 +5448,6 @@ HEAP_TEST(Regress589413) {
     // Expand and mark the new page as evacuation candidate.
     heap->set_force_oom(false);
     {
-      AlwaysAllocateScope always_allocate(isolate);
       Handle<HeapObject> ec_obj =
           factory->NewFixedArray(5000, AllocationType::kOld);
       Page* ec_page = Page::FromHeapObject(*ec_obj);
@@ -5451,11 +5461,13 @@ HEAP_TEST(Regress589413) {
       }
     }
   }
+  CHECK(heap->incremental_marking()->IsStopped());
   heap::SimulateIncrementalMarking(heap);
   for (size_t j = 0; j < arrays.size(); j++) {
     heap->RightTrimFixedArray(arrays[j], N - 1);
   }
+  }

   // Force allocation from the free list.
   heap->set_force_oom(true);
   CcTest::CollectGarbage(OLD_SPACE);
@@ -6542,7 +6554,8 @@ HEAP_TEST(Regress779503) {
     // currently scavenging.
     heap->delay_sweeper_tasks_for_testing_ = true;
     CcTest::CollectGarbage(OLD_SPACE);
-    CHECK(Heap::InYoungGeneration(*byte_array));
+    CHECK(FLAG_always_promote_young_mc ? !Heap::InYoungGeneration(*byte_array)
+                                       : Heap::InYoungGeneration(*byte_array));
   }
   // Scavenging and sweeping the same page will crash as slots will be
   // overridden.
...
@@ -72,6 +72,7 @@ TEST(Promotion) {
 }

 HEAP_TEST(NoPromotion) {
+  if (FLAG_always_promote_young_mc) return;
   // Page promotion allows pages to be moved to old space even in the case of
   // OOM scenarios.
   FLAG_page_promotion = false;
...
@@ -101,7 +101,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
 }

 UNINITIALIZED_TEST(PagePromotion_NewToNew) {
-  if (!i::FLAG_page_promotion) return;
+  if (!i::FLAG_page_promotion || FLAG_always_promote_young_mc) return;
   v8::Isolate* isolate = NewIsolateForPagePromotion();
   Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
@@ -129,7 +129,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToNew) {
 }

 UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
-  if (!i::FLAG_page_promotion) return;
+  if (!i::FLAG_page_promotion || FLAG_always_promote_young_mc) return;
   // Test makes sure JSArrayBuffer backing stores are still tracked after
   // new-to-new promotion.
@@ -218,7 +218,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToOldJSArrayBuffer) {
 }

 UNINITIALIZED_HEAP_TEST(Regress658718) {
-  if (!i::FLAG_page_promotion) return;
+  if (!i::FLAG_page_promotion || FLAG_always_promote_young_mc) return;
   v8::Isolate* isolate = NewIsolateForPagePromotion(4, 8);
   Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
...
@@ -37,7 +37,8 @@ TEST(WeakReferencesBasic) {
   MaybeObject code_object = lh->data1();
   CHECK(code_object->IsSmi());
   CcTest::CollectAllGarbage();
-  CHECK(Heap::InYoungGeneration(*lh));
+  CHECK(FLAG_always_promote_young_mc ? !Heap::InYoungGeneration(*lh)
+                                     : Heap::InYoungGeneration(*lh));
   CHECK_EQ(code_object, lh->data1());

   {
...
@@ -157,7 +157,7 @@ bool EphemeronHashTableContainsKey(EphemeronHashTable table, HeapObject key) {
 }
 }  // namespace

-TEST(WeakMapPromotion) {
+TEST(WeakMapPromotionMarkCompact) {
   LocalContext context;
   Isolate* isolate = GetIsolateFrom(&context);
   Factory* factory = isolate->factory();
@@ -165,7 +165,10 @@ TEST(WeakMapPromotion) {
   Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();

   CcTest::CollectAllGarbage();
-  CHECK(ObjectInYoungGeneration(weakmap->table()));
+  CHECK(FLAG_always_promote_young_mc
+            ? !ObjectInYoungGeneration(weakmap->table())
+            : ObjectInYoungGeneration(weakmap->table()));

   Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
   Handle<JSObject> object = factory->NewJSObjectFromMap(map);
@@ -177,7 +180,8 @@ TEST(WeakMapPromotion) {
       EphemeronHashTable::cast(weakmap->table()), *object));

   CcTest::CollectAllGarbage();
-  CHECK(ObjectInYoungGeneration(*object));
+  CHECK(FLAG_always_promote_young_mc ? !ObjectInYoungGeneration(*object)
+                                     : ObjectInYoungGeneration(*object));
   CHECK(!ObjectInYoungGeneration(weakmap->table()));
   CHECK(EphemeronHashTableContainsKey(
       EphemeronHashTable::cast(weakmap->table()), *object));
@@ -196,7 +200,7 @@ TEST(WeakMapScavenge) {
   HandleScope scope(isolate);
   Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();

-  CcTest::CollectAllGarbage();
+  heap::GcAndSweep(isolate->heap(), NEW_SPACE);
   CHECK(ObjectInYoungGeneration(weakmap->table()));

   Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
...