Commit 13ddba2a authored by Dominik Inführ, committed by Commit Bot

[heap] Add flag to promote young objects in MC

Add FLAG_always_promote_young_mc which, when enabled, promotes all live
young objects during a full GC. The flag guarantees that the young
generation and the sweeping remembered set are empty after a full GC.

This CL also makes use of the fact that the sweeping remembered set is
then empty and only invalidates an object when old-to-new slots were
recorded on its page.

Bug: chromium:1014943
Change-Id: Idfb13dfbe76bad5ec8b485a60bebc30531aec649
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1863201
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64348}
parent efba2827
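The guarantee described in the commit message can be illustrated with a small cctest-style check. This is only a sketch: the test name is made up and it assumes the flag is enabled for the run, while HEAP_TEST, CcTest, CollectAllGarbage and new_space()->Size() are the existing helpers that also appear in the diff below.

  // Hypothetical cctest sketch (not part of this CL): a full GC with the
  // flag enabled must leave the young generation empty.
  HEAP_TEST(AlwaysPromoteYoungMCEmptiesYoungGen) {
    if (!FLAG_always_promote_young_mc) return;
    CcTest::InitializeVM();
    CcTest::CollectAllGarbage();
    CHECK_EQ(CcTest::heap()->new_space()->Size(), 0u);
  }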
......@@ -354,6 +354,8 @@ DEFINE_BOOL(assert_types, false,
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
DEFINE_BOOL(always_promote_young_mc, false,
"always promote young objects during mark-compact")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")
DEFINE_BOOL(trace_pretenuring, false,
......
......@@ -1348,24 +1348,42 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
explicit EvacuateNewSpaceVisitor(
Heap* heap, LocalAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
bool always_promote_young)
: EvacuateVisitorBase(heap, local_allocator, record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
semispace_copied_size_(0),
local_pretenuring_feedback_(local_pretenuring_feedback),
is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
always_promote_young_(always_promote_young) {}
inline bool Visit(HeapObject object, int size) override {
if (TryEvacuateWithoutCopy(object)) return true;
HeapObject target_object;
if (always_promote_young_) {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
heap_->FatalProcessOutOfMemory(
"MarkCompactCollector: young object promotion failed");
}
promoted_size_ += size;
return true;
}
if (heap_->ShouldBePromoted(object.address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
promoted_size_ += size;
return true;
}
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
HeapObject target;
AllocationSpace space = AllocateTargetObject(object, size, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
......@@ -1427,6 +1445,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t semispace_copied_size_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
bool is_incremental_marking_;
bool always_promote_young_;
};
template <PageEvacuationMode mode>
......@@ -2663,6 +2682,8 @@ void MarkCompactCollector::EvacuatePrologue() {
new_space->Flip();
new_space->ResetLinearAllocationArea();
DCHECK_EQ(new_space->Size(), 0);
heap()->new_lo_space()->Flip();
heap()->new_lo_space()->ResetPendingObject();
......@@ -2677,6 +2698,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
aborted_evacuation_candidates_.clear();
// New space.
heap()->new_space()->set_age_mark(heap()->new_space()->top());
DCHECK_IMPLIES(FLAG_always_promote_young_mc,
heap()->new_space()->Size() == 0);
// Deallocate unmarked large objects.
heap()->lo_space()->FreeUnmarkedObjects();
heap()->code_lo_space()->FreeUnmarkedObjects();
......@@ -2724,12 +2747,13 @@ class Evacuator : public Malloced {
return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
}
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
bool always_promote_young)
: heap_(heap),
local_allocator_(heap_),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(heap_, &local_allocator_, record_visitor,
&local_pretenuring_feedback_),
&local_pretenuring_feedback_, always_promote_young),
new_to_new_page_visitor_(heap_, record_visitor,
&local_pretenuring_feedback_),
new_to_old_page_visitor_(heap_, record_visitor,
......@@ -2834,7 +2858,8 @@ void Evacuator::Finalize() {
class FullEvacuator : public Evacuator {
public:
explicit FullEvacuator(MarkCompactCollector* collector)
: Evacuator(collector->heap(), &record_visitor_),
: Evacuator(collector->heap(), &record_visitor_,
FLAG_always_promote_young_mc),
record_visitor_(collector, &ephemeron_remembered_set_),
collector_(collector) {}
......@@ -3024,7 +3049,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
live_bytes += live_bytes_on_page;
if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) ||
FLAG_always_promote_young_mc) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
DCHECK_EQ(heap()->old_space(), page->owner());
// The move added page->allocated_bytes to the old space, but we are
......@@ -3342,11 +3368,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
public:
explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
MemoryChunk* chunk,
RememberedSetUpdatingMode updating_mode)
RememberedSetUpdatingMode updating_mode,
bool always_promote_young)
: heap_(heap),
marking_state_(marking_state),
chunk_(chunk),
updating_mode_(updating_mode) {}
updating_mode_(updating_mode),
always_promote_young_(always_promote_young) {}
~RememberedSetUpdatingItem() override = default;
void Process() override {
......@@ -3413,24 +3441,36 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSet<OLD_TO_NEW>::Iterate(
int slots = RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndUpdateOldToNewSlot(slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
DCHECK_IMPLIES(always_promote_young_, slots == 0);
if (slots == 0) {
chunk_->ReleaseSlotSet<OLD_TO_NEW>();
}
}
if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSetSweeping::Iterate(
int slots = RememberedSetSweeping::Iterate(
chunk_,
[this, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndUpdateOldToNewSlot(slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
DCHECK_IMPLIES(always_promote_young_, slots == 0);
if (slots == 0) {
chunk_->ReleaseSweepingSlotSet();
}
}
if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
......@@ -3492,6 +3532,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
MarkingState* marking_state_;
MemoryChunk* chunk_;
RememberedSetUpdatingMode updating_mode_;
bool always_promote_young_;
};
UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
......@@ -3503,7 +3544,8 @@ UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
heap(), non_atomic_marking_state(), chunk, updating_mode,
FLAG_always_promote_young_mc);
}
// Update array buffers on a page that has been evacuated by copying objects.
......@@ -4545,7 +4587,7 @@ UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
heap(), non_atomic_marking_state(), chunk, updating_mode, false);
}
class MarkingItem;
......@@ -4849,7 +4891,7 @@ namespace {
class YoungGenerationEvacuator : public Evacuator {
public:
explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
: Evacuator(collector->heap(), &record_visitor_),
: Evacuator(collector->heap(), &record_visitor_, false),
record_visitor_(collector->heap()->mark_compact_collector()),
collector_(collector) {}
......
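Condensed, the new-space evacuation policy implemented by the mark-compact changes above can be modelled in a few lines of standalone C++. This is a toy model for illustration only, not V8 code; TargetSpace and its parameters are invented names.

  // Toy model (not V8 code) of the promotion decision in
  // EvacuateNewSpaceVisitor::Visit and EvacuatePagesInParallel: with
  // FLAG_always_promote_young_mc every live young object is evacuated to
  // old space (and failure to allocate there is fatal); without it, only
  // objects that should already be promoted (below the age mark) go to old
  // space.
  #include <cassert>

  enum class Space { kNew, kOld };

  Space TargetSpace(bool always_promote_young_mc, bool should_be_promoted) {
    if (always_promote_young_mc || should_be_promoted) return Space::kOld;
    return Space::kNew;  // the object may stay in (the flipped) new space
  }

  int main() {
    assert(TargetSpace(true, false) == Space::kOld);   // flag forces promotion
    assert(TargetSpace(false, true) == Space::kOld);   // past the age mark
    assert(TargetSpace(false, false) == Space::kNew);  // young enough to stay
  }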
......@@ -31,15 +31,17 @@ class RememberedSetOperations {
}
template <typename Callback>
static void Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
static int Iterate(SlotSet* slots, MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
int number_slots = 0;
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
for (size_t page = 0; page < pages; page++) {
slots[page].Iterate(chunk->address() + page * Page::kPageSize, callback,
mode);
number_slots += slots[page].Iterate(
chunk->address() + page * Page::kPageSize, callback, mode);
}
}
return number_slots;
}
static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
......@@ -172,10 +174,10 @@ class RememberedSet : public AllStatic {
// Notice that |mode| can only be of FREE* or PREFREE* if there are no other
// threads concurrently inserting slots.
template <typename Callback>
static void Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->slot_set<type>();
RememberedSetOperations::Iterate(slots, chunk, callback, mode);
return RememberedSetOperations::Iterate(slots, chunk, callback, mode);
}
static void FreeEmptyBuckets(MemoryChunk* chunk) {
......@@ -378,10 +380,10 @@ class RememberedSetSweeping {
// Notice that |mode| can only be of FREE* or PREFREE* if there are no other
// threads concurrently inserting slots.
template <typename Callback>
static void Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->sweeping_slot_set();
RememberedSetOperations::Iterate(slots, chunk, callback, mode);
return RememberedSetOperations::Iterate(slots, chunk, callback, mode);
}
};
......
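The reason Iterate now returns the number of remaining slots is visible in the mark-compact.cc hunks above; condensed, the new caller pattern looks as follows (names as in this CL; chunk stands for the MemoryChunk being processed and callback for the slot-updating lambda):

  // When iteration leaves no OLD_TO_NEW slots behind, the backing SlotSet
  // can be released right away instead of lingering until a later GC.
  int slots = RememberedSet<OLD_TO_NEW>::Iterate(chunk, callback,
                                                 SlotSet::FREE_EMPTY_BUCKETS);
  if (slots == 0) chunk->ReleaseSlotSet<OLD_TO_NEW>();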
......@@ -866,16 +866,16 @@ void Page::MoveOldToNewRememberedSetForSweeping() {
void Page::MergeOldToNewRememberedSets() {
if (sweeping_slot_set_ == nullptr) return;
RememberedSet<OLD_TO_NEW>::Iterate(
this,
[this](MaybeObjectSlot slot) {
Address address = slot.address();
RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
return KEEP_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
if (slot_set_[OLD_TO_NEW]) {
RememberedSet<OLD_TO_NEW>::Iterate(
this,
[this](MaybeObjectSlot slot) {
Address address = slot.address();
RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
return KEEP_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
ReleaseSlotSet<OLD_TO_NEW>();
}
......@@ -1404,7 +1404,7 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
}
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet(&sweeping_slot_set_);
ReleaseSweepingSlotSet();
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
......@@ -1463,6 +1463,10 @@ void MemoryChunk::ReleaseSlotSet() {
ReleaseSlotSet(&slot_set_[type]);
}
void MemoryChunk::ReleaseSweepingSlotSet() {
ReleaseSlotSet(&sweeping_slot_set_);
}
void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
if (*slot_set) {
delete[] * slot_set;
......@@ -1552,7 +1556,8 @@ void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
......
......@@ -723,6 +723,7 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseSlotSet();
void ReleaseSlotSet(SlotSet** slot_set);
void ReleaseSweepingSlotSet();
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
// Not safe to be called concurrently.
......
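The spaces.cc change to InvalidateRecordedSlots ties back to the second paragraph of the commit message: with FLAG_always_promote_young_mc the sweeping remembered set is empty after a full GC, so old-to-new slots can only live in the regular OLD_TO_NEW slot set. Condensed from the hunk above:

  // If the page has no OLD_TO_NEW slot set (and, with the flag, the
  // sweeping set is guaranteed empty), there is nothing to filter, so the
  // object does not need to be registered as invalidated.
  if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr) {
    RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
  }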
......@@ -10,53 +10,53 @@
// Tests that should have access to private methods of {v8::internal::Heap}.
// Those tests need to be defined using HEAP_TEST(Name) { ... }.
#define HEAP_TEST_METHODS(V) \
V(CompactionFullAbortedPage) \
V(CompactionPartiallyAbortedPage) \
V(CompactionPartiallyAbortedPageIntraAbortedPointers) \
V(CompactionPartiallyAbortedPageWithInvalidatedSlots) \
V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
V(InvalidatedSlotsAfterTrimming) \
V(InvalidatedSlotsAllInvalidatedRanges) \
V(InvalidatedSlotsCleanupEachObject) \
V(InvalidatedSlotsCleanupFull) \
V(InvalidatedSlotsCleanupRightTrim) \
V(InvalidatedSlotsCleanupOverlapRight) \
V(InvalidatedSlotsEvacuationCandidate) \
V(InvalidatedSlotsNoInvalidatedRanges) \
V(InvalidatedSlotsResetObjectRegression) \
V(InvalidatedSlotsRightTrimFixedArray) \
V(InvalidatedSlotsRightTrimLargeFixedArray) \
V(InvalidatedSlotsLeftTrimFixedArray) \
V(InvalidatedSlotsFastToSlow) \
V(InvalidatedSlotsSomeInvalidatedRanges) \
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
V(MarkCompactCollector) \
V(MarkCompactEpochCounter) \
V(MemoryReducerActivationForSmallHeaps) \
V(NoPromotion) \
V(NumberStringCacheSize) \
V(ObjectGroups) \
V(Promotion) \
V(Regression39128) \
V(ResetWeakHandle) \
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(Regress5831) \
V(Regress538257) \
V(Regress587004) \
V(Regress589413) \
V(Regress658718) \
V(Regress670675) \
V(Regress777177) \
V(Regress779503) \
V(Regress791582) \
V(Regress845060) \
V(RegressMissingWriteBarrierInAllocate) \
#define HEAP_TEST_METHODS(V) \
V(CompactionFullAbortedPage) \
V(CompactionPartiallyAbortedPage) \
V(CompactionPartiallyAbortedPageIntraAbortedPointers) \
V(CompactionPartiallyAbortedPageWithInvalidatedSlots) \
V(CompactionPartiallyAbortedPageWithRememberedSetEntries) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
V(InvalidatedSlotsAfterTrimming) \
V(InvalidatedSlotsAllInvalidatedRanges) \
V(InvalidatedSlotsCleanupEachObject) \
V(InvalidatedSlotsCleanupFull) \
V(InvalidatedSlotsCleanupRightTrim) \
V(InvalidatedSlotsCleanupOverlapRight) \
V(InvalidatedSlotsEvacuationCandidate) \
V(InvalidatedSlotsNoInvalidatedRanges) \
V(InvalidatedSlotsResetObjectRegression) \
V(InvalidatedSlotsRightTrimFixedArray) \
V(InvalidatedSlotsRightTrimLargeFixedArray) \
V(InvalidatedSlotsLeftTrimFixedArray) \
V(InvalidatedSlotsFastToSlow) \
V(InvalidatedSlotsSomeInvalidatedRanges) \
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
V(MarkCompactCollector) \
V(MarkCompactEpochCounter) \
V(MemoryReducerActivationForSmallHeaps) \
V(NoPromotion) \
V(NumberStringCacheSize) \
V(ObjectGroups) \
V(Promotion) \
V(Regression39128) \
V(ResetWeakHandle) \
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
V(Regress5831) \
V(Regress538257) \
V(Regress587004) \
V(Regress589413) \
V(Regress658718) \
V(Regress670675) \
V(Regress777177) \
V(Regress779503) \
V(Regress791582) \
V(Regress845060) \
V(RegressMissingWriteBarrierInAllocate) \
V(WriteBarriersInCopyJSObject)
#define HEAP_TEST(Name) \
......
......@@ -335,13 +335,12 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
}
}
HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
if (FLAG_never_compact) return;
HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
if (FLAG_never_compact || FLAG_always_promote_young_mc) return;
// Test the scenario where we reach OOM during compaction and parts of the
// page have already been migrated to a new one. Objects on the aborted page
// are linked together and the very first object on the aborted page points
// into new space. The test verifies that the store buffer entries are
// into new space. The test verifies that the remembered set entries are
// properly cleared and rebuilt after aborting a page. Failing to do so can
// result in other objects being allocated in the free space where their
// payload looks like a valid new space pointer.
......@@ -452,7 +451,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
.ToHandleChecked();
} while (Page::FromHeapObject(*string) != to_be_aborted_page);
// If store buffer entries are not properly filtered/reset for aborted
// If remembered set entries are not properly filtered/reset for aborted
// pages we have now a broken address at an object slot in old space and
// the following scavenge will crash.
CcTest::CollectGarbage(NEW_SPACE);
......
......@@ -2561,8 +2561,7 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
ReadOnlyRoots(CcTest::heap()).empty_property_array());
}
TEST(OptimizedPretenuringdoubleArrayLiterals) {
TEST(OptimizedPretenuringDoubleArrayLiterals) {
FLAG_allow_natives_syntax = true;
FLAG_expose_gc = true;
CcTest::InitializeVM();
......@@ -2572,7 +2571,7 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
return;
v8::HandleScope scope(CcTest::isolate());
// Grow new space unitl maximum capacity reached.
// Grow new space until maximum capacity reached.
while (!CcTest::heap()->new_space()->IsAtMaximumCapacity()) {
CcTest::heap()->new_space()->Grow();
}
......@@ -2603,7 +2602,6 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
CHECK(CcTest::heap()->InOldSpace(*o));
}
TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
FLAG_allow_natives_syntax = true;
FLAG_expose_gc = true;
......@@ -5398,19 +5396,32 @@ HEAP_TEST(Regress589413) {
// Fill the new space with byte arrays with elements looking like pointers.
const int M = 256;
ByteArray byte_array;
Page* young_page = nullptr;
while (AllocateByteArrayForTest(heap, M, AllocationType::kYoung)
.To(&byte_array)) {
// Only allocate objects on one young page as a rough estimate on
// how much memory can be promoted into the old generation.
// Otherwise we would crash when forcing promotion of all young
// live objects.
if (!young_page) young_page = Page::FromHeapObject(byte_array);
if (Page::FromHeapObject(byte_array) != young_page) break;
for (int j = 0; j < M; j++) {
byte_array.set(j, 0x31);
}
// Add the array in root set.
handle(byte_array, isolate);
}
// Make sure the byte arrays will be promoted on the next GC.
CcTest::CollectGarbage(NEW_SPACE);
// This number is close to large free list category threshold.
const int N = 0x3EEE;
{
// Ensure that incremental marking is not started unexpectedly.
AlwaysAllocateScope always_allocate(isolate);
// Make sure the byte arrays will be promoted on the next GC.
CcTest::CollectGarbage(NEW_SPACE);
// This number is close to large free list category threshold.
const int N = 0x3EEE;
std::vector<FixedArray> arrays;
std::set<Page*> pages;
FixedArray array;
......@@ -5423,7 +5434,7 @@ HEAP_TEST(Regress589413) {
// Add the array in root set.
handle(array, isolate);
}
// Expand and full one complete page with fixed arrays.
// Expand and fill one complete page with fixed arrays.
heap->set_force_oom(false);
while (
AllocateFixedArrayForTest(heap, N, AllocationType::kOld).To(&array)) {
......@@ -5437,7 +5448,6 @@ HEAP_TEST(Regress589413) {
// Expand and mark the new page as evacuation candidate.
heap->set_force_oom(false);
{
AlwaysAllocateScope always_allocate(isolate);
Handle<HeapObject> ec_obj =
factory->NewFixedArray(5000, AllocationType::kOld);
Page* ec_page = Page::FromHeapObject(*ec_obj);
......@@ -5451,11 +5461,13 @@ HEAP_TEST(Regress589413) {
}
}
}
CHECK(heap->incremental_marking()->IsStopped());
heap::SimulateIncrementalMarking(heap);
for (size_t j = 0; j < arrays.size(); j++) {
heap->RightTrimFixedArray(arrays[j], N - 1);
}
}
// Force allocation from the free list.
heap->set_force_oom(true);
CcTest::CollectGarbage(OLD_SPACE);
......@@ -6542,7 +6554,8 @@ HEAP_TEST(Regress779503) {
// currently scavenging.
heap->delay_sweeper_tasks_for_testing_ = true;
CcTest::CollectGarbage(OLD_SPACE);
CHECK(Heap::InYoungGeneration(*byte_array));
CHECK(FLAG_always_promote_young_mc ? !Heap::InYoungGeneration(*byte_array)
: Heap::InYoungGeneration(*byte_array));
}
// Scavenging and sweeping the same page will crash as slots will be
// overridden.
......
......@@ -72,6 +72,7 @@ TEST(Promotion) {
}
HEAP_TEST(NoPromotion) {
if (FLAG_always_promote_young_mc) return;
// Page promotion allows pages to be moved to old space even in the case of
// OOM scenarios.
FLAG_page_promotion = false;
......
......@@ -101,7 +101,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
}
UNINITIALIZED_TEST(PagePromotion_NewToNew) {
if (!i::FLAG_page_promotion) return;
if (!i::FLAG_page_promotion || FLAG_always_promote_young_mc) return;
v8::Isolate* isolate = NewIsolateForPagePromotion();
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
......@@ -129,7 +129,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToNew) {
}
UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
if (!i::FLAG_page_promotion) return;
if (!i::FLAG_page_promotion || FLAG_always_promote_young_mc) return;
// Test makes sure JSArrayBuffer backing stores are still tracked after
// new-to-new promotion.
......@@ -218,7 +218,7 @@ UNINITIALIZED_TEST(PagePromotion_NewToOldJSArrayBuffer) {
}
UNINITIALIZED_HEAP_TEST(Regress658718) {
if (!i::FLAG_page_promotion) return;
if (!i::FLAG_page_promotion || FLAG_always_promote_young_mc) return;
v8::Isolate* isolate = NewIsolateForPagePromotion(4, 8);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
......
......@@ -37,7 +37,8 @@ TEST(WeakReferencesBasic) {
MaybeObject code_object = lh->data1();
CHECK(code_object->IsSmi());
CcTest::CollectAllGarbage();
CHECK(Heap::InYoungGeneration(*lh));
CHECK(FLAG_always_promote_young_mc ? !Heap::InYoungGeneration(*lh)
: Heap::InYoungGeneration(*lh));
CHECK_EQ(code_object, lh->data1());
{
......
......@@ -157,7 +157,7 @@ bool EphemeronHashTableContainsKey(EphemeronHashTable table, HeapObject key) {
}
} // namespace
TEST(WeakMapPromotion) {
TEST(WeakMapPromotionMarkCompact) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
Factory* factory = isolate->factory();
......@@ -165,7 +165,10 @@ TEST(WeakMapPromotion) {
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
CcTest::CollectAllGarbage();
CHECK(ObjectInYoungGeneration(weakmap->table()));
CHECK(FLAG_always_promote_young_mc
? !ObjectInYoungGeneration(weakmap->table())
: ObjectInYoungGeneration(weakmap->table()));
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
......@@ -177,7 +180,8 @@ TEST(WeakMapPromotion) {
EphemeronHashTable::cast(weakmap->table()), *object));
CcTest::CollectAllGarbage();
CHECK(ObjectInYoungGeneration(*object));
CHECK(FLAG_always_promote_young_mc ? !ObjectInYoungGeneration(*object)
: ObjectInYoungGeneration(*object));
CHECK(!ObjectInYoungGeneration(weakmap->table()));
CHECK(EphemeronHashTableContainsKey(
EphemeronHashTable::cast(weakmap->table()), *object));
......@@ -196,7 +200,7 @@ TEST(WeakMapScavenge) {
HandleScope scope(isolate);
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
CcTest::CollectAllGarbage();
heap::GcAndSweep(isolate->heap(), NEW_SPACE);
CHECK(ObjectInYoungGeneration(weakmap->table()));
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
......