Commit ef1640b8 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Remove flag always_promote_young_mc

This flag has been enabled by default in production for quite some time.
In addition, the flag was already defined as read-only and could not be
disabled at runtime, so let's remove it for good.

Bug: v8:10064
Change-Id: I0e71eee9d25960a96324d56c8f0191fe678dc6e6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3268907
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77806}
parent 6366f334
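
A read-only flag in V8 is baked in at compile time rather than stored in a mutable global, which is why `--no-always_promote_young_mc` had no effect and every branch on the flag was statically taken. A minimal sketch of that distinction, assuming simplified macro definitions (V8's real macros in `flag-definitions.h` are more involved):

```cpp
// Simplified illustration only, not V8's actual macro expansion.
#define DEFINE_BOOL(nam, def, cmt) bool FLAG_##nam = def;  // mutable at runtime
#define DEFINE_BOOL_READONLY(nam, def, cmt) \
  static constexpr bool FLAG_##nam = def;  // fixed at compile time

DEFINE_BOOL_READONLY(always_promote_young_mc, true,
                     "always promote young objects during mark-compact")

// Every `if (FLAG_always_promote_young_mc)` is statically true, so removing
// the flag in the diff below is a purely mechanical dead-branch cleanup.
```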
@@ -578,8 +578,6 @@ DEFINE_NEG_NEG_IMPLICATION(allocation_site_tracking,
 DEFINE_BOOL(allocation_site_pretenuring, true,
             "pretenure with allocation sites")
 DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
-DEFINE_BOOL_READONLY(always_promote_young_mc, true,
-                     "always promote young objects during mark-compact")
 DEFINE_INT(page_promotion_threshold, 70,
            "min percentage of live bytes on a page to enable fast evacuation")
 DEFINE_BOOL(trace_pretenuring, false,
...
@@ -1560,7 +1560,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
       Heap* heap, EvacuationAllocator* local_allocator,
       RecordMigratedSlotVisitor* record_visitor,
       Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
-      bool always_promote_young)
+      AlwaysPromoteYoung always_promote_young)
       : EvacuateVisitorBase(heap, local_allocator, record_visitor),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         promoted_size_(0),
@@ -1573,7 +1573,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
     if (TryEvacuateWithoutCopy(object)) return true;
     HeapObject target_object;
-    if (always_promote_young_) {
+    if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
       heap_->UpdateAllocationSite(object.map(), object,
                                   local_pretenuring_feedback_);
@@ -1657,7 +1657,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
   intptr_t semispace_copied_size_;
   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
   bool is_incremental_marking_;
-  bool always_promote_young_;
+  AlwaysPromoteYoung always_promote_young_;
 };
 template <PageEvacuationMode mode>
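
Alongside the flag removal, the change swaps a raw `bool` parameter for a dedicated `enum class` (added in mark-compact.h further down), a common readability refactoring: call sites name their intent instead of passing a bare `true`/`false`, and implicit conversions are rejected. A minimal sketch of the pattern, reusing the names from the patch (the surrounding function here is hypothetical):

```cpp
enum class AlwaysPromoteYoung { kYes, kNo };

// Hypothetical call site for illustration.
void EvacuatePage(AlwaysPromoteYoung always_promote_young) {
  if (always_promote_young == AlwaysPromoteYoung::kYes) {
    // ... move the object straight to old space ...
  }
}

// Before: EvacuatePage(true);                      // what does `true` mean?
// After:  EvacuatePage(AlwaysPromoteYoung::kYes);  // intent is explicit
```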
@@ -3164,8 +3164,7 @@ void MarkCompactCollector::EvacuateEpilogue() {
   // New space.
   if (heap()->new_space()) {
     heap()->new_space()->set_age_mark(heap()->new_space()->top());
-    DCHECK_IMPLIES(FLAG_always_promote_young_mc,
-                   heap()->new_space()->Size() == 0);
+    DCHECK_EQ(0, heap()->new_space()->Size());
   }
   // Deallocate unmarked large objects.
@@ -3234,7 +3233,8 @@ class Evacuator : public Malloced {
   }
   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
-            EvacuationAllocator* local_allocator, bool always_promote_young)
+            EvacuationAllocator* local_allocator,
+            AlwaysPromoteYoung always_promote_young)
       : heap_(heap),
         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(heap_, local_allocator, record_visitor,
@@ -3346,7 +3346,7 @@ class FullEvacuator : public Evacuator {
  public:
   explicit FullEvacuator(MarkCompactCollector* collector)
       : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
-                  FLAG_always_promote_young_mc),
+                  AlwaysPromoteYoung::kYes),
         record_visitor_(collector, &ephemeron_remembered_set_),
         local_allocator_(heap_,
                          CompactionSpaceKind::kCompactionSpaceForMarkCompact),
@@ -3525,13 +3525,14 @@ size_t MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
   return wanted_num_tasks;
 }
-bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
-                                              bool always_promote_young) {
+bool MarkCompactCollectorBase::ShouldMovePage(
+    Page* p, intptr_t live_bytes, AlwaysPromoteYoung always_promote_young) {
   const bool reduce_memory = heap()->ShouldReduceMemory();
   const Address age_mark = heap()->new_space()->age_mark();
   return !reduce_memory && !p->NeverEvacuate() &&
          (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
-         (always_promote_young || !p->Contains(age_mark)) &&
+         (always_promote_young == AlwaysPromoteYoung::kYes ||
+          !p->Contains(age_mark)) &&
          heap()->CanExpandOldGeneration(live_bytes);
 }
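
`ShouldMovePage` decides whether an entire to-space page is promoted in place instead of evacuating its objects one by one. A standalone sketch of the predicate with its inputs made explicit (the real code reads them from `Heap` and `Page`); the numbers in the closing comment are assumptions for illustration:

```cpp
#include <cstdint>

// Sketch: promote a page wholesale only when memory pressure is low, the
// page is evacuable, it is full enough of live objects, and (unless young
// objects are always promoted) it does not contain the age mark.
bool ShouldMovePageSketch(bool reduce_memory, bool never_evacuate,
                          intptr_t live_bytes, intptr_t threshold,
                          bool always_promote_young, bool contains_age_mark,
                          bool can_expand_old_generation) {
  return !reduce_memory && !never_evacuate && live_bytes > threshold &&
         (always_promote_young || !contains_age_mark) &&
         can_expand_old_generation;
}

// With page_promotion_threshold at its default of 70 and, say, 256 KiB of
// allocatable area per page, a page needs more than ~179 KiB of live bytes
// to qualify (70% of 256 KiB); the exact page size is an assumption here.
```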
@@ -3565,19 +3566,13 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
     if (live_bytes_on_page == 0) continue;
     live_bytes += live_bytes_on_page;
-    if (ShouldMovePage(page, live_bytes_on_page,
-                       FLAG_always_promote_young_mc)) {
-      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) ||
-          FLAG_always_promote_young_mc) {
-        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
-        DCHECK_EQ(heap()->old_space(), page->owner());
-        // The move added page->allocated_bytes to the old space, but we are
-        // going to sweep the page and add page->live_byte_count.
-        heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
-                                                    page);
-      } else {
-        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
-      }
+    if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes)) {
+      EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+      DCHECK_EQ(heap()->old_space(), page->owner());
+      // The move added page->allocated_bytes to the old space, but we are
+      // going to sweep the page and add page->live_byte_count.
+      heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
+                                                  page);
     }
     evacuation_items.emplace_back(ParallelWorkItem{}, page);
   }
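
The comment in the hunk above captures a subtle accounting rule: moving a page credits its full `allocated_bytes` to old space, yet the later sweep re-adds the live bytes it finds, so the allocated count must be decreased first. A worked example with illustrative numbers:

```cpp
#include <cstdint>

// Worked example (numbers are assumptions) of the accounting fix-up above.
void PagePromotionAccountingSketch() {
  constexpr intptr_t KB = 1024;
  intptr_t old_space_allocated = 0;
  const intptr_t page_allocated_bytes = 250 * KB;  // bytes ever bump-allocated
  const intptr_t page_live_bytes = 180 * KB;       // bytes marking found alive

  old_space_allocated += page_allocated_bytes;  // effect of Move(page)
  old_space_allocated -= page_allocated_bytes;  // DecreaseAllocatedBytes(...)
  old_space_allocated += page_live_bytes;       // effect of the later sweep

  // Net: old space accounts for exactly the live 180 KB, with no double
  // counting of the dead 70 KB.
}
```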
@@ -4040,9 +4035,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
           },
           SlotSet::FREE_EMPTY_BUCKETS);
-      DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
-                         FLAG_always_promote_young_mc,
-                     slots == 0);
+      DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
       if (slots == 0) {
         chunk_->ReleaseSlotSet<OLD_TO_NEW>();
@@ -4066,9 +4059,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
           },
           SlotSet::FREE_EMPTY_BUCKETS);
-      DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
-                         FLAG_always_promote_young_mc,
-                     slots == 0);
+      DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
       if (slots == 0) {
         chunk_->ReleaseSweepingSlotSet();
@@ -4166,12 +4157,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
   RememberedSetUpdatingMode updating_mode_;
 };
-std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
-    MemoryChunk* chunk, Address start, Address end) {
-  return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
-      heap(), chunk, start, end, non_atomic_marking_state());
-}
 std::unique_ptr<UpdatingItem>
 MarkCompactCollector::CreateRememberedSetUpdatingItem(
     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
@@ -4180,24 +4165,6 @@ MarkCompactCollector::CreateRememberedSetUpdatingItem(
       heap(), non_atomic_marking_state(), chunk, updating_mode);
 }
-int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
-    std::vector<std::unique_ptr<UpdatingItem>>* items) {
-  if (!heap()->new_space()) return 0;
-  // Seed to space pages.
-  const Address space_start = heap()->new_space()->first_allocatable_address();
-  const Address space_end = heap()->new_space()->top();
-  int pages = 0;
-  for (Page* page : PageRange(space_start, space_end)) {
-    Address start =
-        page->Contains(space_start) ? space_start : page->area_start();
-    Address end = page->Contains(space_end) ? space_end : page->area_end();
-    items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
-    pages++;
-  }
-  return pages;
-}
 template <typename IterateableSpace>
 int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
     std::vector<std::unique_ptr<UpdatingItem>>* items, IterateableSpace* space,
@@ -4316,12 +4283,11 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
     CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
                                       RememberedSetUpdatingMode::ALL);
-    // In order to update pointers in map space at the same time as other spaces
-    // we need to ensure that young generation is empty. Otherwise, iterating
-    // to space may require a valid body descriptor for e.g. WasmStruct which
-    // races with updating a slot in Map.
-    CHECK(FLAG_always_promote_young_mc);
-    CollectToSpaceUpdatingItems(&updating_items);
+    // Iterating to space may require a valid body descriptor for e.g.
+    // WasmStruct which races with updating a slot in Map. Since to space is
+    // empty after a full GC, such races can't happen.
+    DCHECK_IMPLIES(heap()->new_space(), heap()->new_space()->Size() == 0);
     updating_items.push_back(
         std::make_unique<EphemeronTableUpdatingItem>(heap()));
@@ -5138,6 +5104,22 @@ void MinorMarkCompactCollector::EvacuateEpilogue() {
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 }
+int MinorMarkCompactCollector::CollectToSpaceUpdatingItems(
+    std::vector<std::unique_ptr<UpdatingItem>>* items) {
+  // Seed to space pages.
+  const Address space_start = heap()->new_space()->first_allocatable_address();
+  const Address space_end = heap()->new_space()->top();
+  int pages = 0;
+  for (Page* page : PageRange(space_start, space_end)) {
+    Address start =
+        page->Contains(space_start) ? space_start : page->area_start();
+    Address end = page->Contains(space_end) ? space_end : page->area_end();
+    items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
+    pages++;
+  }
+  return pages;
+}
 std::unique_ptr<UpdatingItem>
 MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                      Address start,
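
`CollectToSpaceUpdatingItems`, moved here to the minor collector, seeds one updating item per to-space page, clamping the first page to the first allocatable address and the last page to the current top. A standalone sketch of that interval clamping with hypothetical types (the `Contains`-based checks in the original are equivalent to a max/min overlap for a contiguous range):

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct PageSketch {
  uintptr_t area_start;
  uintptr_t area_end;
};

// One (start, end) pair per page, clipped to [space_start, space_end).
std::vector<std::pair<uintptr_t, uintptr_t>> SeedToSpacePages(
    const std::vector<PageSketch>& pages, uintptr_t space_start,
    uintptr_t space_end) {
  std::vector<std::pair<uintptr_t, uintptr_t>> items;
  for (const PageSketch& page : pages) {
    uintptr_t start = std::max(page.area_start, space_start);
    uintptr_t end = std::min(page.area_end, space_end);
    if (start < end) items.emplace_back(start, end);  // one UpdatingItem
  }
  return items;
}
```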
@@ -5546,7 +5528,7 @@ class YoungGenerationEvacuator : public Evacuator {
  public:
   explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
       : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
-                  false),
+                  AlwaysPromoteYoung::kNo),
         record_visitor_(collector->heap()->mark_compact_collector()),
         local_allocator_(
             heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
@@ -5634,7 +5616,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
     if (live_bytes_on_page == 0) continue;
     live_bytes += live_bytes_on_page;
-    if (ShouldMovePage(page, live_bytes_on_page, false)) {
+    if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
       } else {
...
@@ -184,6 +184,7 @@ class LiveObjectVisitor : AllStatic {
   static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
 };
+enum class AlwaysPromoteYoung { kYes, kNo };
 enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
 enum MarkingTreatmentMode { KEEP, CLEAR };
 enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
@@ -215,8 +216,6 @@ class MarkCompactCollectorBase {
   virtual void Evacuate() = 0;
   virtual void EvacuatePagesInParallel() = 0;
   virtual void UpdatePointersAfterEvacuation() = 0;
-  virtual std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(
-      MemoryChunk* chunk, Address start, Address end) = 0;
   virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
       MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
@@ -228,10 +227,9 @@ class MarkCompactCollectorBase {
       MigrationObserver* migration_observer);
   // Returns whether this page should be moved according to heuristics.
-  bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);
-  int CollectToSpaceUpdatingItems(
-      std::vector<std::unique_ptr<UpdatingItem>>* items);
+  bool ShouldMovePage(Page* p, intptr_t live_bytes,
+                      AlwaysPromoteYoung promote_young);
   template <typename IterateableSpace>
   int CollectRememberedSetUpdatingItems(
       std::vector<std::unique_ptr<UpdatingItem>>* items,
@@ -712,9 +710,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   void EvacuatePagesInParallel() override;
   void UpdatePointersAfterEvacuation() override;
-  std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
-                                                          Address start,
-                                                          Address end) override;
   std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
       MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
@@ -870,10 +865,13 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
   std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                           Address start,
-                                                          Address end) override;
+                                                          Address end);
   std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
       MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
+  int CollectToSpaceUpdatingItems(
+      std::vector<std::unique_ptr<UpdatingItem>>* items);
   void SweepArrayBufferExtensions();
   MarkingWorklist* worklist_;
...
@@ -390,7 +390,7 @@ void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
     RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
   }
-  if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
+  if (slot_set_[OLD_TO_NEW] != nullptr)
     RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
 }
...
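
With the flag pinned to `true`, the removed condition in `InvalidateRecordedSlots` reduces by plain boolean algebra, which is all this hunk does:

```cpp
// !FLAG_always_promote_young_mc || (slot_set_[OLD_TO_NEW] != nullptr)
// == !true || (slot_set_[OLD_TO_NEW] != nullptr)
// == false || (slot_set_[OLD_TO_NEW] != nullptr)
// ==          (slot_set_[OLD_TO_NEW] != nullptr)
```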
@@ -362,7 +362,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
 }
 HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
-  if (FLAG_never_compact || FLAG_always_promote_young_mc) return;
+  if (FLAG_never_compact) return;
   // Test the scenario where we reach OOM during compaction and parts of the
   // page have already been migrated to a new one. Objects on the aborted page
   // are linked together and the very first object on the aborted page points
...
@@ -6685,8 +6685,7 @@ HEAP_TEST(Regress779503) {
     // currently scavenging.
     heap->delay_sweeper_tasks_for_testing_ = true;
     CcTest::CollectGarbage(OLD_SPACE);
-    CHECK(FLAG_always_promote_young_mc ? !Heap::InYoungGeneration(*byte_array)
-                                       : Heap::InYoungGeneration(*byte_array));
+    CHECK(!Heap::InYoungGeneration(*byte_array));
   }
   // Scavenging and sweeping the same page will crash as slots will be
   // overridden.
...
@@ -73,33 +73,6 @@ TEST(Promotion) {
   }
 }
-HEAP_TEST(NoPromotion) {
-  if (FLAG_always_promote_young_mc) return;
-  FLAG_stress_concurrent_allocation = false;  // For SealCurrentObjects.
-  // Page promotion allows pages to be moved to old space even in the case of
-  // OOM scenarios.
-  FLAG_page_promotion = false;
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  {
-    v8::HandleScope sc(CcTest::isolate());
-    Heap* heap = isolate->heap();
-    heap::SealCurrentObjects(heap);
-    int array_length = heap::FixedArrayLenFromSize(kMaxRegularHeapObjectSize);
-    Handle<FixedArray> array = isolate->factory()->NewFixedArray(array_length);
-    heap->set_force_oom(true);
-    // Array should be in the new space.
-    CHECK(heap->InSpace(*array, NEW_SPACE));
-    CcTest::CollectAllGarbage();
-    CcTest::CollectAllGarbage();
-    CHECK(heap->InSpace(*array, NEW_SPACE));
-  }
-}
 // This is the same as Factory::NewMap, except it doesn't retry on
 // allocation failure.
 AllocationResult HeapTester::AllocateMapForTest(Isolate* isolate) {
...
@@ -100,72 +100,6 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
   isolate->Dispose();
 }
-UNINITIALIZED_TEST(PagePromotion_NewToNew) {
-  if (!i::FLAG_page_promotion || FLAG_always_promote_young_mc) return;
-  v8::Isolate* isolate = NewIsolateForPagePromotion();
-  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
-  {
-    v8::Isolate::Scope isolate_scope(isolate);
-    v8::HandleScope handle_scope(isolate);
-    v8::Context::New(isolate)->Enter();
-    Heap* heap = i_isolate->heap();
-    std::vector<Handle<FixedArray>> handles;
-    heap::SimulateFullSpace(heap->new_space(), &handles);
-    CHECK_GT(handles.size(), 0u);
-    // Last object in handles should definitely be on a page that does not
-    // contain the age mark, thus qualifying for moving.
-    Handle<FixedArray> last_object = handles.back();
-    Page* to_be_promoted_page = Page::FromHeapObject(*last_object);
-    CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
-    CHECK(to_be_promoted_page->Contains(last_object->address()));
-    CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
-    heap::GcAndSweep(heap, OLD_SPACE);
-    CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
-    CHECK(to_be_promoted_page->Contains(last_object->address()));
-  }
-  isolate->Dispose();
-}
-UNINITIALIZED_HEAP_TEST(Regress658718) {
-  if (!i::FLAG_page_promotion || FLAG_always_promote_young_mc) return;
-  v8::Isolate* isolate = NewIsolateForPagePromotion(4, 8);
-  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
-  {
-    v8::Isolate::Scope isolate_scope(isolate);
-    v8::HandleScope handle_scope(isolate);
-    v8::Context::New(isolate)->Enter();
-    Heap* heap = i_isolate->heap();
-    heap->delay_sweeper_tasks_for_testing_ = true;
-    GrowNewSpace(heap);
-    {
-      v8::HandleScope inner_handle_scope(isolate);
-      std::vector<Handle<FixedArray>> handles;
-      heap::SimulateFullSpace(heap->new_space(), &handles);
-      CHECK_GT(handles.size(), 0u);
-      // Last object in handles should definitely be on a page that does not
-      // contain the age mark, thus qualifying for moving.
-      Handle<FixedArray> last_object = handles.back();
-      Page* to_be_promoted_page = Page::FromHeapObject(*last_object);
-      CHECK(!to_be_promoted_page->Contains(heap->new_space()->age_mark()));
-      CHECK(to_be_promoted_page->Contains(last_object->address()));
-      CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
-      heap->CollectGarbage(OLD_SPACE, i::GarbageCollectionReason::kTesting);
-      CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
-      CHECK(to_be_promoted_page->Contains(last_object->address()));
-    }
-    heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
-    heap->new_space()->Shrink();
-    heap->memory_allocator()->unmapper()->EnsureUnmappingCompleted();
-    heap->delay_sweeper_tasks_for_testing_ = false;
-    heap->mark_compact_collector()->sweeper()->StartSweeperTasks();
-    heap->mark_compact_collector()->EnsureSweepingCompleted();
-  }
-  isolate->Dispose();
-}
 #endif  // V8_LITE_MODE
 }  // namespace heap
...
@@ -40,8 +40,7 @@ TEST(WeakReferencesBasic) {
   MaybeObject code_object = lh->data1();
   CHECK(code_object->IsSmi());
   CcTest::CollectAllGarbage();
-  CHECK(FLAG_always_promote_young_mc ? !Heap::InYoungGeneration(*lh)
-                                     : Heap::InYoungGeneration(*lh));
+  CHECK(!Heap::InYoungGeneration(*lh));
   CHECK_EQ(code_object, lh->data1());
   {
...
@@ -166,9 +166,7 @@ TEST(WeakMapPromotionMarkCompact) {
   CcTest::CollectAllGarbage();
-  CHECK(FLAG_always_promote_young_mc
-            ? !ObjectInYoungGeneration(weakmap->table())
-            : ObjectInYoungGeneration(weakmap->table()));
+  CHECK(!ObjectInYoungGeneration(weakmap->table()));
   Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
   Handle<JSObject> object = factory->NewJSObjectFromMap(map);
@@ -180,8 +178,7 @@ TEST(WeakMapPromotionMarkCompact) {
       EphemeronHashTable::cast(weakmap->table()), *object));
   CcTest::CollectAllGarbage();
-  CHECK(FLAG_always_promote_young_mc ? !ObjectInYoungGeneration(*object)
-                                     : ObjectInYoungGeneration(*object));
+  CHECK(!ObjectInYoungGeneration(*object));
   CHECK(!ObjectInYoungGeneration(weakmap->table()));
   CHECK(EphemeronHashTableContainsKey(
       EphemeronHashTable::cast(weakmap->table()), *object));
...
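
The test updates above all encode the same behavioral invariant: after a full mark-compact GC, surviving young objects are always promoted, so the `InYoungGeneration` checks flip to their negation. A minimal cctest-style sketch of that invariant, assuming the usual cctest setup seen in the touched tests (array size and setup are illustrative):

```cpp
{
  v8::HandleScope scope(CcTest::isolate());
  Isolate* isolate = CcTest::i_isolate();
  Handle<FixedArray> array = isolate->factory()->NewFixedArray(16);
  CHECK(Heap::InYoungGeneration(*array));  // freshly allocated in new space

  CcTest::CollectAllGarbage();  // full mark-compact

  // Unconditionally promoted: no object survives a full GC in new space.
  CHECK(!Heap::InYoungGeneration(*array));
}
```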