Commit e967b449 authored by Ulan Degenbaev, committed by Commit Bot

Revert "[heap] Replace ConcurrentSweepingState with a MemoryChunk local epoch counter."

This reverts commit 907f3a64.

Reason for revert: speculative revert for v8:9445
I will reland if the crash is not fixed by the revert.

Original change's description:
> [heap] Replace ConcurrentSweepingState with a MemoryChunk local epoch counter.
>
> Bug: v8:9093
> Change-Id: I7c415fd0ea9e48f7ee189115f164825cb120695b
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1624213
> Commit-Queue: Hannes Payer <hpayer@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#62423}

TBR=ulan@chromium.org,hpayer@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:9093, v8:9445
Change-Id: Ia81a52579dc0a89f57ee41c7d0f8b1ba0f9bba81
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1691025
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62575}
parent 590a9f78
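As orientation for the diff below: a minimal standalone sketch, assuming simplified stand-in types (PageWithState and PageWithEpoch are illustrative names, not the real V8 MemoryChunk), of the two ways a page can answer SweepingDone() — the explicit state enum this revert restores, and the per-page epoch comparison it removes.

// Minimal sketch under the assumptions stated above; not V8 code.
#include <atomic>
#include <cstdint>

// Scheme restored by this revert: an explicit per-page sweeping state.
enum ConcurrentSweepingState { kSweepingDone, kSweepingPending, kSweepingInProgress };

struct PageWithState {
  std::atomic<intptr_t> concurrent_sweeping{kSweepingDone};
  bool SweepingDone() const { return concurrent_sweeping == kSweepingDone; }
};

// Scheme removed by this revert: a per-page epoch compared against the
// collector-global mark-compact epoch; matching epochs mean the page is swept.
struct PageWithEpoch {
  std::atomic<uintptr_t> mark_compact_epoch{0};
  bool SweepingDone(uintptr_t collector_epoch) const {
    return mark_compact_epoch == collector_epoch;
  }
  void MarkSwept() { mark_compact_epoch++; }  // sweeping of this page finished
};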
@@ -103,8 +103,7 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return true;
DCHECK_IMPLIES(Sweeper::IsValidSweepingSpace(page->owner()->identity()),
!page->SweepingDone());
DCHECK(page->SweepingDone());
tracker->Process([mode](JSArrayBuffer old_buffer, JSArrayBuffer* new_buffer) {
MapWord map_word = old_buffer.map_word();
if (map_word.IsForwardingAddress()) {
@@ -83,7 +83,7 @@ class ConcurrentMarkingVisitor final
ConcurrentMarking::MarkingWorklist* shared,
MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
bool embedder_tracing_enabled, uintptr_t mark_compact_epoch,
bool embedder_tracing_enabled, unsigned mark_compact_epoch,
bool is_forced_gc)
: shared_(shared, task_id),
weak_objects_(weak_objects),
@@ -673,7 +673,7 @@ class ConcurrentMarkingVisitor final
int task_id_;
SlotSnapshot slot_snapshot_;
bool embedder_tracing_enabled_;
const uintptr_t mark_compact_epoch_;
const unsigned mark_compact_epoch_;
bool is_forced_gc_;
BytecodeFlushMode bytecode_flush_mode_;
};
@@ -105,7 +105,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
std::atomic<bool> preemption_request;
MemoryChunkDataMap memory_chunk_data;
size_t marked_bytes = 0;
uintptr_t mark_compact_epoch;
unsigned mark_compact_epoch;
bool is_forced_gc;
char cache_line_padding[64];
};
@@ -1941,6 +1941,8 @@ void MarkCompactCollector::MarkLiveObjects() {
if (was_marked_incrementally_) {
heap()->incremental_marking()->Deactivate();
}
epoch_++;
}
void MarkCompactCollector::ClearNonLiveReferences() {
@@ -2784,6 +2786,7 @@ class Evacuator : public Malloced {
void Evacuator::EvacuatePage(MemoryChunk* chunk) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
DCHECK(chunk->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
{
@@ -3010,7 +3013,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
DCHECK_EQ(heap()->old_space(), page->owner());
// The move added page->allocated_bytes to the old space, but we are
// going to sweep the page and add page->live_byte_count.
page->MarkUnswept();
heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
page);
} else {
@@ -3763,7 +3765,7 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
aborted_pages_verified++;
} else {
DCHECK(p->IsEvacuationCandidate());
DCHECK(!p->SweepingDone());
DCHECK(p->SweepingDone());
p->owner()->memory_chunk_list().Remove(p);
}
}
@@ -3779,7 +3781,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
non_atomic_marking_state()->SetLiveBytes(p, 0);
CHECK(!p->SweepingDone());
CHECK(p->SweepingDone());
space->ReleasePage(p);
}
old_space_evacuation_pages_.clear();
@@ -3795,7 +3797,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// Loop needs to support deletion if live bytes == 0 for a page.
for (auto it = space->begin(); it != space->end();) {
Page* p = *(it++);
DCHECK(!p->SweepingDone());
DCHECK(p->SweepingDone());
if (p->IsEvacuationCandidate()) {
// Will be processed in Evacuate.
@@ -3830,11 +3832,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
void MarkCompactCollector::StartSweepSpaces() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
// We increment the epoch when sweeping starts. That will make all sweepable
// pages of the old generation not swept.
epoch_++;
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
@@ -4866,11 +4863,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
live_bytes += live_bytes_on_page;
if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
// A page is promoted to the old generation. We have to give it the
// current MC epoch counter. It will take part in the sweeping phase
// of the next MC.
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
page->InitializeSweepingState();
} else {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
@@ -709,7 +709,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif
uintptr_t epoch() const { return epoch_; }
unsigned epoch() const { return epoch_; }
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
@@ -909,17 +909,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
// Counts the number of major garbage collections. The counter is
// Counts the number of major mark-compact collections. The counter is
// incremented right after marking. This is used for:
// - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
// two bits are used, so it is okay if this counter overflows and wraps
// around.
// - identifying if a MemoryChunk is swept. When sweeping starts the epoch
// counter is incremented. When sweeping of a MemoryChunk finishes the
// epoch counter of a MemoryChunk is incremented. A MemoryChunk is swept
// if both counters match. A MemoryChunk still requires sweeping if
// they don't match.
uintptr_t epoch_ = 0;
unsigned epoch_ = 0;
friend class FullEvacuator;
friend class RecordMigratedSlotVisitor;
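The comment in the hunk above retains only the descriptor-marking use of the epoch, where just the lower two bits matter. A hedged sketch of that idea, assuming an illustrative bit layout rather than the exact NumberOfMarkedDescriptors encoding shown further down in this diff:

// Illustrative sketch only: a counter tagged with a 2-bit epoch so stale
// cached values can be detected lazily. Field layout is an assumption.
#include <cstdint>

constexpr unsigned kEpochBits = 2;
constexpr unsigned kEpochMask = (1u << kEpochBits) - 1;

// Pack (epoch, marked) into one 16-bit raw value.
inline uint16_t Encode(unsigned epoch, uint16_t marked) {
  return static_cast<uint16_t>((marked << kEpochBits) | (epoch & kEpochMask));
}

// A raw value stamped with a different epoch is stale; treat it as 0 marked.
inline uint16_t Decode(unsigned current_epoch, uint16_t raw) {
  if ((raw & kEpochMask) != (current_epoch & kEpochMask)) return 0;
  return static_cast<uint16_t>(raw >> kEpochBits);
}

Because only the two low bits are ever compared, the counter may wrap around freely, which is why narrowing its type from uintptr_t back to unsigned does not affect this use.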
@@ -1013,7 +1008,7 @@ class MarkingVisitor final
Heap* const heap_;
MarkCompactCollector* const collector_;
MarkingState* const marking_state_;
const uintptr_t mark_compact_epoch_;
const unsigned mark_compact_epoch_;
};
class EvacuationScope {
@@ -711,7 +711,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->invalidated_slots_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->InitializeSweepingState();
chunk->set_concurrent_sweeping_state(kSweepingDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
chunk->mutex_ = new base::Mutex();
@@ -1023,24 +1023,6 @@ void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
}
}
bool MemoryChunk::SweepingDone() {
return !Sweeper::IsValidSweepingSpace(owner()->identity()) ||
heap_->mark_compact_collector()->epoch() == mark_compact_epoch_;
}
void MemoryChunk::MarkUnswept() {
DCHECK(Sweeper::IsValidSweepingSpace(owner()->identity()));
mark_compact_epoch_ = heap_->mark_compact_collector()->epoch() - 1;
}
void MemoryChunk::InitializeSweepingState() {
if (Sweeper::IsValidSweepingSpace(owner()->identity())) {
mark_compact_epoch_ = heap_->mark_compact_collector()->epoch();
} else {
mark_compact_epoch_ = 0;
}
}
void Page::ResetAllocationStatistics() {
allocated_bytes_ = area_size();
wasted_memory_ = 0;
@@ -1758,6 +1740,7 @@ Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
}
size_t PagedSpace::AddPage(Page* page) {
CHECK(page->SweepingDone());
page->set_owner(this);
memory_chunk_list_.PushBack(page);
AccountCommitted(page->size());
@@ -2140,8 +2123,8 @@ void PagedSpace::VerifyLiveBytes() {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(page);
DCHECK(page->SweepingDone());
int black_size = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
@@ -367,6 +367,17 @@ class MemoryChunk {
static const Flags kSkipEvacuationSlotsRecordingMask =
kEvacuationCandidateMask | kIsInYoungGenerationMask;
// |kSweepingDone|: The page state when sweeping is complete or sweeping must
// not be performed on that page. Sweeper threads that are done with their
// work will set this value and not touch the page anymore.
// |kSweepingPending|: This page is ready for parallel sweeping.
// |kSweepingInProgress|: This page is currently swept by a sweeper thread.
enum ConcurrentSweepingState {
kSweepingDone,
kSweepingPending,
kSweepingInProgress,
};
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
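The enum comments in the hunk above describe the three states a sweepable page moves through. A minimal sketch of that lifecycle, assuming simplified stand-ins (Page, PrepareToBeSwept, and SweepPage are illustrative, not the real Sweeper API); the transitions mirror the Sweeper hunks later in this diff.

// Sketch of the restored state machine under the assumptions stated above.
#include <atomic>
#include <cassert>

enum ConcurrentSweepingState { kSweepingDone, kSweepingPending, kSweepingInProgress };

struct Page {
  std::atomic<int> state{kSweepingDone};  // pages start out as "done"
};

// Mirrors Sweeper::PrepareToBeSweptPage: queue a page for parallel sweeping.
void PrepareToBeSwept(Page* p) {
  assert(p->state == kSweepingDone);
  p->state = kSweepingPending;
}

// Mirrors Sweeper::ParallelSweepPage + RawSweep: one sweeper thread claims the
// page, sweeps it, and finally publishes kSweepingDone.
void SweepPage(Page* p) {
  assert(p->state == kSweepingPending);
  p->state = kSweepingInProgress;
  // ... sweep dead objects, rebuild the free list ...
  p->state = kSweepingDone;  // sweeper threads will not touch the page again
}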
@@ -401,7 +412,8 @@ class MemoryChunk {
+ kSystemPointerSize // InvalidatedSlots* invalidated_slots_
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kUIntptrSize // std::atomic<uintptr_t> mark_compact_epoch_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
// concurrent_sweeping_
+ kSystemPointerSize // base::Mutex* page_protection_change_mutex_
+ kSystemPointerSize // unitptr_t write_unprotect_counter_
+ kSizetSize * ExternalBackingStoreType::kNumTypes
@@ -475,10 +487,15 @@ class MemoryChunk {
return addr >= area_start() && addr <= area_end();
}
V8_EXPORT_PRIVATE void InitializeSweepingState();
void MarkSwept() { mark_compact_epoch_++; }
V8_EXPORT_PRIVATE void MarkUnswept();
V8_EXPORT_PRIVATE bool SweepingDone();
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
}
ConcurrentSweepingState concurrent_sweeping_state() {
return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
}
bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -776,7 +793,7 @@ class MemoryChunk {
base::Mutex* mutex_;
std::atomic<uintptr_t> mark_compact_epoch_;
std::atomic<intptr_t> concurrent_sweeping_;
base::Mutex* page_protection_change_mutex_;
@@ -1133,6 +1150,7 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
// Tracks off-heap memory used by this space.
std::atomic<size_t>* external_backing_store_bytes_;
private:
static const intptr_t kIdOffset = 9 * kSystemPointerSize;
bool allocation_observers_paused_;
@@ -247,8 +247,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK_IMPLIES(free_list_mode == REBUILD_FREE_LIST,
!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
@@ -368,13 +367,9 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
// The allocated_bytes() counter is precisely the total size of objects.
DCHECK_EQ(live_bytes, p->allocated_bytes());
}
p->set_concurrent_sweeping_state(Page::kSweepingDone);
if (code_object_registry) code_object_registry->Finalize();
if (free_list_mode == IGNORE_FREE_LIST) return 0;
p->MarkSwept();
DCHECK(p->SweepingDone());
return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
@@ -429,9 +424,12 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
// the page protection mode from rx -> rw while sweeping.
CodePageMemoryModificationScope code_page_scope(page);
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
@@ -474,17 +472,17 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
// happened when the page was initially added, so it is skipped here.
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
DCHECK(!page->SweepingDone());
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
DCHECK(!page->SweepingDone());
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
page->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
page->set_concurrent_sweeping_state(Page::kSweepingPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
}
@@ -576,8 +574,10 @@ void Sweeper::AddPageForIterability(Page* page) {
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
DCHECK(IsValidIterabilitySpace(page->owner_identity()));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
iterability_list_.push_back(page);
page->set_concurrent_sweeping_state(Page::kSweepingPending);
}
void Sweeper::MakeIterable(Page* page) {
@@ -77,11 +77,6 @@ class Sweeper {
};
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
static bool IsValidSweepingSpace(AllocationSpace space) {
return space >= FIRST_GROWABLE_PAGED_SPACE &&
space <= LAST_GROWABLE_PAGED_SPACE;
}
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
bool sweeping_in_progress() const { return sweeping_in_progress_; }
@@ -158,6 +153,11 @@
return space == NEW_SPACE || space == RO_SPACE;
}
static bool IsValidSweepingSpace(AllocationSpace space) {
return space >= FIRST_GROWABLE_PAGED_SPACE &&
space <= LAST_GROWABLE_PAGED_SPACE;
}
static int GetSweepSpaceIndex(AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
return space - FIRST_GROWABLE_PAGED_SPACE;
@@ -153,7 +153,7 @@ class DescriptorArray : public HeapObject {
// Atomic compare-and-swap operation on the raw_number_of_marked_descriptors.
int16_t CompareAndSwapRawNumberOfMarkedDescriptors(int16_t expected,
int16_t value);
int16_t UpdateNumberOfMarkedDescriptors(uintptr_t mark_compact_epoch,
int16_t UpdateNumberOfMarkedDescriptors(unsigned mark_compact_epoch,
int16_t number_of_marked_descriptors);
static constexpr int SizeFor(int number_of_all_descriptors) {
@@ -245,12 +245,11 @@ class NumberOfMarkedDescriptors {
static const int kMaxNumberOfMarkedDescriptors = Marked::kMax;
// Decodes the raw value of the number of marked descriptors for the
// given mark compact garbage collection epoch.
static inline int16_t decode(uintptr_t mark_compact_epoch,
int16_t raw_value) {
static inline int16_t decode(unsigned mark_compact_epoch, int16_t raw_value) {
unsigned epoch_from_value = Epoch::decode(static_cast<uint16_t>(raw_value));
int16_t marked_from_value =
Marked::decode(static_cast<uint16_t>(raw_value));
uintptr_t actual_epoch = mark_compact_epoch & Epoch::kMask;
unsigned actual_epoch = mark_compact_epoch & Epoch::kMask;
if (actual_epoch == epoch_from_value) return marked_from_value;
// If the epochs do not match, then either the raw_value is zero (freshly
// allocated descriptor array) or the epoch from value lags by 1.
@@ -263,7 +262,7 @@
// Encodes the number of marked descriptors for the given mark compact
// garbage collection epoch.
static inline int16_t encode(uintptr_t mark_compact_epoch, int16_t value) {
static inline int16_t encode(unsigned mark_compact_epoch, int16_t value) {
// TODO(ulan): avoid casting to int16_t by adding support for uint16_t
// atomics.
return static_cast<int16_t>(
@@ -4248,7 +4248,7 @@ void DescriptorArray::Sort() {
}
int16_t DescriptorArray::UpdateNumberOfMarkedDescriptors(
uintptr_t mark_compact_epoch, int16_t new_marked) {
unsigned mark_compact_epoch, int16_t new_marked) {
STATIC_ASSERT(kMaxNumberOfDescriptors <=
NumberOfMarkedDescriptors::kMaxNumberOfMarkedDescriptors);
int16_t old_raw_marked = raw_number_of_marked_descriptors();
@@ -6278,16 +6278,16 @@ HEAP_TEST(MarkCompactEpochCounter) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
uintptr_t epoch0 = heap->mark_compact_collector()->epoch();
unsigned epoch0 = heap->mark_compact_collector()->epoch();
CcTest::CollectGarbage(OLD_SPACE);
uintptr_t epoch1 = heap->mark_compact_collector()->epoch();
unsigned epoch1 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch0 + 1, epoch1);
heap::SimulateIncrementalMarking(heap, true);
CcTest::CollectGarbage(OLD_SPACE);
uintptr_t epoch2 = heap->mark_compact_collector()->epoch();
unsigned epoch2 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch1 + 1, epoch2);
CcTest::CollectGarbage(NEW_SPACE);
uintptr_t epoch3 = heap->mark_compact_collector()->epoch();
unsigned epoch3 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch2, epoch3);
}