Commit 76262021 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Merge marking states

MinorMC maintained a separate marking state to support interleaved GCs.
Since MinorMC now assumes that interleaving is not possible, it can use
the same marking state as the full GC.

Bug: v8:12612
Change-Id: Ibeb7df2eb24e448f811b497c9d16b3b132f87ec2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3735163
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81468}
parent b9af74c8
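
The gist of the change, before the diff: the collector-specific marking state classes (MajorMarkingState, MajorAtomicMarkingState, MajorNonAtomicMarkingState, MinorMarkingState, MinorNonAtomicMarkingState) collapse into a single MarkingState / AtomicMarkingState / NonAtomicMarkingState set shared by MarkCompactCollector and MinorMarkCompactCollector, and the young-generation bitmap and live-byte counter on MemoryChunk go away. The minimal C++ sketch below is illustrative only, not V8 source: MarkingStateBase is reduced to a stub, the collector classes and main() are invented for the example, and the real constructors take a PtrComprCageBase that is omitted here.

// Sketch only -- simplified stand-ins for the real V8 types.
enum class AccessMode { ATOMIC, NON_ATOMIC };

template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
 public:
  // The real base takes a PtrComprCageBase and exposes the marking bitmap,
  // live-byte accounting, and color predicates; all of that is elided here.
  MarkingStateBase() = default;
};

// Used by marking visitors (was MajorMarkingState).
class MarkingState final
    : public MarkingStateBase<MarkingState, AccessMode::ATOMIC> {};

// Used by Scavenger and Evacuator in TransferColor (was MajorAtomicMarkingState).
class AtomicMarkingState final
    : public MarkingStateBase<AtomicMarkingState, AccessMode::ATOMIC> {};

// Main-thread state (was MajorNonAtomicMarkingState / MinorNonAtomicMarkingState).
class NonAtomicMarkingState final
    : public MarkingStateBase<NonAtomicMarkingState, AccessMode::NON_ATOMIC> {};

// Both collectors now hand out the same state types directly; callers such as
// Sweeper(Heap*, NonAtomicMarkingState*) no longer spell out
// MarkCompactCollector::NonAtomicMarkingState or the Minor* variants.
class MarkCompactCollector {
 public:
  NonAtomicMarkingState* non_atomic_marking_state() { return &non_atomic_state_; }

 private:
  NonAtomicMarkingState non_atomic_state_;
};

class MinorMarkCompactCollector {
 public:
  NonAtomicMarkingState* non_atomic_marking_state() { return &non_atomic_state_; }

 private:
  NonAtomicMarkingState non_atomic_state_;
};

int main() {
  MarkCompactCollector full_gc;
  MinorMarkCompactCollector minor_gc;
  // The same NonAtomicMarkingState type now flows through both collectors.
  return (full_gc.non_atomic_marking_state() != nullptr &&
          minor_gc.non_atomic_marking_state() != nullptr)
             ? 0
             : 1;
}

In the actual diff this shows up as mechanical renames at every use site, plus the removal of MemoryChunk::AllocateYoungGenerationBitmap/ReleaseYoungGenerationBitmap and the young_generation_* fields that only the Minor* states needed.
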
......@@ -367,12 +367,10 @@ class BasicMemoryChunk {
friend class BasicMemoryChunkValidator;
friend class ConcurrentMarkingState;
friend class MajorMarkingState;
friend class MajorAtomicMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MarkingState;
friend class AtomicMarkingState;
friend class NonAtomicMarkingState;
friend class MemoryAllocator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
};
......
......@@ -652,7 +652,7 @@ void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
}
void ConcurrentMarking::FlushMemoryChunkData(
MajorNonAtomicMarkingState* marking_state) {
NonAtomicMarkingState* marking_state) {
DCHECK(!job_handle_ || !job_handle_->IsValid());
for (int i = 1; i <= kMaxTasks; i++) {
MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
......
......@@ -26,7 +26,7 @@ namespace internal {
class Heap;
class Isolate;
class MajorNonAtomicMarkingState;
class NonAtomicMarkingState;
class MemoryChunk;
class WeakObjects;
......@@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// Flushes native context sizes to the given table of the main thread.
void FlushNativeContexts(NativeContextStats* main_stats);
// Flushes memory chunk data using the given marking state.
void FlushMemoryChunkData(MajorNonAtomicMarkingState* marking_state);
void FlushMemoryChunkData(NonAtomicMarkingState* marking_state);
// This function is called for a new space page that was cleared after
// scavenge and is going to be re-used.
void ClearMemoryChunkData(MemoryChunk* chunk);
......
......@@ -28,7 +28,7 @@ class UnifiedHeapMarkingState final {
private:
Heap* const heap_;
MarkCompactCollector::MarkingState* const marking_state_;
MarkingState* const marking_state_;
MarkingWorklists::Local* local_marking_worklist_ = nullptr;
const bool track_retaining_path_;
};
......
......@@ -375,7 +375,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
MinorMarkCompactCollector::MarkingState* minor_marking_state =
MarkingState* minor_marking_state =
heap()->minor_mark_compact_collector()->marking_state();
collector_->local_marking_worklists()->Publish();
......
......@@ -39,10 +39,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// is triggered via stack guard.
enum class CompletionAction { kGcViaStackGuard, kGCViaTask };
using MarkingState = MarkCompactCollector::MarkingState;
using AtomicMarkingState = MarkCompactCollector::AtomicMarkingState;
using NonAtomicMarkingState = MarkCompactCollector::NonAtomicMarkingState;
class V8_NODISCARD PauseBlackAllocationScope {
public:
explicit PauseBlackAllocationScope(IncrementalMarking* marking)
......
......@@ -23,7 +23,7 @@ namespace internal {
// change.
using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
class MajorNonAtomicMarkingState;
class NonAtomicMarkingState;
// This class provides IsValid predicate that takes into account the set
// of invalidated objects in the given memory chunk.
......@@ -64,7 +64,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
Address sentinel_;
InvalidatedObjectInfo current_{kNullAddress, 0, false};
InvalidatedObjectInfo next_{kNullAddress, 0, false};
MajorNonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* marking_state_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
......
......@@ -500,7 +500,6 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
page->SetFlag(MemoryChunk::TO_PAGE);
UpdatePendingObject(result);
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
......
......@@ -296,7 +296,7 @@ class FullMarkingVerifier : public MarkingVerifier {
}
}
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* marking_state_;
};
class EvacuationVerifier : public ObjectVisitorWithCageBases,
......@@ -1446,7 +1446,7 @@ class ExternalStringTableCleaner : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
for (FullObjectSlot p = start; p < end; ++p) {
......@@ -1475,8 +1475,7 @@ class ExternalStringTableCleaner : public RootVisitor {
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit MarkCompactWeakObjectRetainer(
MarkCompactCollector::MarkingState* marking_state)
explicit MarkCompactWeakObjectRetainer(MarkingState* marking_state)
: marking_state_(marking_state) {}
Object RetainAs(Object object) override {
......@@ -1506,7 +1505,7 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
}
private:
MarkCompactCollector::MarkingState* const marking_state_;
MarkingState* const marking_state_;
};
class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
......@@ -2534,8 +2533,7 @@ void MarkCompactCollector::RecordObjectStats() {
namespace {
bool ShouldRetainMap(MarkCompactCollector::MarkingState* marking_state, Map map,
int age) {
bool ShouldRetainMap(MarkingState* marking_state, Map map, int age) {
if (age == 0) {
// The map has aged. Do not retain this map.
return false;
......@@ -4025,8 +4023,7 @@ class FullEvacuator : public Evacuator {
void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
MarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
NonAtomicMarkingState* marking_state = collector_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(chunk);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"FullEvacuator::RawEvacuatePage", "evacuation_mode",
......@@ -5005,10 +5002,9 @@ void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
namespace {
void ReRecordPage(
Heap* heap,
v8::internal::MarkCompactCollector::NonAtomicMarkingState* marking_state,
Address failed_start, Page* page) {
void ReRecordPage(Heap* heap,
v8::internal::NonAtomicMarkingState* marking_state,
Address failed_start, Page* page) {
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in first place.
......@@ -5266,7 +5262,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
}
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* marking_state_;
};
class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
......@@ -5342,7 +5338,7 @@ class YoungGenerationMarkingVisitor final
: public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
public:
YoungGenerationMarkingVisitor(
Isolate* isolate, MinorMarkCompactCollector::MarkingState* marking_state,
Isolate* isolate, MarkingState* marking_state,
MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local)
: NewSpaceVisitor(isolate),
worklist_local_(worklist_local),
......@@ -5418,7 +5414,7 @@ class YoungGenerationMarkingVisitor final
}
MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local_;
MinorMarkCompactCollector::MarkingState* marking_state_;
MarkingState* marking_state_;
};
void MinorMarkCompactCollector::SetUp() {}
......@@ -5624,6 +5620,11 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
void MinorMarkCompactCollector::CollectGarbage() {
DCHECK(!heap()->mark_compact_collector()->in_use());
#ifdef VERIFY_HEAP
for (Page* page : *heap()->new_space()) {
CHECK(page->marking_bitmap<AccessMode::NON_ATOMIC>()->IsClean());
}
#endif // VERIFY_HEAP
// Minor MC does not support processing the ephemeron remembered set.
DCHECK(heap()->ephemeron_remembered_set_.empty());
......@@ -5758,7 +5759,7 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
private:
Heap* heap_;
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* marking_state_;
};
} // namespace
......@@ -5856,7 +5857,7 @@ class YoungGenerationMarkingTask {
private:
MinorMarkCompactCollector::MarkingWorklist::Local marking_worklist_local_;
MinorMarkCompactCollector::MarkingState* marking_state_;
MarkingState* marking_state_;
YoungGenerationMarkingVisitor visitor_;
};
......@@ -6212,8 +6213,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
intptr_t* live_bytes) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"YoungGenerationEvacuator::RawEvacuatePage");
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
NonAtomicMarkingState* marking_state = collector_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(chunk);
switch (ComputeEvacuationMode(chunk)) {
case kObjectsNewToOld:
......
......@@ -184,65 +184,11 @@ enum class AlwaysPromoteYoung { kYes, kNo };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
class MinorMarkingState final
: public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
public:
explicit MinorMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return MemoryChunk::cast(chunk)
->young_generation_bitmap<AccessMode::ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
chunk->young_generation_live_byte_count_ += by;
}
intptr_t live_bytes(const MemoryChunk* chunk) const {
return chunk->young_generation_live_byte_count_;
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
chunk->young_generation_live_byte_count_ = value;
}
};
class MinorNonAtomicMarkingState final
: public MarkingStateBase<MinorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
explicit MinorNonAtomicMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return MemoryChunk::cast(chunk)
->young_generation_bitmap<AccessMode::NON_ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
chunk->young_generation_live_byte_count_.fetch_add(
by, std::memory_order_relaxed);
}
intptr_t live_bytes(const MemoryChunk* chunk) const {
return chunk->young_generation_live_byte_count_.load(
std::memory_order_relaxed);
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
chunk->young_generation_live_byte_count_.store(value,
std::memory_order_relaxed);
}
};
// This is used by marking visitors.
class MajorMarkingState final
: public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
class MarkingState final
: public MarkingStateBase<MarkingState, AccessMode::ATOMIC> {
public:
explicit MajorMarkingState(PtrComprCageBase cage_base)
explicit MarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
......@@ -267,10 +213,10 @@ class MajorMarkingState final
// This is used by Scavenger and Evacuator in TransferColor.
// Live byte increments have to be atomic.
class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
class AtomicMarkingState final
: public MarkingStateBase<AtomicMarkingState, AccessMode::ATOMIC> {
public:
explicit MajorAtomicMarkingState(PtrComprCageBase cage_base)
explicit AtomicMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
......@@ -283,11 +229,10 @@ class MajorAtomicMarkingState final
}
};
class MajorNonAtomicMarkingState final
: public MarkingStateBase<MajorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
class NonAtomicMarkingState final
: public MarkingStateBase<NonAtomicMarkingState, AccessMode::NON_ATOMIC> {
public:
explicit MajorNonAtomicMarkingState(PtrComprCageBase cage_base)
explicit NonAtomicMarkingState(PtrComprCageBase cage_base)
: MarkingStateBase(cage_base) {}
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
......@@ -387,10 +332,6 @@ class MainMarkingVisitor final
// Collector for young and old generation.
class MarkCompactCollector final {
public:
using MarkingState = MajorMarkingState;
using AtomicMarkingState = MajorAtomicMarkingState;
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
using MarkingVisitor = MainMarkingVisitor<MarkingState>;
class RootMarkingVisitor;
......@@ -816,9 +757,6 @@ class V8_NODISCARD EvacuationScope {
// Collector for young-generation only.
class MinorMarkCompactCollector final {
public:
using MarkingState = MinorMarkingState;
using NonAtomicMarkingState = MinorNonAtomicMarkingState;
static constexpr size_t kMaxParallelTasks = 8;
explicit MinorMarkCompactCollector(Heap* heap);
......
......@@ -44,7 +44,7 @@ class MarkingBarrier {
inline bool MarkValue(HeapObject host, HeapObject value);
private:
using MarkingState = MarkCompactCollector::MarkingState;
using MarkingState = MarkingState;
inline bool WhiteToGreyAndPush(HeapObject value);
......
......@@ -66,8 +66,6 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(std::atomic<size_t>[kNumTypes], ExternalBackingStoreBytes),
FIELD(heap::ListNode<MemoryChunk>, ListNode),
FIELD(FreeListCategory**, Categories),
FIELD(std::atomic<intptr_t>, YoungGenerationLiveByteCount),
FIELD(Bitmap*, YoungGenerationBitmap),
FIELD(CodeObjectRegistry*, CodeObjectRegistry),
FIELD(PossiblyEmptyBuckets, PossiblyEmptyBuckets),
FIELD(ActiveSystemPages, ActiveSystemPages),
......
......@@ -157,7 +157,6 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
page_protection_change_mutex_ = new base::Mutex();
write_unprotect_counter_ = 0;
mutex_ = new base::Mutex();
young_generation_bitmap_ = nullptr;
external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] = 0;
......@@ -260,8 +259,6 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
ReleaseInvalidatedSlots<OLD_TO_OLD>();
ReleaseInvalidatedSlots<OLD_TO_SHARED>();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
if (!IsLargePage()) {
Page* page = static_cast<Page*>(this);
page->ReleaseFreeListCategories();
......@@ -456,18 +453,6 @@ bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
invalidated_slots<type>()->end();
}
void MemoryChunk::AllocateYoungGenerationBitmap() {
DCHECK_NULL(young_generation_bitmap_);
young_generation_bitmap_ =
static_cast<Bitmap*>(base::Calloc(1, Bitmap::kSize));
}
void MemoryChunk::ReleaseYoungGenerationBitmap() {
DCHECK_NOT_NULL(young_generation_bitmap_);
base::Free(young_generation_bitmap_);
young_generation_bitmap_ = nullptr;
}
#ifdef DEBUG
void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
// Note that we cannot use offsetof because MemoryChunk is not a POD.
......@@ -502,13 +487,6 @@ void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
MemoryChunkLayout::kListNodeOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->categories_) - chunk->address(),
MemoryChunkLayout::kCategoriesOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->young_generation_live_byte_count_) -
chunk->address(),
MemoryChunkLayout::kYoungGenerationLiveByteCountOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->young_generation_bitmap_) -
chunk->address(),
MemoryChunkLayout::kYoungGenerationBitmapOffset);
DCHECK_EQ(reinterpret_cast<Address>(&chunk->code_object_registry_) -
chunk->address(),
MemoryChunkLayout::kCodeObjectRegistryOffset);
......
......@@ -152,9 +152,6 @@ class MemoryChunk : public BasicMemoryChunk {
return invalidated_slots_[type];
}
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
int FreeListsLength();
// Approximate amount of physical memory committed for this chunk.
......@@ -233,10 +230,6 @@ class MemoryChunk : public BasicMemoryChunk {
void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::Permission permission);
template <AccessMode mode>
ConcurrentBitmap<mode>* young_generation_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
#ifdef DEBUG
static void ValidateOffsets(MemoryChunk* chunk);
#endif
......@@ -284,9 +277,6 @@ class MemoryChunk : public BasicMemoryChunk {
FreeListCategory** categories_;
std::atomic<intptr_t> young_generation_live_byte_count_;
Bitmap* young_generation_bitmap_;
CodeObjectRegistry* code_object_registry_;
PossiblyEmptyBuckets possibly_empty_buckets_;
......@@ -299,13 +289,11 @@ class MemoryChunk : public BasicMemoryChunk {
private:
friend class ConcurrentMarkingState;
friend class MajorMarkingState;
friend class MajorAtomicMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MarkingState;
friend class AtomicMarkingState;
friend class NonAtomicMarkingState;
friend class MemoryAllocator;
friend class MemoryChunkValidator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
};
......
......@@ -25,7 +25,6 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->list_node().Initialize();
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
......@@ -76,7 +75,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
}
// Add more pages if we have less than expected_pages.
IncrementalMarking::NonAtomicMarkingState* marking_state =
NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
actual_pages++;
......@@ -181,7 +180,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
DCHECK(last_page());
IncrementalMarking::NonAtomicMarkingState* marking_state =
NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page = heap()->memory_allocator()->AllocatePage(
......@@ -237,8 +236,6 @@ void SemiSpace::FixPagesFlags(Page::MainThreadFlags flags,
page->ClearFlag(MemoryChunk::FROM_PAGE);
page->SetFlag(MemoryChunk::TO_PAGE);
page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
page, 0);
} else {
page->SetFlag(MemoryChunk::FROM_PAGE);
page->ClearFlag(MemoryChunk::TO_PAGE);
......@@ -682,7 +679,7 @@ void SemiSpaceNewSpace::ResetLinearAllocationArea() {
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
IncrementalMarking::NonAtomicMarkingState* marking_state =
NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (Page* p : to_space_) {
marking_state->ClearLiveness(p);
......
......@@ -450,7 +450,7 @@ class ObjectStatsCollectorImpl {
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* marking_state_;
std::unordered_set<HeapObject, Object::Hasher> virtual_objects_;
std::unordered_set<Address> external_resources_;
FieldStatsCollector field_stats_collector_;
......@@ -1103,7 +1103,7 @@ class ObjectStatsVisitor {
private:
ObjectStatsCollectorImpl* live_collector_;
ObjectStatsCollectorImpl* dead_collector_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* marking_state_;
ObjectStatsCollectorImpl::Phase phase_;
};
......
......@@ -841,8 +841,7 @@ void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
}
void PagedSpaceBase::VerifyLiveBytes() const {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
MarkingState* marking_state = heap()->incremental_marking()->marking_state();
PtrComprCageBase cage_base(heap()->isolate());
for (const Page* page : *this) {
CHECK(page->SweepingDone());
......
......@@ -524,7 +524,7 @@ void ScavengerCollector::SweepArrayBufferExtensions() {
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
const bool is_compacting = heap_->incremental_marking()->IsCompacting();
MajorAtomicMarkingState* marking_state =
AtomicMarkingState* marking_state =
heap_->incremental_marking()->atomic_marking_state();
for (SurvivingNewLargeObjectMapEntry update_info :
......
......@@ -173,8 +173,7 @@ void Page::CreateBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
MarkingState* marking_state = heap()->incremental_marking()->marking_state();
marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
......@@ -185,7 +184,7 @@ void Page::CreateBlackAreaBackground(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
IncrementalMarking::AtomicMarkingState* marking_state =
AtomicMarkingState* marking_state =
heap()->incremental_marking()->atomic_marking_state();
marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
......@@ -198,8 +197,7 @@ void Page::DestroyBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
MarkingState* marking_state = heap()->incremental_marking()->marking_state();
marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
......@@ -210,7 +208,7 @@ void Page::DestroyBlackAreaBackground(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
IncrementalMarking::AtomicMarkingState* marking_state =
AtomicMarkingState* marking_state =
heap()->incremental_marking()->atomic_marking_state();
marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
......
......@@ -19,7 +19,7 @@
namespace v8 {
namespace internal {
Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
Sweeper::Sweeper(Heap* heap, NonAtomicMarkingState* marking_state)
: heap_(heap),
marking_state_(marking_state),
sweeping_in_progress_(false),
......@@ -115,7 +115,7 @@ void Sweeper::TearDown() {
void Sweeper::StartSweeping() {
sweeping_in_progress_ = true;
should_reduce_memory_ = heap_->ShouldReduceMemory();
MajorNonAtomicMarkingState* marking_state =
NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
// Sorting is done in order to make compaction more efficient: by sweeping
......
......@@ -18,7 +18,7 @@ namespace v8 {
namespace internal {
class InvalidatedSlotsCleanup;
class MajorNonAtomicMarkingState;
class NonAtomicMarkingState;
class Page;
class PagedSpaceBase;
class Space;
......@@ -74,7 +74,7 @@ class Sweeper {
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
Sweeper(Heap* heap, NonAtomicMarkingState* marking_state);
bool sweeping_in_progress() const { return sweeping_in_progress_; }
......@@ -177,7 +177,7 @@ class Sweeper {
}
Heap* const heap_;
MajorNonAtomicMarkingState* marking_state_;
NonAtomicMarkingState* marking_state_;
std::unique_ptr<JobHandle> job_handle_;
base::Mutex mutex_;
base::ConditionVariable cv_page_swept_;
......
......@@ -2369,7 +2369,7 @@ TEST(InstanceOfStubWriteBarrier) {
CHECK(f->HasAttachedOptimizedCode());
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
MarkingState* marking_state = marking->marking_state();
const double kStepSizeInMs = 100;
while (!marking_state->IsBlack(f->code()) && !marking->IsStopped()) {
......@@ -5689,7 +5689,7 @@ TEST(Regress598319) {
CHECK(heap->lo_space()->Contains(arr.get()));
IncrementalMarking* marking = heap->incremental_marking();
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
MarkingState* marking_state = marking->marking_state();
CHECK(marking_state->IsWhite(arr.get()));
for (int i = 0; i < arr.get().length(); i++) {
HeapObject arr_value = HeapObject::cast(arr.get().get(i));
......@@ -5933,7 +5933,7 @@ TEST(LeftTrimFixedArrayInBlackArea) {
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(50, AllocationType::kOld);
CHECK(heap->old_space()->Contains(*array));
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
MarkingState* marking_state = marking->marking_state();
CHECK(marking_state->IsBlack(*array));
// Now left trim the allocated black area. A filler has to be installed
......@@ -5979,8 +5979,7 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
Address start_address = array->address();
Address end_address = start_address + array->Size();
Page* page = Page::FromAddress(start_address);
IncrementalMarking::NonAtomicMarkingState* marking_state =
marking->non_atomic_marking_state();
NonAtomicMarkingState* marking_state = marking->non_atomic_marking_state();
CHECK(marking_state->IsBlack(*array));
CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(start_address),
......@@ -6049,8 +6048,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
Address start_address = array->address();
Address end_address = start_address + array->Size();
Page* page = Page::FromAddress(start_address);
IncrementalMarking::NonAtomicMarkingState* marking_state =
marking->non_atomic_marking_state();
NonAtomicMarkingState* marking_state = marking->non_atomic_marking_state();
CHECK(marking_state->IsBlack(*array));
CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
......@@ -7106,7 +7104,7 @@ TEST(Regress978156) {
"collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(i::GarbageCollectionReason::kTesting);
}
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
MarkingState* marking_state = marking->marking_state();
// 6. Mark the filler black to access its two markbits. This triggers
// an out-of-bounds access of the marking bitmap in a bad case.
marking_state->WhiteToGrey(filler);
......
......@@ -471,7 +471,7 @@ TEST(Regress5829) {
heap->CreateFillerObjectAt(old_end - kTaggedSize, kTaggedSize);
heap->old_space()->FreeLinearAllocationArea();
Page* page = Page::FromAddress(array->address());
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
MarkingState* marking_state = marking->marking_state();
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(page, marking_state->bitmap(page))) {
CHECK(!object_and_size.first.IsFreeSpaceOrFiller());
......
......@@ -214,7 +214,7 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnConstruction) {
v8::Local<v8::Object> local =
v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state(i_isolate());
MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
auto ref =
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
......@@ -234,7 +234,7 @@ TEST_F(TracedReferenceTest, WriteBarrierOnHeapReset) {
v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
auto ref = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state(i_isolate());
MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
ref->Reset(v8_isolate(), local);
EXPECT_TRUE(state.IsGrey(HeapObject::cast(*Utils::OpenHandle(*local))));
......@@ -254,7 +254,7 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnStackReset) {
v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
v8::TracedReference<v8::Object> ref;
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state(i_isolate());
MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
ref.Reset(v8_isolate(), local);
EXPECT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
......@@ -274,7 +274,7 @@ TEST_F(TracedReferenceTest, WriteBarrierOnHeapCopy) {
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
auto ref_to = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state(i_isolate());
MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
*ref_to = *ref_from;
EXPECT_TRUE(!ref_from->IsEmpty());
......@@ -297,7 +297,7 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnStackCopy) {
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
v8::TracedReference<v8::Object> ref_to;
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state(i_isolate());
MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
ref_to = *ref_from;
EXPECT_TRUE(!ref_from->IsEmpty());
......@@ -318,7 +318,7 @@ TEST_F(TracedReferenceTest, WriteBarrierOnMove) {
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
auto ref_to = std::make_unique<v8::TracedReference<v8::Object>>();
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state(i_isolate());
MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
*ref_to = std::move(*ref_from);
ASSERT_TRUE(ref_from->IsEmpty());
......@@ -341,7 +341,7 @@ TEST_F(TracedReferenceTest, NoWriteBarrierOnStackMove) {
std::make_unique<v8::TracedReference<v8::Object>>(v8_isolate(), local);
v8::TracedReference<v8::Object> ref_to;
SimulateIncrementalMarking();
MarkCompactCollector::MarkingState state(i_isolate());
MarkingState state(i_isolate());
ASSERT_TRUE(state.IsWhite(HeapObject::cast(*Utils::OpenHandle(*local))));
ref_to = std::move(*ref_from);
ASSERT_TRUE(ref_from->IsEmpty());
......