Commit d20c1414 authored by Maya Lekova, committed by Commit Bot

Revert "[heap] Cleanup: Use std::atomic<T> instead of base::AtomicValue<T> in heap/*."

This reverts commit 770ace07.

Reason for revert: Breaking the V8 Arm and V8 Arm - debug builds https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Arm%20-%20debug/7260

Original change's description:
> [heap] Cleanup: Use std::atomic<T> instead of base::AtomicValue<T> in heap/*.
> 
> Bug: chromium:842083
> Change-Id: Idc04f9ddea326df4ac48a8c58321620660b21549
> Reviewed-on: https://chromium-review.googlesource.com/1129520
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Hannes Payer <hpayer@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#54389}

TBR=ulan@chromium.org,hpayer@chromium.org,mlippautz@chromium.org

Change-Id: I108bc5386ea825c2700f9b830b85a1ca8c10bf4b
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:842083
Reviewed-on: https://chromium-review.googlesource.com/1134966
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54398}
parent ae044d69
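
For readers skimming the diff: the reverted cleanup swapped V8's base::AtomicValue<T> wrapper (Value() / SetValue() / TrySetValue()) for plain std::atomic<T>. Below is a minimal, self-contained sketch of the monotonic high-water-mark update that appears in the spaces.h hunks further down, written with only the standard library. The identifiers are illustrative stand-ins for the V8 members, and the AtomicValue spellings noted in comments are the ones visible in this revert, not an authoritative mapping.

```cpp
// Sketch of the std::atomic<T> pattern this revert removes again.
// The base::AtomicValue<T> equivalents (Value/SetValue/TrySetValue) are the
// ones visible in the hunks below; memory-ordering details are simplified.
#include <atomic>
#include <cstdint>
#include <iostream>

std::atomic<std::intptr_t> high_water_mark{0};

// Mirrors MemoryChunk::UpdateHighWaterMark: raise the mark monotonically.
void UpdateHighWaterMark(std::intptr_t new_mark) {
  std::intptr_t old_mark = 0;
  do {
    old_mark = high_water_mark;  // AtomicValue: high_water_mark_.Value()
  } while ((new_mark > old_mark) &&
           // AtomicValue: !high_water_mark_.TrySetValue(old_mark, new_mark)
           !high_water_mark.compare_exchange_weak(old_mark, new_mark));
}

int main() {
  UpdateHighWaterMark(128);
  UpdateHighWaterMark(64);  // Lower value: the mark must stay at 128.
  std::cout << high_water_mark << "\n";  // Prints 128.
}
```
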
@@ -627,7 +627,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
         marked_bytes += current_marked_bytes;
         base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                   marked_bytes);
-        if (task_state->preemption_request) {
+        if (task_state->preemption_request.Value()) {
           TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                        "ConcurrentMarking::Run Preempted");
           break;
@@ -703,7 +703,7 @@ void ConcurrentMarking::ScheduleTasks() {
         heap_->isolate()->PrintWithTimestamp(
             "Scheduling concurrent marking task %d\n", i);
       }
-      task_state_[i].preemption_request = false;
+      task_state_[i].preemption_request.SetValue(false);
       is_pending_[i] = true;
       ++pending_task_count_;
       auto task =
@@ -744,7 +744,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
         is_pending_[i] = false;
         --pending_task_count_;
       } else if (stop_request == StopRequest::PREEMPT_TASKS) {
-        task_state_[i].preemption_request = true;
+        task_state_[i].preemption_request.SetValue(true);
       }
     }
   }
......
@@ -95,7 +95,7 @@ class ConcurrentMarking {
   struct TaskState {
     // The main thread sets this flag to true when it wants the concurrent
    // marker to give up the worker thread.
-    std::atomic<bool> preemption_request;
+    base::AtomicValue<bool> preemption_request;
     LiveBytesMap live_bytes;
     size_t marked_bytes = 0;
......
@@ -1425,7 +1425,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
     if (deserialization_complete_) {
       memory_reducer_->NotifyMarkCompact(event);
     }
-    memory_pressure_level_ = MemoryPressureLevel::kNone;
+    memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
   }
   tracer()->Stop(collector);
@@ -3484,9 +3484,9 @@ void Heap::CheckMemoryPressure() {
     // The optimizing compiler may be unnecessarily holding on to memory.
     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
   }
-  if (memory_pressure_level_ == MemoryPressureLevel::kCritical) {
+  if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
     CollectGarbageOnMemoryPressure();
-  } else if (memory_pressure_level_ == MemoryPressureLevel::kModerate) {
+  } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
       StartIncrementalMarking(kReduceMemoryFootprintMask,
                               GarbageCollectionReason::kMemoryPressure);
@@ -3538,8 +3538,8 @@ void Heap::CollectGarbageOnMemoryPressure() {
 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
                                       bool is_isolate_locked) {
-  MemoryPressureLevel previous = memory_pressure_level_;
-  memory_pressure_level_ = level;
+  MemoryPressureLevel previous = memory_pressure_level_.Value();
+  memory_pressure_level_.SetValue(level);
   if ((previous != MemoryPressureLevel::kCritical &&
        level == MemoryPressureLevel::kCritical) ||
       (previous == MemoryPressureLevel::kNone &&
......
@@ -704,7 +704,7 @@ class Heap {
   bool ShouldOptimizeForMemoryUsage();
   bool HighMemoryPressure() {
-    return memory_pressure_level_ != MemoryPressureLevel::kNone;
+    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
   }
   void RestoreHeapLimit(size_t heap_limit) {
@@ -2005,7 +2005,7 @@ class Heap {
   // Stores the memory pressure level that set by MemoryPressureNotification
   // and reset by a mark-compact garbage collection.
-  std::atomic<MemoryPressureLevel> memory_pressure_level_;
+  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
   std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
       near_heap_limit_callbacks_;
......
@@ -49,18 +49,17 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
     virtual ~Item() = default;
     // Marks an item as being finished.
-    void MarkFinished() { CHECK_EQ(kProcessing, state_.exchange(kFinished)); }
+    void MarkFinished() { CHECK(state_.TrySetValue(kProcessing, kFinished)); }
    private:
-    enum ProcessingState : uintptr_t { kAvailable, kProcessing, kFinished };
+    enum ProcessingState { kAvailable, kProcessing, kFinished };
     bool TryMarkingAsProcessing() {
-      ProcessingState available = kAvailable;
-      return state_.compare_exchange_weak(available, kProcessing);
+      return state_.TrySetValue(kAvailable, kProcessing);
     }
-    bool IsFinished() { return state_ == kFinished; }
-    std::atomic<ProcessingState> state_{kAvailable};
+    bool IsFinished() { return state_.Value() == kFinished; }
+    base::AtomicValue<ProcessingState> state_{kAvailable};
     friend class ItemParallelJob;
     friend class ItemParallelJob::Task;
......
@@ -3379,7 +3379,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
       // that this adds unusable memory into the free list that is later on
       // (in the free list) dropped again. Since we only use the flag for
      // testing this is fine.
-      p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
                           Heap::ShouldZapGarbage()
                               ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
......
@@ -621,8 +621,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->invalidated_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->progress_bar_ = 0;
-  chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
-  chunk->set_concurrent_sweeping_state(kSweepingDone);
+  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
+  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
   chunk->page_protection_change_mutex_ = new base::Mutex();
   chunk->write_unprotect_counter_ = 0;
   chunk->mutex_ = new base::Mutex();
@@ -760,7 +760,7 @@ Page* Page::ConvertNewToOld(Page* old_page) {
 size_t MemoryChunk::CommittedPhysicalMemory() {
   if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
     return size();
-  return high_water_mark_;
+  return high_water_mark_.Value();
 }
 bool MemoryChunk::IsPagedSpace() const {
@@ -2229,8 +2229,8 @@ void NewSpace::UpdateLinearAllocationArea() {
   Address new_top = to_space_.page_low();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   allocation_info_.Reset(new_top, to_space_.page_high());
-  original_top_ = top();
-  original_limit_ = limit();
+  original_top_.SetValue(top());
+  original_limit_.SetValue(limit());
   StartNextInlineAllocationStep();
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
......
@@ -368,10 +368,9 @@ class MemoryChunk {
       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
       + kPointerSize  // InvalidatedSlots* invalidated_slots_
       + kPointerSize  // SkipList* skip_list_
-      + kPointerSize  // std::atomic<intptr_t> high_water_mark_
+      + kPointerSize  // AtomicValue high_water_mark_
       + kPointerSize  // base::Mutex* mutex_
-      +
-      kPointerSize  // std::atomic<ConcurrentSweepingState> concurrent_sweeping_
+      + kPointerSize  // base::AtomicWord concurrent_sweeping_
       + kPointerSize  // base::Mutex* page_protection_change_mutex_
       + kPointerSize  // unitptr_t write_unprotect_counter_
       + kSizetSize * kNumTypes
@@ -435,10 +434,9 @@ class MemoryChunk {
     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
     intptr_t old_mark = 0;
     do {
-      old_mark = chunk->high_water_mark_;
-    } while (
-        (new_mark > old_mark) &&
-        !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
+      old_mark = chunk->high_water_mark_.Value();
+    } while ((new_mark > old_mark) &&
+             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
   }
   Address address() const {
@@ -457,15 +455,13 @@ class MemoryChunk {
     return addr >= area_start() && addr <= area_end();
   }
-  void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
-    concurrent_sweeping_ = state;
-  }
-  ConcurrentSweepingState concurrent_sweeping_state() const {
+  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
     return concurrent_sweeping_;
   }
-  bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
+  bool SweepingDone() {
+    return concurrent_sweeping_state().Value() == kSweepingDone;
+  }
   size_t size() const { return size_; }
   void set_size(size_t size) { size_ = size; }
@@ -526,7 +522,7 @@ class MemoryChunk {
   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory();
-  Address HighWaterMark() { return address() + high_water_mark_; }
+  Address HighWaterMark() { return address() + high_water_mark_.Value(); }
   int progress_bar() {
     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
@@ -623,9 +619,9 @@ class MemoryChunk {
   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
-  Space* owner() const { return owner_; }
-  void set_owner(Space* space) { owner_ = space; }
+  Space* owner() const { return owner_.Value(); }
+  void set_owner(Space* space) { owner_.SetValue(space); }
   bool IsPagedSpace() const;
@@ -660,7 +656,7 @@ class MemoryChunk {
   VirtualMemory reservation_;
   // The space owning this memory chunk.
-  std::atomic<Space*> owner_;
+  base::AtomicValue<Space*> owner_;
   Heap* heap_;
@@ -682,11 +678,11 @@ class MemoryChunk {
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  std::atomic<intptr_t> high_water_mark_;
+  base::AtomicValue<intptr_t> high_water_mark_;
   base::Mutex* mutex_;
-  std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
+  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
   base::Mutex* page_protection_change_mutex_;
@@ -1375,8 +1371,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
   V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
-    return address < lowest_ever_allocated_ ||
-           address >= highest_ever_allocated_;
+    return address < lowest_ever_allocated_.Value() ||
+           address >= highest_ever_allocated_.Value();
   }
   // Returns a MemoryChunk in which the memory region from commit_area_size to
@@ -1459,13 +1455,11 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     // values only if they did not change in between.
     Address ptr = kNullAddress;
     do {
-      ptr = lowest_ever_allocated_;
-    } while ((low < ptr) &&
-             !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
+      ptr = lowest_ever_allocated_.Value();
+    } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
     do {
-      ptr = highest_ever_allocated_;
-    } while ((high > ptr) &&
-             !highest_ever_allocated_.compare_exchange_weak(ptr, high));
+      ptr = highest_ever_allocated_.Value();
+    } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
   void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
@@ -1496,8 +1490,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   // conservative, i.e. not all addresses in 'allocated' space are allocated
   // to our heap. The range is [lowest, highest[, inclusive on the low end
   // and exclusive on the high end.
-  std::atomic<Address> lowest_ever_allocated_;
-  std::atomic<Address> highest_ever_allocated_;
+  base::AtomicValue<Address> lowest_ever_allocated_;
+  base::AtomicValue<Address> highest_ever_allocated_;
   VirtualMemory last_chunk_;
   Unmapper unmapper_;
@@ -2689,11 +2683,11 @@ class NewSpace : public SpaceWithLinearArea {
   void ResetOriginalTop() {
     DCHECK_GE(top(), original_top());
     DCHECK_LE(top(), original_limit());
-    original_top_ = top();
+    original_top_.SetValue(top());
   }
-  Address original_top() { return original_top_; }
-  Address original_limit() { return original_limit_; }
+  Address original_top() { return original_top_.Value(); }
+  Address original_limit() { return original_limit_.Value(); }
   // Return the address of the first allocatable address in the active
   // semispace. This may be the address where the first object resides.
@@ -2782,8 +2776,8 @@ class NewSpace : public SpaceWithLinearArea {
   // The top and the limit at the time of setting the linear allocation area.
   // These values can be accessed by background tasks.
-  std::atomic<Address> original_top_;
-  std::atomic<Address> original_limit_;
+  base::AtomicValue<Address> original_top_;
+  base::AtomicValue<Address> original_limit_;
   // The semispaces.
   SemiSpace to_space_;
......
@@ -17,7 +17,7 @@ namespace internal {
 Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
     : sweeper_(sweeper) {
-  sweeper_->stop_sweeper_tasks_ = true;
+  sweeper_->stop_sweeper_tasks_.SetValue(true);
   if (!sweeper_->sweeping_in_progress()) return;
   sweeper_->AbortAndWaitForTasks();
@@ -34,7 +34,7 @@ Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
 }
 Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
-  sweeper_->stop_sweeper_tasks_ = false;
+  sweeper_->stop_sweeper_tasks_.SetValue(false);
   if (!sweeper_->sweeping_in_progress()) return;
   sweeper_->StartSweeperTasks();
@@ -133,7 +133,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
 };
 void Sweeper::StartSweeping() {
-  CHECK(!stop_sweeper_tasks_);
+  CHECK(!stop_sweeper_tasks_.Value());
   sweeping_in_progress_ = true;
   iterability_in_progress_ = true;
   MajorNonAtomicMarkingState* marking_state =
@@ -366,14 +366,14 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
     // The allocated_bytes() counter is precisely the total size of objects.
     DCHECK_EQ(live_bytes, p->allocated_bytes());
   }
-  p->set_concurrent_sweeping_state(Page::kSweepingDone);
+  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   if (free_list_mode == IGNORE_FREE_LIST) return 0;
   return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
 }
 void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
   Page* page = nullptr;
-  while (!stop_sweeper_tasks_ &&
+  while (!stop_sweeper_tasks_.Value() &&
          ((page = GetSweepingPageSafe(identity)) != nullptr)) {
     ParallelSweepPage(page, identity);
   }
@@ -419,8 +419,9 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
       // the page protection mode from rx -> rw while sweeping.
       CodePageMemoryModificationScope code_page_scope(page);
-      DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
-      page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
+      DCHECK_EQ(Page::kSweepingPending,
+                page->concurrent_sweeping_state().Value());
+      page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       const FreeSpaceTreatmentMode free_space_mode =
           Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
       max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
@@ -466,17 +467,17 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
     // happened when the page was initially added, so it is skipped here.
     DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
   }
-  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
+  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
   sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
 }
 void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
   DCHECK_GE(page->area_size(),
             static_cast<size_t>(marking_state_->live_bytes(page)));
-  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
+  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
   page->ForAllFreeListCategories(
       [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
-  page->set_concurrent_sweeping_state(Page::kSweepingPending);
+  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
   heap_->paged_space(space)->IncreaseAllocatedBytes(
       marking_state_->live_bytes(page), page);
 }
@@ -568,10 +569,10 @@ void Sweeper::AddPageForIterability(Page* page) {
   DCHECK(iterability_in_progress_);
   DCHECK(!iterability_task_started_);
   DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
-  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
+  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
   iterability_list_.push_back(page);
-  page->set_concurrent_sweeping_state(Page::kSweepingPending);
+  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
 }
 void Sweeper::MakeIterable(Page* page) {
......
@@ -188,7 +188,7 @@ class Sweeper {
   // the semaphore for maintaining a task counter on the main thread.
   std::atomic<intptr_t> num_sweeping_tasks_;
   // Used by PauseOrCompleteScope to signal early bailout to tasks.
-  std::atomic<bool> stop_sweeper_tasks_;
+  base::AtomicValue<bool> stop_sweeper_tasks_;
   // Pages that are only made iterable but have their free lists ignored.
   IterabilityList iterability_list_;
......
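
Aside from the compare-and-swap loops, most call sites touched above follow a simpler publish/poll pattern: the main thread sets a flag (preemption_request, stop_sweeper_tasks_) and worker tasks poll it. A minimal, self-contained sketch of that pattern follows, with the base::AtomicValue spelling from the diff noted in comments; the variable and function names are illustrative, not the V8 declarations.

```cpp
// Sketch of the flag-polling pattern used by preemption_request and
// stop_sweeper_tasks_ above: one thread publishes a bool, another polls it.
#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

std::atomic<bool> preemption_request{false};  // AtomicValue<bool> in the revert

void WorkerLoop() {
  int steps = 0;
  // AtomicValue form: while (!preemption_request.Value()) { ... }
  while (!preemption_request) {
    ++steps;  // Stand-in for a chunk of marking/sweeping work.
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  std::cout << "preempted after " << steps << " steps\n";
}

int main() {
  std::thread worker(WorkerLoop);
  std::this_thread::sleep_for(std::chrono::milliseconds(20));
  preemption_request = true;  // AtomicValue form: preemption_request.SetValue(true)
  worker.join();
}
```
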