Commit a1ef54bb authored by Hannes Payer, committed by Commit Bot

[heap] Cleanup: Use std::atomic<T> instead of base::AtomicValue<T> in heap/*.

Bug: chromium:842083
Change-Id: I7f3d7cb64c1263e081d1f9bd36939aaf1a0e3da8
Reviewed-on: https://chromium-review.googlesource.com/1135322
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54443}
parent 782b793b
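Editorial note: the patch is a mechanical migration from V8's base::AtomicValue<T> wrapper to std::atomic<T>. As orientation for the hunks below, here is a minimal sketch (not part of the commit, names illustrative) of how the old calls map onto the std::atomic interface:

#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> value{0};

  // old: value.SetValue(1);        new: plain assignment (seq_cst store)
  value = 1;

  // old: int v = value.Value();    new: implicit conversion (seq_cst load)
  int v = value;
  assert(v == 1);

  // old: value.TrySetValue(1, 2);  new: compare_exchange_strong; note that
  // the expected value is passed by reference and overwritten on failure.
  int expected = 1;
  bool ok = value.compare_exchange_strong(expected, 2);
  assert(ok && value == 2);
  return 0;
}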
......@@ -627,7 +627,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
if (task_state->preemption_request.Value()) {
if (task_state->preemption_request) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ConcurrentMarking::Run Preempted");
break;
......@@ -703,7 +703,7 @@ void ConcurrentMarking::ScheduleTasks() {
heap_->isolate()->PrintWithTimestamp(
"Scheduling concurrent marking task %d\n", i);
}
task_state_[i].preemption_request.SetValue(false);
task_state_[i].preemption_request = false;
is_pending_[i] = true;
++pending_task_count_;
auto task =
......@@ -744,7 +744,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
is_pending_[i] = false;
--pending_task_count_;
} else if (stop_request == StopRequest::PREEMPT_TASKS) {
task_state_[i].preemption_request.SetValue(true);
task_state_[i].preemption_request = true;
}
}
}
......
......@@ -95,7 +95,7 @@ class ConcurrentMarking {
struct TaskState {
// The main thread sets this flag to true when it wants the concurrent
// marker to give up the worker thread.
base::AtomicValue<bool> preemption_request;
std::atomic<bool> preemption_request;
LiveBytesMap live_bytes;
size_t marked_bytes = 0;
......
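For context, preemption_request is a simple cross-thread handshake: the main thread stores true, the marking task loads it between work units. A standalone sketch of that pattern with std::atomic<bool> (illustrative names, not V8 code):

#include <atomic>
#include <thread>

struct TaskState {
  // Set by the main thread when it wants the worker to yield.
  std::atomic<bool> preemption_request{false};
};

void RunMarkingTask(TaskState* state) {
  for (;;) {
    // ... process one unit of marking work ...
    if (state->preemption_request) break;  // implicit load(), seq_cst
  }
}

int main() {
  TaskState state;
  std::thread worker(RunMarkingTask, &state);
  state.preemption_request = true;  // implicit store(), seq_cst
  worker.join();
  return 0;
}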
......@@ -1425,7 +1425,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
memory_pressure_level_ = MemoryPressureLevel::kNone;
}
tracer()->Stop(collector);
......@@ -3484,9 +3484,9 @@ void Heap::CheckMemoryPressure() {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
}
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
if (memory_pressure_level_ == MemoryPressureLevel::kCritical) {
CollectGarbageOnMemoryPressure();
} else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
} else if (memory_pressure_level_ == MemoryPressureLevel::kModerate) {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
StartIncrementalMarking(kReduceMemoryFootprintMask,
GarbageCollectionReason::kMemoryPressure);
......@@ -3538,8 +3538,8 @@ void Heap::CollectGarbageOnMemoryPressure() {
void Heap::MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked) {
MemoryPressureLevel previous = memory_pressure_level_.Value();
memory_pressure_level_.SetValue(level);
MemoryPressureLevel previous = memory_pressure_level_;
memory_pressure_level_ = level;
if ((previous != MemoryPressureLevel::kCritical &&
level == MemoryPressureLevel::kCritical) ||
(previous == MemoryPressureLevel::kNone &&
......
......@@ -704,7 +704,7 @@ class Heap {
bool ShouldOptimizeForMemoryUsage();
bool HighMemoryPressure() {
return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
return memory_pressure_level_ != MemoryPressureLevel::kNone;
}
void RestoreHeapLimit(size_t heap_limit) {
......@@ -2005,7 +2005,7 @@ class Heap {
// Stores the memory pressure level that set by MemoryPressureNotification
// and reset by a mark-compact garbage collection.
base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
std::atomic<MemoryPressureLevel> memory_pressure_level_;
std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
near_heap_limit_callbacks_;
......
......@@ -49,17 +49,18 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
virtual ~Item() = default;
// Marks an item as being finished.
void MarkFinished() { CHECK(state_.TrySetValue(kProcessing, kFinished)); }
void MarkFinished() { CHECK_EQ(kProcessing, state_.exchange(kFinished)); }
private:
enum ProcessingState { kAvailable, kProcessing, kFinished };
enum ProcessingState : uintptr_t { kAvailable, kProcessing, kFinished };
bool TryMarkingAsProcessing() {
return state_.TrySetValue(kAvailable, kProcessing);
ProcessingState available = kAvailable;
return state_.compare_exchange_strong(available, kProcessing);
}
bool IsFinished() { return state_.Value() == kFinished; }
bool IsFinished() { return state_ == kFinished; }
base::AtomicValue<ProcessingState> state_{kAvailable};
std::atomic<ProcessingState> state_{kAvailable};
friend class ItemParallelJob;
friend class ItemParallelJob::Task;
......
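The Item state machine above leans on two std::atomic operations: exchange() for the finish transition (so the previous state can still be asserted) and compare_exchange_strong() for the available-to-processing claim. A minimal standalone sketch of the same transitions (illustrative, mirroring the three-state enum shown above):

#include <atomic>
#include <cassert>
#include <cstdint>

enum ProcessingState : std::uintptr_t { kAvailable, kProcessing, kFinished };

struct Item {
  std::atomic<ProcessingState> state{kAvailable};

  bool TryMarkingAsProcessing() {
    // compare_exchange_strong takes the expected value by reference and
    // overwrites it with the observed value on failure, hence the local.
    ProcessingState available = kAvailable;
    return state.compare_exchange_strong(available, kProcessing);
  }

  void MarkFinished() {
    // exchange() returns the previous value, so the old
    // CHECK(TrySetValue(kProcessing, kFinished)) becomes an equality check.
    ProcessingState previous = state.exchange(kFinished);
    assert(previous == kProcessing);
    (void)previous;
  }
};

int main() {
  Item item;
  bool claimed = item.TryMarkingAsProcessing();
  bool claimed_again = item.TryMarkingAsProcessing();
  assert(claimed && !claimed_again);  // only the first claim succeeds
  item.MarkFinished();
  (void)claimed;
  (void)claimed_again;
  return 0;
}

The explicit uintptr_t underlying type presumably keeps std::atomic<ProcessingState> word-sized, in the same spirit as the static_assert on std::atomic<intptr_t> added in spaces.h further down.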
......@@ -3379,7 +3379,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// that this adds unusable memory into the free list that is later on
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
Heap::ShouldZapGarbage()
? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
......
......@@ -621,8 +621,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->invalidated_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->set_concurrent_sweeping_state(kSweepingDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
chunk->mutex_ = new base::Mutex();
......@@ -760,7 +760,7 @@ Page* Page::ConvertNewToOld(Page* old_page) {
size_t MemoryChunk::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
return size();
return high_water_mark_.Value();
return high_water_mark_;
}
bool MemoryChunk::IsPagedSpace() const {
......@@ -2229,8 +2229,8 @@ void NewSpace::UpdateLinearAllocationArea() {
Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
original_top_.SetValue(top());
original_limit_.SetValue(limit());
original_top_ = top();
original_limit_ = limit();
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
......
......@@ -368,9 +368,10 @@ class MemoryChunk {
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kPointerSize // InvalidatedSlots* invalidated_slots_
+ kPointerSize // SkipList* skip_list_
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // std::atomic<intptr_t> high_water_mark_
+ kPointerSize // base::Mutex* mutex_
+ kPointerSize // base::AtomicWord concurrent_sweeping_
+
kPointerSize // std::atomic<ConcurrentSweepingState> concurrent_sweeping_
+ kPointerSize // base::Mutex* page_protection_change_mutex_
+ kPointerSize // unitptr_t write_unprotect_counter_
+ kSizetSize * kNumTypes
......@@ -434,9 +435,10 @@ class MemoryChunk {
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = 0;
do {
old_mark = chunk->high_water_mark_.Value();
} while ((new_mark > old_mark) &&
!chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
old_mark = chunk->high_water_mark_;
} while (
(new_mark > old_mark) &&
!chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
}
Address address() const {
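UpdateHighWaterMark above is the patch's compare_exchange_weak loop: it raises the mark monotonically and gives up as soon as another thread has published a higher value. A standalone sketch of that "atomic max" idiom (illustrative, not V8 code):

#include <atomic>
#include <cassert>
#include <cstdint>

// Raises *mark to new_mark unless a concurrent update already stored a
// higher value; mirrors MemoryChunk::UpdateHighWaterMark above.
void UpdateMax(std::atomic<std::intptr_t>* mark, std::intptr_t new_mark) {
  std::intptr_t old_mark = 0;
  do {
    old_mark = *mark;  // implicit load(), seq_cst
  } while ((new_mark > old_mark) &&
           !mark->compare_exchange_weak(old_mark, new_mark));
}

int main() {
  std::atomic<std::intptr_t> high_water_mark{16};
  UpdateMax(&high_water_mark, 64);  // raises the mark
  UpdateMax(&high_water_mark, 32);  // no-op: 32 < 64
  assert(high_water_mark == 64);
  return 0;
}

compare_exchange_weak may fail spuriously, which the surrounding loop absorbs; it also writes the observed value back into old_mark, although the loop re-reads it at the top anyway. The same idiom appears in MemoryAllocator::UpdateAllocatedSpaceLimits further down.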
......@@ -455,14 +457,16 @@ class MemoryChunk {
return addr >= area_start() && addr <= area_end();
}
base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
return concurrent_sweeping_;
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
}
bool SweepingDone() {
return concurrent_sweeping_state().Value() == kSweepingDone;
ConcurrentSweepingState concurrent_sweeping_state() {
return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
}
bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
......@@ -522,7 +526,7 @@ class MemoryChunk {
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory();
Address HighWaterMark() { return address() + high_water_mark_.Value(); }
Address HighWaterMark() { return address() + high_water_mark_; }
int progress_bar() {
DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
......@@ -619,9 +623,9 @@ class MemoryChunk {
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
Space* owner() const { return owner_.Value(); }
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_.SetValue(space); }
void set_owner(Space* space) { owner_ = space; }
bool IsPagedSpace() const;
......@@ -656,7 +660,7 @@ class MemoryChunk {
VirtualMemory reservation_;
// The space owning this memory chunk.
base::AtomicValue<Space*> owner_;
std::atomic<Space*> owner_;
Heap* heap_;
......@@ -678,11 +682,11 @@ class MemoryChunk {
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
base::AtomicValue<intptr_t> high_water_mark_;
std::atomic<intptr_t> high_water_mark_;
base::Mutex* mutex_;
base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
std::atomic<intptr_t> concurrent_sweeping_;
base::Mutex* page_protection_change_mutex_;
......@@ -734,6 +738,9 @@ class MemoryChunk {
friend class PagedSpace;
};
static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
"sizeof(std::atomic<intptr_t>) == kPointerSize");
static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
"kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
......@@ -1371,8 +1378,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// Returns an indication of whether a pointer is in a space that has
// been allocated by this MemoryAllocator.
V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
return address < lowest_ever_allocated_.Value() ||
address >= highest_ever_allocated_.Value();
return address < lowest_ever_allocated_ ||
address >= highest_ever_allocated_;
}
// Returns a MemoryChunk in which the memory region from commit_area_size to
......@@ -1455,11 +1462,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// values only if they did not change in between.
Address ptr = kNullAddress;
do {
ptr = lowest_ever_allocated_.Value();
} while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
ptr = lowest_ever_allocated_;
} while ((low < ptr) &&
!lowest_ever_allocated_.compare_exchange_weak(ptr, low));
do {
ptr = highest_ever_allocated_.Value();
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
ptr = highest_ever_allocated_;
} while ((high > ptr) &&
!highest_ever_allocated_.compare_exchange_weak(ptr, high));
}
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
......@@ -1490,8 +1499,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// conservative, i.e. not all addresses in 'allocated' space are allocated
// to our heap. The range is [lowest, highest[, inclusive on the low end
// and exclusive on the high end.
base::AtomicValue<Address> lowest_ever_allocated_;
base::AtomicValue<Address> highest_ever_allocated_;
std::atomic<Address> lowest_ever_allocated_;
std::atomic<Address> highest_ever_allocated_;
VirtualMemory last_chunk_;
Unmapper unmapper_;
......@@ -2683,11 +2692,11 @@ class NewSpace : public SpaceWithLinearArea {
void ResetOriginalTop() {
DCHECK_GE(top(), original_top());
DCHECK_LE(top(), original_limit());
original_top_.SetValue(top());
original_top_ = top();
}
Address original_top() { return original_top_.Value(); }
Address original_limit() { return original_limit_.Value(); }
Address original_top() { return original_top_; }
Address original_limit() { return original_limit_; }
// Return the address of the first allocatable address in the active
// semispace. This may be the address where the first object resides.
......@@ -2776,8 +2785,8 @@ class NewSpace : public SpaceWithLinearArea {
// The top and the limit at the time of setting the linear allocation area.
// These values can be accessed by background tasks.
base::AtomicValue<Address> original_top_;
base::AtomicValue<Address> original_limit_;
std::atomic<Address> original_top_;
std::atomic<Address> original_limit_;
// The semispaces.
SemiSpace to_space_;
......
......@@ -17,7 +17,7 @@ namespace internal {
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
: sweeper_(sweeper) {
sweeper_->stop_sweeper_tasks_.SetValue(true);
sweeper_->stop_sweeper_tasks_ = true;
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->AbortAndWaitForTasks();
......@@ -34,7 +34,7 @@ Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
}
Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
sweeper_->stop_sweeper_tasks_.SetValue(false);
sweeper_->stop_sweeper_tasks_ = false;
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->StartSweeperTasks();
......@@ -133,7 +133,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
};
void Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_.Value());
CHECK(!stop_sweeper_tasks_);
sweeping_in_progress_ = true;
iterability_in_progress_ = true;
MajorNonAtomicMarkingState* marking_state =
......@@ -366,14 +366,14 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
// The allocated_bytes() counter is precisely the total size of objects.
DCHECK_EQ(live_bytes, p->allocated_bytes());
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
p->set_concurrent_sweeping_state(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
Page* page = nullptr;
while (!stop_sweeper_tasks_.Value() &&
while (!stop_sweeper_tasks_ &&
((page = GetSweepingPageSafe(identity)) != nullptr)) {
ParallelSweepPage(page, identity);
}
......@@ -419,9 +419,8 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
// the page protection mode from rx -> rw while sweeping.
CodePageMemoryModificationScope code_page_scope(page);
DCHECK_EQ(Page::kSweepingPending,
page->concurrent_sweeping_state().Value());
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
......@@ -467,17 +466,17 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
// happened when the page was initially added, so it is skipped here.
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
page->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
page->set_concurrent_sweeping_state(Page::kSweepingPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
}
......@@ -569,10 +568,10 @@ void Sweeper::AddPageForIterability(Page* page) {
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
iterability_list_.push_back(page);
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
page->set_concurrent_sweeping_state(Page::kSweepingPending);
}
void Sweeper::MakeIterable(Page* page) {
......
......@@ -188,7 +188,7 @@ class Sweeper {
// the semaphore for maintaining a task counter on the main thread.
std::atomic<intptr_t> num_sweeping_tasks_;
// Used by PauseOrCompleteScope to signal early bailout to tasks.
base::AtomicValue<bool> stop_sweeper_tasks_;
std::atomic<bool> stop_sweeper_tasks_;
// Pages that are only made iterable but have their free lists ignored.
IterabilityList iterability_list_;
......