Commit 7032b91d authored by Hannes Payer, committed by Commit Bot

[heap] Cleanup: Use std::atomic<T> instead of base::AtomicNumber<T> in spaces.

Bug: chromium:842083
Change-Id: Ic0cfd84d56f48e61711cdbb695178837e1570e21
Reviewed-on: https://chromium-review.googlesource.com/1073427
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53388}
parent 246fd44c
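
The change is mechanical: each counter previously wrapped in base::AtomicNumber<T> becomes a plain std::atomic<T>, and the call sites switch from Increment/Decrement/Value/SetValue to the overloaded atomic operators. As a minimal sketch (not V8 code; the class and member names below are illustrative), the same pattern using only the standard <atomic> header looks like this:

// Minimal sketch, assuming nothing beyond the C++ standard library.
// Shows how the old AtomicNumber-style calls map onto std::atomic operators.
#include <atomic>
#include <cstddef>

class AllocationCounter {
 public:
  // Replaces counter_.Increment(bytes): atomic read-modify-write (fetch_add).
  void Increase(size_t bytes) { counter_ += bytes; }
  // Replaces counter_.Decrement(bytes): atomic fetch_sub.
  void Decrease(size_t bytes) { counter_ -= bytes; }
  // Replaces counter_.Value(): atomic load via the implicit conversion operator.
  size_t Value() const { return counter_; }
  // Replaces counter_.SetValue(0): atomic store via assignment.
  void Reset() { counter_ = 0; }

 private:
  // Compound assignment and the conversion operator on std::atomic default to
  // sequentially consistent memory ordering.
  std::atomic<size_t> counter_{0};
};

int main() {
  AllocationCounter c;
  c.Increase(64);
  c.Decrease(16);
  return c.Value() == 48 ? 0 : 1;  // exits with 0
}

Dropping the wrapper lets the call sites in the diff below use ordinary ++, +=, -= and = syntax on size_, size_executable_, capacity_, wasted_bytes_ and active_unmapping_tasks_, while the operations stay atomic (sequentially consistent by default), so the counters remain safe to update from the background unmapper tasks and from multiple GC threads.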
@@ -294,7 +294,7 @@ void MemoryAllocator::TearDown() {
   unmapper()->TearDown();
   // Check that spaces were torn down before MemoryAllocator.
-  DCHECK_EQ(size_.Value(), 0u);
+  DCHECK_EQ(size_, 0u);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK_EQ(0, size_executable_);
   capacity_ = 0;
@@ -319,7 +319,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
     TRACE_BACKGROUND_GC(tracer_,
                         GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
     unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
-    unmapper_->active_unmapping_tasks_.Decrement(1);
+    unmapper_->active_unmapping_tasks_--;
     unmapper_->pending_unmapping_tasks_semaphore_.Signal();
     if (FLAG_trace_unmapper) {
       PrintIsolate(unmapper_->heap_->isolate(),
@@ -350,9 +350,9 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
                    task->id());
     }
     DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
-    DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
-    DCHECK_GE(active_unmapping_tasks_.Value(), 0);
-    active_unmapping_tasks_.Increment(1);
+    DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
+    DCHECK_GE(active_unmapping_tasks_, 0);
+    active_unmapping_tasks_++;
     task_ids_[pending_unmapping_tasks_++] = task->id();
     V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
   } else {
@@ -368,7 +368,7 @@ void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
     }
   }
   pending_unmapping_tasks_ = 0;
-  active_unmapping_tasks_.SetValue(0);
+  active_unmapping_tasks_ = 0;
   if (FLAG_trace_unmapper) {
     PrintIsolate(
@@ -391,7 +391,7 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
 bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
   DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
-  if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
+  if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
     // All previous unmapping tasks have been run to completion.
     // Finalize those tasks to make room for new ones.
     CancelAndWaitForPendingTasks();
@@ -491,7 +491,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
   }
   Address result = reservation.address();
-  size_.Increment(reservation.size());
+  size_ += reservation.size();
   controller->TakeControl(&reservation);
   return result;
 }
@@ -523,7 +523,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
     // Failed to commit the body. Free the mapping and any partially committed
     // regions inside it.
     reservation.Free();
-    size_.Decrement(reserve_size);
+    size_ -= reserve_size;
     return kNullAddress;
   }
@@ -831,16 +831,16 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
         code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
     DCHECK(IsAligned(base, MemoryChunk::kAlignment));
     if (base == kNullAddress) return nullptr;
-    size_.Increment(chunk_size);
+    size_ += chunk_size;
     // Update executable memory size.
-    size_executable_.Increment(chunk_size);
+    size_executable_ += chunk_size;
   } else {
     base = AllocateAlignedMemory(chunk_size, commit_size,
                                  MemoryChunk::kAlignment, executable,
                                  address_hint, &reservation);
     if (base == kNullAddress) return nullptr;
     // Update executable memory size.
-    size_executable_.Increment(reservation.size());
+    size_executable_ += reservation.size();
   }
   if (Heap::ShouldZapGarbage()) {
@@ -885,9 +885,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
     CHECK(!last_chunk_.IsReserved());
     last_chunk_.TakeControl(&reservation);
     UncommitBlock(last_chunk_.address(), last_chunk_.size());
-    size_.Decrement(chunk_size);
+    size_ -= chunk_size;
     if (executable == EXECUTABLE) {
-      size_executable_.Decrement(chunk_size);
+      size_executable_ -= chunk_size;
     }
     CHECK(last_chunk_.IsReserved());
     return AllocateChunk(reserve_area_size, commit_area_size, executable,
@@ -1016,8 +1016,8 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
   // partially starting at |start_free| will also release the potentially
   // unused part behind the current page.
   const size_t released_bytes = reservation->Release(start_free);
-  DCHECK_GE(size_.Value(), released_bytes);
-  size_.Decrement(released_bytes);
+  DCHECK_GE(size_, released_bytes);
+  size_ -= released_bytes;
   isolate_->counters()->memory_allocated()->Decrement(
       static_cast<int>(released_bytes));
 }
@@ -1032,12 +1032,12 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   VirtualMemory* reservation = chunk->reserved_memory();
   const size_t size =
       reservation->IsReserved() ? reservation->size() : chunk->size();
-  DCHECK_GE(size_.Value(), static_cast<size_t>(size));
-  size_.Decrement(size);
+  DCHECK_GE(size_, static_cast<size_t>(size));
+  size_ -= size;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   if (chunk->executable() == EXECUTABLE) {
-    DCHECK_GE(size_executable_.Value(), size);
-    size_executable_.Decrement(size);
+    DCHECK_GE(size_executable_, size);
+    size_executable_ -= size;
   }
   chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -1146,7 +1146,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
   VirtualMemory reservation(start, size);
   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                           NOT_EXECUTABLE, owner, &reservation);
-  size_.Increment(size);
+  size_ += size;
   return chunk;
 }
@@ -2775,7 +2775,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
   // Blocks have to be a minimum size to hold free list items.
   if (size_in_bytes < kMinBlockSize) {
     page->add_wasted_memory(size_in_bytes);
-    wasted_bytes_.Increment(size_in_bytes);
+    wasted_bytes_ += size_in_bytes;
    return size_in_bytes;
   }
...
@@ -1262,7 +1262,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
     base::Semaphore pending_unmapping_tasks_semaphore_;
     intptr_t pending_unmapping_tasks_;
-    base::AtomicNumber<intptr_t> active_unmapping_tasks_;
+    std::atomic<intptr_t> active_unmapping_tasks_;
     friend class MemoryAllocator;
   };
@@ -1321,10 +1321,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   void Free(MemoryChunk* chunk);
   // Returns allocated spaces in bytes.
-  size_t Size() { return size_.Value(); }
+  size_t Size() { return size_; }
   // Returns allocated executable spaces in bytes.
-  size_t SizeExecutable() { return size_executable_.Value(); }
+  size_t SizeExecutable() { return size_executable_; }
   // Returns the maximum available bytes of heaps.
   size_t Available() {
@@ -1450,9 +1450,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   size_t capacity_;
   // Allocated space size in bytes.
-  base::AtomicNumber<size_t> size_;
+  std::atomic<size_t> size_;
   // Allocated executable space size in bytes.
-  base::AtomicNumber<size_t> size_executable_;
+  std::atomic<size_t> size_executable_;
   // We keep the lowest and highest addresses allocated as a quick way
   // of determining that pointers are outside the heap. The estimate is
@@ -1650,7 +1650,7 @@ class AllocationStats BASE_EMBEDDED {
   }
   // Accessors for the allocation statistics.
-  size_t Capacity() { return capacity_.Value(); }
+  size_t Capacity() { return capacity_; }
   size_t MaxCapacity() { return max_capacity_; }
   size_t Size() { return size_; }
 #ifdef DEBUG
@@ -1675,19 +1675,16 @@ class AllocationStats BASE_EMBEDDED {
   }
   void DecreaseCapacity(size_t bytes) {
-    size_t capacity = capacity_.Value();
-    DCHECK_GE(capacity, bytes);
-    DCHECK_GE(capacity - bytes, size_);
-    USE(capacity);
-    capacity_.Decrement(bytes);
+    DCHECK_GE(capacity_, bytes);
+    DCHECK_GE(capacity_ - bytes, size_);
+    capacity_ -= bytes;
   }
   void IncreaseCapacity(size_t bytes) {
-    size_t capacity = capacity_.Value();
-    DCHECK_GE(capacity + bytes, capacity);
-    capacity_.Increment(bytes);
-    if (capacity > max_capacity_) {
-      max_capacity_ = capacity;
+    DCHECK_GE(capacity_ + bytes, capacity_);
+    capacity_ += bytes;
+    if (capacity_ > max_capacity_) {
+      max_capacity_ = capacity_;
     }
   }
@@ -1696,7 +1693,7 @@ class AllocationStats BASE_EMBEDDED {
   // bookkeeping structures) currently in the space.
   // During evacuation capacity of the main spaces is accessed from multiple
   // threads to check the old generation hard limit.
-  base::AtomicNumber<size_t> capacity_;
+  std::atomic<size_t> capacity_;
   // |max_capacity_|: The maximum capacity ever observed.
   size_t max_capacity_;
@@ -1787,7 +1784,7 @@ class V8_EXPORT_PRIVATE FreeList {
   void Reset();
   void ResetStats() {
-    wasted_bytes_.SetValue(0);
+    wasted_bytes_ = 0;
     ForAllFreeListCategories(
         [](FreeListCategory* category) { category->ResetStats(); });
   }
@@ -1815,7 +1812,7 @@ class V8_EXPORT_PRIVATE FreeList {
   size_t EvictFreeListItems(Page* page);
   bool ContainsPageFreeListItems(Page* page);
-  size_t wasted_bytes() { return wasted_bytes_.Value(); }
+  size_t wasted_bytes() { return wasted_bytes_; }
   template <typename Callback>
   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
@@ -1911,7 +1908,7 @@ class V8_EXPORT_PRIVATE FreeList {
     return categories_[type];
   }
-  base::AtomicNumber<size_t> wasted_bytes_;
+  std::atomic<size_t> wasted_bytes_;
   FreeListCategory* categories_[kNumberOfCategories];
   friend class FreeListCategory;
...