Commit 7032b91d authored by Hannes Payer, committed by Commit Bot

[heap] Cleanup: Use std::atomic<T> instead of base::AtomicNumber<T> in spaces.

Bug: chromium:842083
Change-Id: Ic0cfd84d56f48e61711cdbb695178837e1570e21
Reviewed-on: https://chromium-review.googlesource.com/1073427
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53388}
parent 246fd44c
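
The mechanical pattern of the cleanup is the same everywhere in the diff below: wrapper-style calls on base::AtomicNumber<T> (.Value(), .Increment(n), .Decrement(n), .SetValue(n)) become the overloaded operators of std::atomic<T> (implicit load, +=, -=, ++, assignment). The stand-alone sketch that follows only illustrates that mapping; it is not V8 code, and the SpaceCounter class name is invented for the example.

#include <atomic>
#include <cassert>
#include <cstddef>

// Hypothetical counter mirroring the MemoryAllocator pattern: a byte
// count updated from several threads, now stored in a std::atomic.
class SpaceCounter {
 public:
  void Allocate(size_t bytes) {
    size_ += bytes;              // was: size_.Increment(bytes)
  }
  void Free(size_t bytes) {
    assert(size_ >= bytes);      // was: DCHECK_GE(size_.Value(), bytes)
    size_ -= bytes;              // was: size_.Decrement(bytes)
  }
  void Reset() {
    size_ = 0;                   // was: size_.SetValue(0)
  }
  size_t size() const {
    return size_;                // was: size_.Value(); an implicit atomic load
  }

 private:
  std::atomic<size_t> size_{0};  // was: base::AtomicNumber<size_t> size_
};

int main() {
  SpaceCounter counter;
  counter.Allocate(4096);
  counter.Free(1024);
  assert(counter.size() == 3072);
  counter.Reset();
  return 0;
}
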
@@ -294,7 +294,7 @@ void MemoryAllocator::TearDown() {
unmapper()->TearDown();
// Check that spaces were torn down before MemoryAllocator.
- DCHECK_EQ(size_.Value(), 0u);
+ DCHECK_EQ(size_, 0u);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK_EQ(0, size_executable_);
capacity_ = 0;
@@ -319,7 +319,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- unmapper_->active_unmapping_tasks_.Decrement(1);
+ unmapper_->active_unmapping_tasks_--;
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
if (FLAG_trace_unmapper) {
PrintIsolate(unmapper_->heap_->isolate(),
@@ -350,9 +350,9 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
task->id());
}
DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
- DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
- DCHECK_GE(active_unmapping_tasks_.Value(), 0);
- active_unmapping_tasks_.Increment(1);
+ DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
+ DCHECK_GE(active_unmapping_tasks_, 0);
+ active_unmapping_tasks_++;
task_ids_[pending_unmapping_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
@@ -368,7 +368,7 @@ void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
}
}
pending_unmapping_tasks_ = 0;
- active_unmapping_tasks_.SetValue(0);
+ active_unmapping_tasks_ = 0;
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -391,7 +391,7 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
- if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
+ if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
// All previous unmapping tasks have been run to completion.
// Finalize those tasks to make room for new ones.
CancelAndWaitForPendingTasks();
@@ -491,7 +491,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
}
Address result = reservation.address();
- size_.Increment(reservation.size());
+ size_ += reservation.size();
controller->TakeControl(&reservation);
return result;
}
@@ -523,7 +523,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
// Failed to commit the body. Free the mapping and any partially committed
// regions inside it.
reservation.Free();
- size_.Decrement(reserve_size);
+ size_ -= reserve_size;
return kNullAddress;
}
@@ -831,16 +831,16 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
DCHECK(IsAligned(base, MemoryChunk::kAlignment));
if (base == kNullAddress) return nullptr;
- size_.Increment(chunk_size);
+ size_ += chunk_size;
// Update executable memory size.
- size_executable_.Increment(chunk_size);
+ size_executable_ += chunk_size;
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
address_hint, &reservation);
if (base == kNullAddress) return nullptr;
// Update executable memory size.
- size_executable_.Increment(reservation.size());
+ size_executable_ += reservation.size();
}
if (Heap::ShouldZapGarbage()) {
@@ -885,9 +885,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
CHECK(!last_chunk_.IsReserved());
last_chunk_.TakeControl(&reservation);
UncommitBlock(last_chunk_.address(), last_chunk_.size());
- size_.Decrement(chunk_size);
+ size_ -= chunk_size;
if (executable == EXECUTABLE) {
- size_executable_.Decrement(chunk_size);
+ size_executable_ -= chunk_size;
}
CHECK(last_chunk_.IsReserved());
return AllocateChunk(reserve_area_size, commit_area_size, executable,
@@ -1016,8 +1016,8 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
// partially starting at |start_free| will also release the potentially
// unused part behind the current page.
const size_t released_bytes = reservation->Release(start_free);
- DCHECK_GE(size_.Value(), released_bytes);
- size_.Decrement(released_bytes);
+ DCHECK_GE(size_, released_bytes);
+ size_ -= released_bytes;
isolate_->counters()->memory_allocated()->Decrement(
static_cast<int>(released_bytes));
}
@@ -1032,12 +1032,12 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
- DCHECK_GE(size_.Value(), static_cast<size_t>(size));
- size_.Decrement(size);
+ DCHECK_GE(size_, static_cast<size_t>(size));
+ size_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
if (chunk->executable() == EXECUTABLE) {
- DCHECK_GE(size_executable_.Value(), size);
- size_executable_.Decrement(size);
+ DCHECK_GE(size_executable_, size);
+ size_executable_ -= size;
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -1146,7 +1146,7 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
VirtualMemory reservation(start, size);
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
NOT_EXECUTABLE, owner, &reservation);
- size_.Increment(size);
+ size_ += size;
return chunk;
}
@@ -2775,7 +2775,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
// Blocks have to be a minimum size to hold free list items.
if (size_in_bytes < kMinBlockSize) {
page->add_wasted_memory(size_in_bytes);
- wasted_bytes_.Increment(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
return size_in_bytes;
}
@@ -1262,7 +1262,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t pending_unmapping_tasks_;
- base::AtomicNumber<intptr_t> active_unmapping_tasks_;
+ std::atomic<intptr_t> active_unmapping_tasks_;
friend class MemoryAllocator;
};
@@ -1321,10 +1321,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void Free(MemoryChunk* chunk);
// Returns allocated spaces in bytes.
- size_t Size() { return size_.Value(); }
+ size_t Size() { return size_; }
// Returns allocated executable spaces in bytes.
- size_t SizeExecutable() { return size_executable_.Value(); }
+ size_t SizeExecutable() { return size_executable_; }
// Returns the maximum available bytes of heaps.
size_t Available() {
@@ -1450,9 +1450,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
size_t capacity_;
// Allocated space size in bytes.
- base::AtomicNumber<size_t> size_;
+ std::atomic<size_t> size_;
// Allocated executable space size in bytes.
- base::AtomicNumber<size_t> size_executable_;
+ std::atomic<size_t> size_executable_;
// We keep the lowest and highest addresses allocated as a quick way
// of determining that pointers are outside the heap. The estimate is
@@ -1650,7 +1650,7 @@ class AllocationStats BASE_EMBEDDED {
}
// Accessors for the allocation statistics.
- size_t Capacity() { return capacity_.Value(); }
+ size_t Capacity() { return capacity_; }
size_t MaxCapacity() { return max_capacity_; }
size_t Size() { return size_; }
#ifdef DEBUG
@@ -1675,19 +1675,16 @@ class AllocationStats BASE_EMBEDDED {
}
void DecreaseCapacity(size_t bytes) {
- size_t capacity = capacity_.Value();
- DCHECK_GE(capacity, bytes);
- DCHECK_GE(capacity - bytes, size_);
- USE(capacity);
- capacity_.Decrement(bytes);
+ DCHECK_GE(capacity_, bytes);
+ DCHECK_GE(capacity_ - bytes, size_);
+ capacity_ -= bytes;
}
void IncreaseCapacity(size_t bytes) {
- size_t capacity = capacity_.Value();
- DCHECK_GE(capacity + bytes, capacity);
- capacity_.Increment(bytes);
- if (capacity > max_capacity_) {
- max_capacity_ = capacity;
+ DCHECK_GE(capacity_ + bytes, capacity_);
+ capacity_ += bytes;
+ if (capacity_ > max_capacity_) {
+ max_capacity_ = capacity_;
}
}
@@ -1696,7 +1693,7 @@ class AllocationStats BASE_EMBEDDED {
// bookkeeping structures) currently in the space.
// During evacuation capacity of the main spaces is accessed from multiple
// threads to check the old generation hard limit.
- base::AtomicNumber<size_t> capacity_;
+ std::atomic<size_t> capacity_;
// |max_capacity_|: The maximum capacity ever observed.
size_t max_capacity_;
@@ -1787,7 +1784,7 @@ class V8_EXPORT_PRIVATE FreeList {
void Reset();
void ResetStats() {
- wasted_bytes_.SetValue(0);
+ wasted_bytes_ = 0;
ForAllFreeListCategories(
[](FreeListCategory* category) { category->ResetStats(); });
}
@@ -1815,7 +1812,7 @@ class V8_EXPORT_PRIVATE FreeList {
size_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
- size_t wasted_bytes() { return wasted_bytes_.Value(); }
+ size_t wasted_bytes() { return wasted_bytes_; }
template <typename Callback>
void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
@@ -1911,7 +1908,7 @@ class V8_EXPORT_PRIVATE FreeList {
return categories_[type];
}
- base::AtomicNumber<size_t> wasted_bytes_;
+ std::atomic<size_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
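
A general C++ note on the new form, not something this commit configures explicitly: the overloaded operators used above (++, +=, -=) are atomic read-modify-write operations with sequentially consistent ordering by default, and plain reads and assignments of a std::atomic are seq_cst loads and stores. Where only atomicity of a counter matters, the explicit member functions can request weaker ordering; the snippet below is a generic illustration of that option, not a statement about what the V8 heap requires.

#include <atomic>
#include <cstddef>

std::atomic<size_t> counter{0};

void BumpDefault() {
  counter += 1;  // atomic RMW, std::memory_order_seq_cst by default
}

void BumpRelaxed() {
  // The same atomic increment with explicitly relaxed ordering: the
  // update is still atomic, but imposes no ordering on surrounding
  // memory operations.
  counter.fetch_add(1, std::memory_order_relaxed);
}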