Commit ae489dec authored by Dan Elphick, committed by Commit Bot

[heap] Move MemoryChunk fields to BasicMemoryChunk

This moves into BasicMemoryChunk several fields that ReadOnlySpace pages
will need once they stop using MemoryChunk.

Additionally, AllocationStats is moved from spaces.h into
allocation-stats.h.

Bug: v8:10473, v8:10454
Change-Id: I76a66565a260126e629bd7588a5418267dfa8423
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2228722
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68134}
parent 262a1078
......@@ -2428,6 +2428,7 @@ v8_source_set("v8_base_without_compiler") {
"src/handles/maybe-handles.h",
"src/handles/persistent-handles.cc",
"src/handles/persistent-handles.h",
"src/heap/allocation-stats.h",
"src/heap/array-buffer-collector.cc",
"src/heap/array-buffer-collector.h",
"src/heap/array-buffer-sweeper.cc",
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_ALLOCATION_STATS_H_
#define V8_HEAP_ALLOCATION_STATS_H_
#include <atomic>
#include <unordered_map>
#include "src/base/macros.h"
#include "src/heap/basic-memory-chunk.h"
namespace v8 {
namespace internal {
// An abstraction of the accounting statistics of a page-structured space.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in conjunction
// with capacity, or else they always balance increases and decreases to the
// non-capacity stats.
class AllocationStats {
public:
AllocationStats() { Clear(); }
AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
capacity_ = stats.capacity_.load();
max_capacity_ = stats.max_capacity_;
size_.store(stats.size_);
#ifdef DEBUG
allocated_on_page_ = stats.allocated_on_page_;
#endif
return *this;
}
// Zero out all the allocation statistics (i.e., no capacity).
void Clear() {
capacity_ = 0;
max_capacity_ = 0;
ClearSize();
}
void ClearSize() {
size_ = 0;
#ifdef DEBUG
allocated_on_page_.clear();
#endif
}
// Accessors for the allocation statistics.
size_t Capacity() { return capacity_; }
size_t MaxCapacity() { return max_capacity_; }
size_t Size() { return size_; }
#ifdef DEBUG
size_t AllocatedOnPage(BasicMemoryChunk* page) {
return allocated_on_page_[page];
}
#endif
void IncreaseAllocatedBytes(size_t bytes, BasicMemoryChunk* page) {
#ifdef DEBUG
size_t size = size_;
DCHECK_GE(size + bytes, size);
#endif
size_.fetch_add(bytes);
#ifdef DEBUG
allocated_on_page_[page] += bytes;
#endif
}
void DecreaseAllocatedBytes(size_t bytes, BasicMemoryChunk* page) {
DCHECK_GE(size_, bytes);
size_.fetch_sub(bytes);
#ifdef DEBUG
DCHECK_GE(allocated_on_page_[page], bytes);
allocated_on_page_[page] -= bytes;
#endif
}
void DecreaseCapacity(size_t bytes) {
DCHECK_GE(capacity_, bytes);
DCHECK_GE(capacity_ - bytes, size_);
capacity_ -= bytes;
}
void IncreaseCapacity(size_t bytes) {
DCHECK_GE(capacity_ + bytes, capacity_);
capacity_ += bytes;
if (capacity_ > max_capacity_) {
max_capacity_ = capacity_;
}
}
private:
// |capacity_|: The number of object-area bytes (i.e., not including page
// bookkeeping structures) currently in the space.
// During evacuation, the capacity of the main spaces is accessed from
// multiple threads to check the old generation hard limit.
std::atomic<size_t> capacity_;
// |max_capacity_|: The maximum capacity ever observed.
size_t max_capacity_;
// |size_|: The number of allocated bytes.
std::atomic<size_t> size_;
#ifdef DEBUG
std::unordered_map<BasicMemoryChunk*, size_t, BasicMemoryChunk::Hasher>
allocated_on_page_;
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_ALLOCATION_STATS_H_
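
For orientation, a minimal usage sketch of AllocationStats, not part of the
change itself; the chunk pointer and byte counts are illustrative. It shows
the balancing discipline the class comment describes: allocated bytes are
always decreased before the capacity that backs them, so the DCHECK in
DecreaseCapacity holds.

  // Illustrative sketch only; `page` stands in for a real chunk pointer.
  AllocationStats stats;
  BasicMemoryChunk* page = nullptr;
  const size_t kPageBytes = 256 * 1024;    // illustrative page size
  stats.IncreaseCapacity(kPageBytes);      // a page joins the space
  stats.IncreaseAllocatedBytes(64, page);  // an object is allocated on it
  stats.DecreaseAllocatedBytes(64, page);  // size is decreased first...
  stats.DecreaseCapacity(kPageBytes);      // ...so capacity never drops below it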
......@@ -7,6 +7,7 @@
#include <cstdlib>
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/objects/heap-object.h"
namespace v8 {
......@@ -38,5 +39,24 @@ void BasicMemoryChunk::ReleaseMarkingBitmap() {
marking_bitmap_ = nullptr;
}
// static
BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
size_t size, Address area_start,
Address area_end, Space* owner,
VirtualMemory reservation) {
BasicMemoryChunk* chunk = FromAddress(base);
DCHECK_EQ(base, chunk->address());
new (chunk) BasicMemoryChunk(size, area_start, area_end);
chunk->heap_ = heap;
chunk->set_owner(owner);
chunk->reservation_ = std::move(reservation);
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
return chunk;
}
} // namespace internal
} // namespace v8
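
The Initialize function above constructs the header with placement new inside
memory that is already reserved, so the constructed object and the chunk's
base address deliberately alias. A stripped-down sketch of the idiom, with a
hypothetical ReserveAlignedRegion standing in for the real memory allocator:

  // Sketch only (ReserveAlignedRegion is hypothetical).
  void* base = ReserveAlignedRegion(size);
  auto* chunk = new (base) BasicMemoryChunk(size, area_start, area_end);
  // No separate heap allocation happens: `chunk` points at `base`, which is
  // why FromAddress(base) can later recover the header by address arithmetic.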
......@@ -9,21 +9,24 @@
#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
class MemoryChunk;
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
NUMBER_OF_REMEMBERED_SET_TYPES
};
class Space;
class BasicMemoryChunk {
public:
// Use with std data structures.
struct Hasher {
size_t operator()(BasicMemoryChunk* const chunk) const {
return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
}
};
enum Flag {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
......@@ -108,6 +111,27 @@ class BasicMemoryChunk {
Address address() const { return reinterpret_cast<Address>(this); }
// Returns the offset of a given address within this page.
inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
// Returns the address for a given offset in this page.
Address OffsetToAddress(size_t offset) {
Address address_in_page = address() + offset;
DCHECK_GE(address_in_page, area_start());
DCHECK_LT(address_in_page, area_end());
return address_in_page;
}
// Some callers rely on the fact that this can operate on both
// tagged and aligned object addresses.
inline uint32_t AddressToMarkbitIndex(Address addr) const {
return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
}
inline Address MarkbitIndexToAddress(uint32_t index) const {
return this->address() + (index << kTaggedSizeLog2);
}
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
......@@ -120,6 +144,16 @@ class BasicMemoryChunk {
return static_cast<size_t>(area_end() - area_start());
}
Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
// Gets the chunk's owner or null if the space has been detached.
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
void SetFlag(Flag flag) {
if (access_mode == AccessMode::NON_ATOMIC) {
......@@ -168,6 +202,14 @@ class BasicMemoryChunk {
void ReleaseMarkingBitmap();
static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Space* owner, VirtualMemory reservation);
size_t wasted_memory() { return wasted_memory_; }
void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
size_t allocated_bytes() { return allocated_bytes_; }
static const intptr_t kSizeOffset = 0;
static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
......@@ -176,12 +218,62 @@ class BasicMemoryChunk {
static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
static const size_t kHeaderSize =
kSizeOffset + kSizetSize // size_t size
+ kUIntptrSize // uintptr_t flags_
+ kSystemPointerSize // Bitmap* marking_bitmap_
+ kSystemPointerSize // Heap* heap_
+ kSystemPointerSize // Address area_start_
+ kSystemPointerSize; // Address area_end_
kSizeOffset + kSizetSize // size_t size
+ kUIntptrSize // uintptr_t flags_
+ kSystemPointerSize // Bitmap* marking_bitmap_
+ kSystemPointerSize // Heap* heap_
+ kSystemPointerSize // Address area_start_
+ kSystemPointerSize // Address area_end_
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // Address owner_
+ 3 * kSystemPointerSize; // VirtualMemory reservation_
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromAddress(Address a) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
}
template <AccessMode mode>
ConcurrentBitmap<mode>* marking_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
}
Address HighWaterMark() { return address() + high_water_mark_; }
static inline void UpdateHighWaterMark(Address mark) {
if (mark == kNullAddress) return;
// Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs
// to another chunk. See the comment to Page::FromAllocationAreaAddress.
BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(mark - 1);
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
while ((new_mark > old_mark) &&
!chunk->high_water_mark_.compare_exchange_weak(
old_mark, new_mark, std::memory_order_acq_rel)) {
}
}
VirtualMemory* reserved_memory() { return &reservation_; }
void ResetAllocationStatistics() {
allocated_bytes_ = area_size();
wasted_memory_ = 0;
}
void IncreaseAllocatedBytes(size_t bytes) {
DCHECK_LE(bytes, area_size());
allocated_bytes_ += bytes;
}
void DecreaseAllocatedBytes(size_t bytes) {
DCHECK_LE(bytes, area_size());
DCHECK_GE(allocated_bytes(), bytes);
allocated_bytes_ -= bytes;
}
protected:
// Overall size of the chunk, including the header and guards.
......@@ -201,7 +293,31 @@ class BasicMemoryChunk {
Address area_start_;
Address area_end_;
// Bytes allocated on the page, which include all objects on the page and
// the linear allocation area.
size_t allocated_bytes_;
// Freed memory that was not added to the free list.
size_t wasted_memory_;
// Assuming the initial allocation on a page is sequential, this counts the
// highest number of bytes ever allocated on the page.
std::atomic<intptr_t> high_water_mark_;
// The space owning this memory chunk.
std::atomic<Space*> owner_;
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
friend class BasicMemoryChunkValidator;
friend class ConcurrentMarkingState;
friend class MajorMarkingState;
friend class MajorAtomicMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
};
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
......
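
UpdateHighWaterMark above is a lock-free atomic maximum: the compare-exchange
loop retries only while this thread's mark is still the largest value seen,
and exits as soon as another thread has published a higher one. The same
pattern in isolation, assuming nothing beyond <atomic>:

  #include <atomic>

  // Atomically raise `mark` to `candidate` if (and only if) it is larger.
  void UpdateMax(std::atomic<intptr_t>& mark, intptr_t candidate) {
    intptr_t current = mark.load(std::memory_order_relaxed);
    while (candidate > current &&
           !mark.compare_exchange_weak(current, candidate,
                                       std::memory_order_acq_rel)) {
      // On failure, compare_exchange_weak reloads `current`, so each retry
      // checks the freshest value and stops once current >= candidate.
    }
  }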
......@@ -173,11 +173,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
VirtualMemory reservation) {
MemoryChunk* chunk = FromAddress(base);
DCHECK_EQ(base, chunk->address());
new (chunk) BasicMemoryChunk(size, area_start, area_end);
BasicMemoryChunk::Initialize(heap, base, size, area_start, area_end, owner,
std::move(reservation));
chunk->heap_ = heap;
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
......@@ -188,13 +186,10 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
chunk->mutex_ = new base::Mutex();
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
chunk->local_tracker_ = nullptr;
......@@ -222,15 +217,14 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
heap->code_space_memory_modification_scope_depth();
} else {
size_t page_size = MemoryAllocator::GetCommitPageSize();
DCHECK(IsAligned(area_start, page_size));
size_t area_size = RoundUp(area_end - area_start, page_size);
CHECK(reservation.SetPermissions(area_start, area_size,
DefaultWritableCodePermissions()));
DCHECK(IsAligned(chunk->area_start(), page_size));
size_t area_size =
RoundUp(chunk->area_end() - chunk->area_start(), page_size);
CHECK(chunk->reservation_.SetPermissions(
chunk->area_start(), area_size, DefaultWritableCodePermissions()));
}
}
chunk->reservation_ = std::move(reservation);
if (owner->identity() == CODE_SPACE) {
chunk->code_object_registry_ = new CodeObjectRegistry();
} else {
......
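
Note the ordering change in this hunk: BasicMemoryChunk::Initialize now takes
the VirtualMemory by value and moves it into the chunk, so the local
`reservation` in MemoryChunk::Initialize is empty afterwards. The permission
change therefore has to go through chunk->reservation_, as sketched here
(illustrative only):

  // The base Initialize has already executed:
  //   chunk->reservation_ = std::move(reservation);
  // so calling reservation.SetPermissions(...) here would act on a
  // moved-from object. The chunk's copy must be used instead:
  CHECK(chunk->reservation_.SetPermissions(chunk->area_start(), area_size,
                                           DefaultWritableCodePermissions()));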
......@@ -37,19 +37,18 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
NUMBER_OF_REMEMBERED_SET_TYPES
};
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk : public BasicMemoryChunk {
public:
// Use with std data structures.
struct Hasher {
size_t operator()(MemoryChunk* const chunk) const {
return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
}
};
using Flags = uintptr_t;
static const Flags kPointersToHereAreInterestingMask =
......@@ -81,8 +80,6 @@ class MemoryChunk : public BasicMemoryChunk {
static const size_t kHeaderSize =
BasicMemoryChunk::kHeaderSize // Parent size.
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ 3 * kSystemPointerSize // VirtualMemory reservation_
+ kSystemPointerSize // Address owner_
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize // SlotSet* sweeping_slot_set_
......@@ -90,7 +87,6 @@ class MemoryChunk : public BasicMemoryChunk {
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
// concurrent_sweeping_
......@@ -98,8 +94,6 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // uintptr_t write_unprotect_counter_
+ kSizetSize * ExternalBackingStoreType::kNumTypes
// std::atomic<size_t> external_backing_store_bytes_
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kSystemPointerSize * 2 // heap::ListNode
+ kSystemPointerSize // FreeListCategory** categories_
+ kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
......@@ -118,9 +112,9 @@ class MemoryChunk : public BasicMemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
return reinterpret_cast<MemoryChunk*>(BasicMemoryChunk::FromAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject o) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
......@@ -132,20 +126,6 @@ class MemoryChunk : public BasicMemoryChunk {
void SetOldGenerationPageFlags(bool is_marking);
void SetYoungGenerationPageFlags(bool is_marking);
static inline void UpdateHighWaterMark(Address mark) {
if (mark == kNullAddress) return;
// Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs
// to another chunk. See the comment to Page::FromAllocationAreaAddress.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
while ((new_mark > old_mark) &&
!chunk->high_water_mark_.compare_exchange_weak(
old_mark, new_mark, std::memory_order_acq_rel)) {
}
}
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
size_t amount);
......@@ -166,11 +146,6 @@ class MemoryChunk : public BasicMemoryChunk {
return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
}
inline Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race in
// mark-bit initialization. See MemoryChunk::Initialize for the corresponding
......@@ -245,8 +220,6 @@ class MemoryChunk : public BasicMemoryChunk {
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
Address HighWaterMark() { return address() + high_water_mark_; }
size_t ProgressBar() {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
return progress_bar_.load(std::memory_order_acquire);
......@@ -274,16 +247,6 @@ class MemoryChunk : public BasicMemoryChunk {
return external_backing_store_bytes_[type];
}
// Some callers rely on the fact that this can operate on both
// tagged and aligned object addresses.
inline uint32_t AddressToMarkbitIndex(Address addr) const {
return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
}
inline Address MarkbitIndexToAddress(uint32_t index) const {
return this->address() + (index << kTaggedSizeLog2);
}
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
......@@ -382,13 +345,6 @@ class MemoryChunk : public BasicMemoryChunk {
void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::Permission permission);
VirtualMemory* reserved_memory() { return &reservation_; }
template <AccessMode mode>
ConcurrentBitmap<mode>* marking_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
}
template <AccessMode mode>
ConcurrentBitmap<mode>* young_generation_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
......@@ -399,12 +355,6 @@ class MemoryChunk : public BasicMemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
// The space owning this memory chunk.
std::atomic<Space*> owner_;
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
std::atomic<size_t> progress_bar_;
......@@ -419,10 +369,6 @@ class MemoryChunk : public BasicMemoryChunk {
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
std::atomic<intptr_t> high_water_mark_;
base::Mutex* mutex_;
std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
......@@ -443,16 +389,9 @@ class MemoryChunk : public BasicMemoryChunk {
// counter.
uintptr_t write_unprotect_counter_;
// Byte allocated on the page, which includes all objects on the page
// and the linear allocation area.
size_t allocated_bytes_;
// Tracks off-heap memory used by this memory chunk.
std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
// Freed memory that was not added to the free list.
size_t wasted_memory_;
heap::ListNode<MemoryChunk> list_node_;
FreeListCategory** categories_;
......@@ -467,8 +406,6 @@ class MemoryChunk : public BasicMemoryChunk {
PossiblyEmptyBuckets possibly_empty_buckets_;
private:
void InitializeReservedMemory() { reservation_.Reset(); }
friend class ConcurrentMarkingState;
friend class MajorMarkingState;
friend class MajorAtomicMarkingState;
......
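
Everything deleted in the hunks above (heap(), HighWaterMark(),
UpdateHighWaterMark, AddressToMarkbitIndex, marking_bitmap(),
reserved_memory(), and their backing fields) now comes from BasicMemoryChunk,
so call sites compile unchanged. An illustrative example, with `object`
assumed to be a valid HeapObject:

  // These calls now resolve to the inherited BasicMemoryChunk members.
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
  Heap* heap = chunk->heap();
  Address top = chunk->HighWaterMark();
  uint32_t index = chunk->AddressToMarkbitIndex(object.address());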
......@@ -14,6 +14,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
......
......@@ -10,6 +10,7 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces.h"
......
......@@ -747,100 +747,6 @@ class LinearAllocationArea {
Address limit_;
};
// An abstraction of the accounting statistics of a page-structured space.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in conjunction
// with capacity, or else they always balance increases and decreases to the
// non-capacity stats.
class AllocationStats {
public:
AllocationStats() { Clear(); }
AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
capacity_ = stats.capacity_.load();
max_capacity_ = stats.max_capacity_;
size_.store(stats.size_);
#ifdef DEBUG
allocated_on_page_ = stats.allocated_on_page_;
#endif
return *this;
}
// Zero out all the allocation statistics (i.e., no capacity).
void Clear() {
capacity_ = 0;
max_capacity_ = 0;
ClearSize();
}
void ClearSize() {
size_ = 0;
#ifdef DEBUG
allocated_on_page_.clear();
#endif
}
// Accessors for the allocation statistics.
size_t Capacity() { return capacity_; }
size_t MaxCapacity() { return max_capacity_; }
size_t Size() { return size_; }
#ifdef DEBUG
size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
#endif
void IncreaseAllocatedBytes(size_t bytes, Page* page) {
#ifdef DEBUG
size_t size = size_;
DCHECK_GE(size + bytes, size);
#endif
size_.fetch_add(bytes);
#ifdef DEBUG
allocated_on_page_[page] += bytes;
#endif
}
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
DCHECK_GE(size_, bytes);
size_.fetch_sub(bytes);
#ifdef DEBUG
DCHECK_GE(allocated_on_page_[page], bytes);
allocated_on_page_[page] -= bytes;
#endif
}
void DecreaseCapacity(size_t bytes) {
DCHECK_GE(capacity_, bytes);
DCHECK_GE(capacity_ - bytes, size_);
capacity_ -= bytes;
}
void IncreaseCapacity(size_t bytes) {
DCHECK_GE(capacity_ + bytes, capacity_);
capacity_ += bytes;
if (capacity_ > max_capacity_) {
max_capacity_ = capacity_;
}
}
private:
// |capacity_|: The number of object-area bytes (i.e., not including page
// bookkeeping structures) currently in the space.
// During evacuation capacity of the main spaces is accessed from multiple
// threads to check the old generation hard limit.
std::atomic<size_t> capacity_;
// |max_capacity_|: The maximum capacity ever observed.
size_t max_capacity_;
// |size_|: The number of allocated bytes.
std::atomic<size_t> size_;
#ifdef DEBUG
std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
#endif
};
// The free list is organized in categories as follows:
// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
// allocation, when categories >= small do not have entries anymore.
......
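
The block deleted above is the verbatim copy of AllocationStats that now
lives in allocation-stats.h; the only substantive difference is that the
DEBUG-only per-page map is keyed on BasicMemoryChunk* with
BasicMemoryChunk::Hasher instead of Page* with Page::Hasher. A small sketch
of that hasher in use, with `chunk` standing in for a real page-aligned
pointer:

  // Chunk pointers are page-aligned, so the low kPageSizeBits carry no
  // information; shifting them away yields a cheap, well-distributed hash.
  std::unordered_map<BasicMemoryChunk*, size_t, BasicMemoryChunk::Hasher>
      bytes;
  BasicMemoryChunk* chunk = nullptr;  // illustrative placeholder key
  bytes[chunk] += 64;                 // accumulate per-chunk allocation counts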