Commit ffcff3a0 authored by mlippautz, committed by Commit bot

[heap] Cleanup MemoryChunk's declarations

- Remove unused methods that we should never actually use, like SetArea() or
  set_size().
- Live bytes are now reported with --trace-live-bytes instead of --gc-verbose.

BUG=chromium:581076
LOG=N

Review URL: https://codereview.chromium.org/1686413002

Cr-Commit-Position: refs/heads/master@{#33908}
parent 2749ebba
@@ -968,6 +968,8 @@ DEFINE_BOOL(trace_lazy, false, "trace lazy compilation")
DEFINE_BOOL(collect_heap_spill_statistics, false,
"report heap spill statistics along with heap_stats "
"(requires heap_stats)")
DEFINE_BOOL(trace_live_bytes, false,
"trace incrementing and resetting of live bytes")
DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
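For context: DEFINE_BOOL exposes this flag both as FLAG_trace_live_bytes inside V8 and as the --trace-live-bytes command-line switch, so a debug shell run could enable it like this (the binary path and script name are illustrative):

  out/Debug/d8 --trace-live-bytes script.js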
@@ -3858,7 +3858,7 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,
int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
int max_freed = 0;
if (page->TryLock()) {
if (page->mutex()->TryLock()) {
// If this page was already swept in the meantime, we can return here.
if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
page->mutex()->Unlock();
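Note on the hunk above: the removed Page::TryLock() wrapper is replaced by locking through the page's mutex() accessor; the behavior is unchanged. As a self-contained sketch of the underlying claim-a-page pattern (std::mutex and all names here stand in for V8's types, not the actual implementation):

#include <atomic>
#include <cstdio>
#include <mutex>

enum SweepingState { kSweepingPending, kSweepingDone };

struct Page {
  std::mutex mutex;
  std::atomic<SweepingState> state{kSweepingPending};
};

int SweepInParallel(Page* page) {
  int max_freed = 0;
  if (page->mutex.try_lock()) {       // claim the page without blocking
    if (page->state.load() != kSweepingPending) {
      page->mutex.unlock();           // another thread already swept it
      return 0;
    }
    // ... sweep the page and compute |max_freed| ...
    page->state.store(kSweepingDone);
    max_freed = 42;                   // placeholder result
    page->mutex.unlock();
  }
  return max_freed;                   // 0 also means the page was busy
}

int main() {
  Page p;
  std::printf("freed: %d\n", SweepInParallel(&p));
  return 0;
}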
@@ -251,6 +251,36 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
return page;
}
void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}
void MemoryChunk::ResetLiveBytes() {
if (FLAG_trace_live_bytes) {
PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this,
live_byte_count_);
}
live_byte_count_ = 0;
}
void MemoryChunk::IncrementLiveBytes(int by) {
if (FLAG_trace_live_bytes) {
PrintIsolate(heap()->isolate(),
"live-bytes: update page=%p delta=%d %d->%d\n", this, by,
live_byte_count_, live_byte_count_ + by);
}
live_byte_count_ += by;
DCHECK_GE(live_byte_count_, 0);
DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}
void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
}
chunk->IncrementLiveBytes(by);
}
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
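For reference, with --trace-live-bytes enabled the two PrintIsolate calls above emit lines of the following shape (shown without the isolate prefix that PrintIsolate prepends; the pointer and byte values are made up):

  live-bytes: update page=0x23f4a2b00000 delta=88 1024->1112
  live-bytes: reset page=0x23f4a2b00000 1112->0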
@@ -428,7 +428,7 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
MemoryChunk* chunk =
MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
area_end, NOT_EXECUTABLE, semi_space);
area_end, NOT_EXECUTABLE, semi_space, nullptr);
bool in_to_space = (semi_space->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
: MemoryChunk::IN_FROM_SPACE);
@@ -449,10 +449,10 @@ void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
SetFlags(0, ~0);
}
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner) {
Executability executable, Space* owner,
base::VirtualMemory* reservation) {
MemoryChunk* chunk = FromAddress(base);
DCHECK(base == chunk->address());
@@ -490,6 +490,10 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->SetFlag(IS_EXECUTABLE);
}
if (reservation != nullptr) {
chunk->reservation_.TakeControl(reservation);
}
return chunk;
}
@@ -691,10 +695,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
}
MemoryChunk* result = MemoryChunk::Initialize(
heap, base, chunk_size, area_start, area_end, executable, owner);
result->set_reserved_memory(&reservation);
return result;
return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
executable, owner, &reservation);
}
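MemoryChunk::Initialize now receives the reservation directly and adopts it via TakeControl, instead of AllocateChunk patching it in afterwards through set_reserved_memory. A simplified, self-contained sketch of that move-style ownership transfer (not V8's actual base::VirtualMemory, which also releases the mapping in its destructor):

#include <cassert>
#include <cstddef>

class VirtualMemory {
 public:
  VirtualMemory() = default;
  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}

  bool IsReserved() const { return address_ != nullptr; }

  // Move the reservation out of |other| so exactly one object owns it.
  void TakeControl(VirtualMemory* other) {
    address_ = other->address_;
    size_ = other->size_;
    other->address_ = nullptr;
    other->size_ = 0;
  }

 private:
  void* address_ = nullptr;
  size_t size_ = 0;
};

int main() {
  int backing = 0;
  VirtualMemory original(&backing, 4096);
  VirtualMemory adopted;
  adopted.TakeControl(&original);
  assert(adopted.IsReserved() && !original.IsReserved());
  return 0;
}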
@@ -920,19 +922,13 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
}
chunk->IncrementLiveBytes(by);
}
void MemoryChunk::ReleaseAllocatedMemory() {
delete slots_buffer_;
slots_buffer_ = nullptr;
delete skip_list_;
skip_list_ = nullptr;
delete mutex_;
mutex_ = nullptr;
ReleaseOldToNewSlots();
}
@@ -319,7 +319,7 @@ class MemoryChunk {
// candidates selection cycle.
FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
// This flag is inteded to be used for testing.
// This flag is intended to be used for testing.
NEVER_ALLOCATE_ON_PAGE,
// The memory chunk is already logically freed, however the actual freeing
@@ -429,30 +429,17 @@ class MemoryChunk {
static const int kFlagsOffset = kPointerSize;
static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
static const MemoryChunk* FromAddress(const byte* a) {
return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
~kAlignmentMask);
}
static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}
// Only works for addresses in pointer spaces, not data or code spaces.
static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
}
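Both helpers above depend on chunk headers being found by alignment masking: clearing the low bits of any interior pointer yields the chunk base, and the remaining offset indexes the mark bitmap. A self-contained sketch under assumed constants (1 MiB chunk alignment, 8-byte pointers; the real values live in this header):

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kAlignment = uintptr_t{1} << 20;  // assumed chunk size
constexpr uintptr_t kAlignmentMask = kAlignment - 1;
constexpr int kPointerSizeLog2 = 3;                   // 8-byte pointers

uintptr_t ChunkBaseFromAddress(uintptr_t a) {
  // Clearing the low bits of an interior pointer yields the chunk header.
  return a & ~kAlignmentMask;
}

uint32_t FastAddressToMarkbitIndex(uintptr_t a) {
  // The offset inside the chunk, in pointer-sized steps, indexes the bitmap.
  return static_cast<uint32_t>(a & kAlignmentMask) >> kPointerSizeLog2;
}

int main() {
  uintptr_t base = 16 * kAlignment;  // a chunk starting at 16 MiB
  uintptr_t object = base + 0x2468;  // an address inside that chunk
  std::printf("base=%p markbit=%u\n",
              reinterpret_cast<void*>(ChunkBaseFromAddress(object)),
              FastAddressToMarkbitIndex(object));
  return 0;
}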
static inline void UpdateHighWaterMark(Address mark) {
if (mark == nullptr) return;
// Need to subtract one from the mark because when a chunk is full the
@@ -471,75 +458,18 @@ class MemoryChunk {
bool is_valid() { return address() != NULL; }
MemoryChunk* next_chunk() { return next_chunk_.Value(); }
MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
kPageHeaderTag) {
return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
kPageHeaderTag);
} else {
return NULL;
}
}
void set_owner(Space* space) {
DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
kPageHeaderTag);
}
base::VirtualMemory* reserved_memory() { return &reservation_; }
void set_reserved_memory(base::VirtualMemory* reservation) {
DCHECK_NOT_NULL(reservation);
reservation_.TakeControl(reservation);
}
base::Mutex* mutex() { return mutex_; }
bool Contains(Address addr) {
return addr >= area_start() && addr < area_end();
}
// Checks whether addr can be a limit of addresses in this page.
// It's a limit if it's in the page, or if it's just after the
// last byte of the page.
// Checks whether |addr| can be a limit of addresses in this page. It's a
// limit if it's in the page, or if it's just after the last byte of the page.
bool ContainsLimit(Address addr) {
return addr >= area_start() && addr <= area_end();
}
void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
void SetFlagTo(int flag, bool value) {
if (value) {
SetFlag(flag);
} else {
ClearFlag(flag);
}
}
bool IsFlagSet(int flag) {
return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
}
// Set or clear multiple flags at a time. The flags in the mask
// are set to the value in "flags", the rest retain the current value
// in flags_.
void SetFlags(intptr_t flags, intptr_t mask) {
flags_ = (flags_ & ~mask) | (flags & mask);
}
// Return all current flags.
intptr_t GetFlags() { return flags_; }
AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
return concurrent_sweeping_;
}
@@ -548,39 +478,18 @@ class MemoryChunk {
return parallel_compaction_;
}
bool TryLock() { return mutex_->TryLock(); }
base::Mutex* mutex() { return mutex_; }
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
void ResetLiveBytes() {
if (FLAG_gc_verbose) {
PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
live_byte_count_);
}
live_byte_count_ = 0;
}
void IncrementLiveBytes(int by) {
if (FLAG_gc_verbose) {
printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
live_byte_count_ + by);
}
live_byte_count_ += by;
DCHECK_GE(live_byte_count_, 0);
DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
}
// Manage live byte count, i.e., count of bytes in black objects.
inline void ResetLiveBytes();
inline void IncrementLiveBytes(int by);
int LiveBytes() {
DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
return live_byte_count_;
}
void SetLiveBytes(int live_bytes) {
DCHECK_GE(live_bytes, 0);
DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
DCHECK_LE(static_cast<size_t>(live_bytes), size_);
live_byte_count_ = live_bytes;
}
@@ -592,6 +501,32 @@ class MemoryChunk {
write_barrier_counter_ = counter;
}
size_t size() const { return size_; }
inline Heap* heap() const { return heap_; }
inline SkipList* skip_list() { return skip_list_; }
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
int area_size() { return static_cast<int>(area_end() - area_start()); }
bool CommitArea(size_t requested);
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
int progress_bar() {
DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
return progress_bar_;
@@ -609,35 +544,10 @@ class MemoryChunk {
}
}
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
void SetArea(Address area_start, Address area_end) {
area_start_ = area_start;
area_end_ = area_end;
}
Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
bool InNewSpace() {
return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
}
bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
// Markbits support
inline Bitmap* markbits() {
return Bitmap::FromAddress(address() + kHeaderSize);
}
void PrintMarkbits() { markbits()->Print(); }
inline uint32_t AddressToMarkbitIndex(Address addr) {
return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
}
@@ -646,10 +556,24 @@ class MemoryChunk {
return this->address() + (index << kPointerSizeLog2);
}
void InsertAfter(MemoryChunk* other);
void Unlink();
void PrintMarkbits() { markbits()->Print(); }
inline Heap* heap() const { return heap_; }
void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
bool IsFlagSet(int flag) {
return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
}
// Set or clear multiple flags at a time. The flags in the mask are set to
// the value in "flags", the rest retain the current value in |flags_|.
void SetFlags(intptr_t flags, intptr_t mask) {
flags_ = (flags_ & ~mask) | (flags & mask);
}
// Return all current flags.
intptr_t GetFlags() { return flags_; }
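The masked update in SetFlags merits a worked example: bits inside the mask are overwritten from the argument, bits outside it keep their current value. A minimal check (values are arbitrary):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t flags_ = 0b1010;
  uintptr_t flags = 0b0101;
  uintptr_t mask = 0b0011;  // only touch the two low bits
  flags_ = (flags_ & ~mask) | (flags & mask);
  assert(flags_ == 0b1001);  // high bits kept, low bits replaced
  return 0;
}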
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
@@ -664,49 +588,71 @@ class MemoryChunk {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
void MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK_NULL(slots_buffer_);
SetFlag(EVACUATION_CANDIDATE);
}
void ClearEvacuationCandidate() {
DCHECK(slots_buffer_ == NULL);
ClearFlag(EVACUATION_CANDIDATE);
}
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
}
inline SkipList* skip_list() { return skip_list_; }
Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
bool InNewSpace() {
return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
}
inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
MemoryChunk* next_chunk() { return next_chunk_.Value(); }
void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
void MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK(slots_buffer_ == NULL);
SetFlag(EVACUATION_CANDIDATE);
void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
kPageHeaderTag) {
return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
kPageHeaderTag);
} else {
return nullptr;
}
}
void ClearEvacuationCandidate() {
DCHECK(slots_buffer_ == NULL);
ClearFlag(EVACUATION_CANDIDATE);
void set_owner(Space* space) {
DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
kPageHeaderTag);
}
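The owner pointer is stored tagged so a Space* in the page header can be distinguished from other words; reading it back checks the tag and strips it again. A self-contained sketch with assumed tag constants (the real kPageHeaderTag/kPageHeaderTagMask are defined elsewhere in this header):

#include <cassert>
#include <cstdint>

struct Space {};  // stand-in for the owning space

constexpr uintptr_t kPageHeaderTag = 1;      // assumed tag value
constexpr uintptr_t kPageHeaderTagMask = 3;  // assumed mask (low two bits)

uintptr_t TagOwner(Space* space) {
  // Alignment guarantees the low bits are free to hold the tag.
  assert((reinterpret_cast<uintptr_t>(space) & kPageHeaderTagMask) == 0);
  return reinterpret_cast<uintptr_t>(space) + kPageHeaderTag;
}

Space* UntagOwner(uintptr_t owner) {
  if ((owner & kPageHeaderTagMask) != kPageHeaderTag) return nullptr;
  return reinterpret_cast<Space*>(owner - kPageHeaderTag);
}

int main() {
  alignas(8) static Space space;
  assert(UntagOwner(TagOwner(&space)) == &space);
  return 0;
}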
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
int area_size() { return static_cast<int>(area_end() - area_start()); }
bool CommitArea(size_t requested);
void InsertAfter(MemoryChunk* other);
void Unlink();
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
base::VirtualMemory* reservation);
// Should be called when memory chunk is about to be freed.
void ReleaseAllocatedMemory();
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner);
base::VirtualMemory* reserved_memory() { return &reservation_; }
size_t size_;
intptr_t flags_;
@@ -717,28 +663,38 @@ class MemoryChunk {
// If the chunk needs to remember its memory reservation, it is stored here.
base::VirtualMemory reservation_;
// The identity of the owning space. This is tagged as a failure pointer, but
// no failure can be in an object, so this can be distinguished from any entry
// in a fixed array.
Address owner_;
Heap* heap_;
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
int progress_bar_;
// Count of bytes marked black on page.
int live_byte_count_;
SlotsBuffer* slots_buffer_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* old_to_new_slots_;
SkipList* skip_list_;
intptr_t write_barrier_counter_;
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
AtomicValue<intptr_t> high_water_mark_;
base::Mutex* mutex_;
AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
AtomicValue<ParallelCompactingState> parallel_compaction_;
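On high_water_mark_ above: per its comment it is an AtomicValue that only ever moves up. A typical lock-free monotonic-max update looks like the sketch below (the idea behind UpdateHighWaterMark, not V8's exact code):

#include <atomic>
#include <cstdint>
#include <cstdio>

void UpdateHighWaterMark(std::atomic<intptr_t>& mark, intptr_t candidate) {
  intptr_t current = mark.load(std::memory_order_relaxed);
  while (candidate > current &&
         !mark.compare_exchange_weak(current, candidate,
                                     std::memory_order_relaxed)) {
    // compare_exchange_weak refreshed |current|; the loop re-checks the max.
  }
}

int main() {
  std::atomic<intptr_t> mark{100};
  UpdateHighWaterMark(mark, 250);  // raises the mark
  UpdateHighWaterMark(mark, 50);   // below the current mark: no effect
  std::printf("%ld\n", static_cast<long>(mark.load()));  // prints 250
  return 0;
}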
@@ -1383,8 +1383,9 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() const {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<const byte*>(this))->heap();
Heap* heap = MemoryChunk::FromAddress(
reinterpret_cast<Address>(const_cast<HeapObject*>(this)))
->heap();
SLOW_DCHECK(heap != NULL);
return heap;
}