Commit 8b682e1a authored by mlippautz, committed by Commit bot

[heap] Use size_t throughout MemoryAllocator

BUG=chromium:652721
R=hpayer@chromium.org

Review-Url: https://codereview.chromium.org/2395563002
Cr-Commit-Position: refs/heads/master@{#39986}
parent ff81734c
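For readers skimming the diff below: the change flips the allocator's byte counters from signed intptr_t to unsigned size_t, so decrements are written as an explicit Decrement() instead of Increment() with a negated value, bounds are asserted with DCHECK_GE before subtracting, and values are printed with %zu. A minimal, self-contained sketch of that pattern, using std::atomic as a stand-in for V8's base::AtomicNumber (the class and names here are illustrative, not part of the commit):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Stand-in for base::AtomicNumber<size_t>: an unsigned counter that is
// decremented explicitly rather than incremented by a negative delta.
class ByteCounter {
 public:
  void Increment(size_t delta) { value_.fetch_add(delta); }
  void Decrement(size_t delta) {
    assert(value_.load() >= delta);  // mirrors the DCHECK_GE checks in the diff
    value_.fetch_sub(delta);
  }
  size_t Value() const { return value_.load(); }

 private:
  std::atomic<size_t> value_{0};
};

int main() {
  ByteCounter size;
  size.Increment(4096);
  size.Decrement(1024);
  // size_t values are printed with %zu instead of a signed pointer-sized
  // format macro such as V8PRIdPTR.
  std::printf("allocated: %zu bytes\n", size.Value());
  return 0;
}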
@@ -117,7 +117,8 @@ void StatisticsExtension::GetCounters(
   };
   const StatisticNumber numbers[] = {
-      {heap->memory_allocator()->Size(), "total_committed_bytes"},
+      {static_cast<intptr_t>(heap->memory_allocator()->Size()),
+       "total_committed_bytes"},
       {heap->new_space()->Size(), "new_space_live_bytes"},
      {heap->new_space()->Available(), "new_space_available_bytes"},
      {static_cast<intptr_t>(heap->new_space()->CommittedMemory()),
...
@@ -221,10 +221,10 @@ class GCTracer {
     intptr_t end_object_size;

     // Size of memory allocated from OS set in constructor.
-    intptr_t start_memory_size;
+    size_t start_memory_size;

     // Size of memory allocated from OS set in destructor.
-    intptr_t end_memory_size;
+    size_t end_memory_size;

     // Total amount of space either wasted or contained in one of free lists
     // before the current GC.
...
@@ -275,7 +275,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
   // and does not count available bytes already in the old space or code
   // space. Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
+  if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <=
+      new_space_->Size()) {
     isolate_->counters()
         ->gc_compactor_caused_by_oldspace_exhaustion()
         ->Increment();
@@ -315,8 +316,9 @@ void Heap::ReportStatisticsBeforeGC() {
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintIsolate(isolate_, "Memory allocator, used: %6" V8PRIdPTR
-                         " KB, available: %6" V8PRIdPTR " KB\n",
+  PrintIsolate(isolate_,
+               "Memory allocator, used: %6zu KB,"
+               " available: %6zu KB\n",
                memory_allocator()->Size() / KB,
                memory_allocator()->Available() / KB);
   PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
...
@@ -446,7 +446,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
   base::VirtualMemory reservation(size, alignment);

   if (!reservation.IsReserved()) return NULL;
-  size_.Increment(static_cast<intptr_t>(reservation.size()));
+  size_.Increment(reservation.size());
   Address base =
       RoundUp(static_cast<Address>(reservation.address()), alignment);
   controller->TakeControl(&reservation);
@@ -681,8 +681,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                 CodePageGuardSize();

     // Check executable memory limit.
-    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
-        capacity_executable_) {
+    if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                 "V8 Executable Allocation capacity exceeded"));
       return NULL;
@@ -705,16 +704,16 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
     DCHECK(
         IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
     if (base == NULL) return NULL;
-    size_.Increment(static_cast<intptr_t>(chunk_size));
+    size_.Increment(chunk_size);
     // Update executable memory size.
-    size_executable_.Increment(static_cast<intptr_t>(chunk_size));
+    size_executable_.Increment(chunk_size);
   } else {
     base = AllocateAlignedMemory(chunk_size, commit_size,
                                  MemoryChunk::kAlignment, executable,
                                  &reservation);
     if (base == NULL) return NULL;
     // Update executable memory size.
-    size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
+    size_executable_.Increment(reservation.size());
   }

   if (Heap::ShouldZapGarbage()) {
@@ -759,9 +758,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
       last_chunk_.TakeControl(&reservation);
       UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
                     last_chunk_.size());
-      size_.Increment(-static_cast<intptr_t>(chunk_size));
+      size_.Decrement(chunk_size);
       if (executable == EXECUTABLE) {
-        size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
+        size_executable_.Decrement(chunk_size);
       }
       CHECK(last_chunk_.IsReserved());
       return AllocateChunk(reserve_area_size, commit_area_size, executable,
@@ -837,8 +836,8 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
   size_t to_free_size = size - (start_free - chunk->address());

-  DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size));
-  size_.Increment(-static_cast<intptr_t>(to_free_size));
+  DCHECK(size_.Value() >= to_free_size);
+  size_.Decrement(to_free_size);
   isolate_->counters()->memory_allocated()->Decrement(
       static_cast<int>(to_free_size));
   chunk->set_size(size - to_free_size);
@@ -853,20 +852,15 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());

-  intptr_t size;
   base::VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    size = static_cast<intptr_t>(reservation->size());
-  } else {
-    size = static_cast<intptr_t>(chunk->size());
-  }
-  DCHECK(size_.Value() >= size);
-  size_.Increment(-size);
+  const size_t size =
+      reservation->IsReserved() ? reservation->size() : chunk->size();
+  DCHECK_GE(size_.Value(), static_cast<size_t>(size));
+  size_.Decrement(size);
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

   if (chunk->executable() == EXECUTABLE) {
-    DCHECK(size_executable_.Value() >= size);
-    size_executable_.Increment(-size);
+    DCHECK_GE(size_executable_.Value(), size);
+    size_executable_.Decrement(size);
   }

   chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -999,10 +993,9 @@ void MemoryAllocator::ZapBlock(Address start, size_t size) {

 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
-  intptr_t size = Size();
+  size_t size = Size();
   float pct = static_cast<float>(capacity_ - size) / capacity_;
-  PrintF(" capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
-         ", available: %%%d\n\n",
+  PrintF(" capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n",
          capacity_, size, static_cast<int>(pct * 100));
 }
 #endif
...
@@ -1230,12 +1230,31 @@ class MemoryAllocator {
     kRegular,
     kPooled,
   };

   enum FreeMode {
     kFull,
     kPreFreeAndQueue,
     kPooledAndQueue,
   };

+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  static int PageAreaSize(AllocationSpace space) {
+    DCHECK_NE(LO_SPACE, space);
+    return (space == CODE_SPACE) ? CodePageAreaSize()
+                                 : Page::kAllocatableMemory;
+  }
+
   explicit MemoryAllocator(Isolate* isolate);

   // Initializes its internal bookkeeping structures.
@@ -1261,26 +1280,26 @@ class MemoryAllocator {
   bool CanFreeMemoryChunk(MemoryChunk* chunk);

   // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_.Value(); }
+  size_t Size() { return size_.Value(); }

   // Returns allocated executable spaces in bytes.
-  intptr_t SizeExecutable() { return size_executable_.Value(); }
+  size_t SizeExecutable() { return size_executable_.Value(); }

   // Returns the maximum available bytes of heaps.
-  intptr_t Available() {
-    intptr_t size = Size();
+  size_t Available() {
+    const size_t size = Size();
     return capacity_ < size ? 0 : capacity_ - size;
   }

   // Returns the maximum available executable bytes of heaps.
-  intptr_t AvailableExecutable() {
-    intptr_t executable_size = SizeExecutable();
+  size_t AvailableExecutable() {
+    const size_t executable_size = SizeExecutable();
     if (capacity_executable_ < executable_size) return 0;
     return capacity_executable_ - executable_size;
   }

   // Returns maximum available bytes that the old space can have.
-  intptr_t MaxAvailable() {
+  size_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
   }
@@ -1291,11 +1310,6 @@ class MemoryAllocator {
            address >= highest_ever_allocated_.Value();
   }

-#ifdef DEBUG
-  // Reports statistic info of the space.
-  void ReportStatistics();
-#endif
-
   // Returns a MemoryChunk in which the memory region from commit_area_size to
   // reserve_area_size of the chunk area is reserved but not committed, it
   // could be committed later by calling MemoryChunk::CommitArea.
@@ -1333,24 +1347,6 @@ class MemoryAllocator {
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);

-  static int CodePageGuardStartOffset();
-
-  static int CodePageGuardSize();
-
-  static int CodePageAreaStartOffset();
-
-  static int CodePageAreaEndOffset();
-
-  static int CodePageAreaSize() {
-    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
-  }
-
-  static int PageAreaSize(AllocationSpace space) {
-    DCHECK_NE(LO_SPACE, space);
-    return (space == CODE_SPACE) ? CodePageAreaSize()
-                                 : Page::kAllocatableMemory;
-  }
-
   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                               Address start, size_t commit_size,
                                               size_t reserved_size);
@@ -1358,6 +1354,11 @@ class MemoryAllocator {
   CodeRange* code_range() { return code_range_; }
   Unmapper* unmapper() { return &unmapper_; }

+#ifdef DEBUG
+  // Reports statistic info of the space.
+  void ReportStatistics();
+#endif
+
  private:
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.
@@ -1371,28 +1372,6 @@ class MemoryAllocator {
   template <typename SpaceType>
   MemoryChunk* AllocatePagePooled(SpaceType* owner);

-  Isolate* isolate_;
-
-  CodeRange* code_range_;
-
-  // Maximum space size in bytes.
-  intptr_t capacity_;
-  // Maximum subset of capacity_ that can be executable
-  intptr_t capacity_executable_;
-
-  // Allocated space size in bytes.
-  base::AtomicNumber<intptr_t> size_;
-  // Allocated executable space size in bytes.
-  base::AtomicNumber<intptr_t> size_executable_;
-
-  // We keep the lowest and highest addresses allocated as a quick way
-  // of determining that pointers are outside the heap. The estimate is
-  // conservative, i.e. not all addrsses in 'allocated' space are allocated
-  // to our heap. The range is [lowest, highest[, inclusive on the low end
-  // and exclusive on the high end.
-  base::AtomicValue<void*> lowest_ever_allocated_;
-  base::AtomicValue<void*> highest_ever_allocated_;
-
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
@@ -1413,6 +1392,27 @@ class MemoryAllocator {
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }

+  Isolate* isolate_;
+
+  CodeRange* code_range_;
+
+  // Maximum space size in bytes.
+  size_t capacity_;
+  // Maximum subset of capacity_ that can be executable
+  size_t capacity_executable_;
+
+  // Allocated space size in bytes.
+  base::AtomicNumber<size_t> size_;
+  // Allocated executable space size in bytes.
+  base::AtomicNumber<size_t> size_executable_;
+
+  // We keep the lowest and highest addresses allocated as a quick way
+  // of determining that pointers are outside the heap. The estimate is
+  // conservative, i.e. not all addresses in 'allocated' space are allocated
+  // to our heap. The range is [lowest, highest[, inclusive on the low end
+  // and exclusive on the high end.
+  base::AtomicValue<void*> lowest_ever_allocated_;
+  base::AtomicValue<void*> highest_ever_allocated_;
+
   base::VirtualMemory last_chunk_;

   Unmapper unmapper_;
...
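One detail worth noting in the Available() and AvailableExecutable() accessors above: with size_t, subtracting first and checking afterwards would wrap around instead of going negative, so the comparison has to happen before the subtraction. A tiny illustrative sketch of that guard (the function name is mine, not from the diff):

#include <cstddef>

// Clamped unsigned subtraction: returns 0 when used exceeds capacity
// instead of wrapping around to a huge value.
size_t ClampedAvailable(size_t capacity, size_t used) {
  return capacity < used ? 0 : capacity - used;
}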
@@ -200,7 +200,7 @@ class RegExpImpl {
   // is not tracked, however. As a conservative approximation we track the
   // total regexp code compiled including code that has subsequently been freed
   // and the total executable memory at any point.
-  static const int kRegExpExecutableMemoryLimit = 16 * MB;
+  static const size_t kRegExpExecutableMemoryLimit = 16 * MB;
   static const int kRegExpCompiledLimit = 1 * MB;
   static const int kRegExpTooLargeToOptimize = 20 * KB;
...