Commit f2c0f97a authored by mlippautz, committed by Commit bot

[heap] MemoryAllocator: Use size_t consistently

BUG=chromium:652721
R=ulan@chromium.org

Review-Url: https://codereview.chromium.org/2406913002
Cr-Commit-Position: refs/heads/master@{#40155}
parent a863620f
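
The change below moves never-negative sizes in MemoryAllocator from int/intptr_t to size_t and makes the remaining signed/unsigned comparisons explicit. A minimal standalone sketch of the pitfall being addressed (not V8 code; the values are made up):

#include <cstddef>

int main() {
  int size = -1;         // signed size from legacy arithmetic
  size_t page_area = 4096;
  // Mathematically -1 <= 4096, but in a mixed comparison the int is
  // converted to size_t first, so -1 becomes a huge value and the
  // comparison is false; compilers flag this with -Wsign-compare.
  bool fits = size <= page_area;  // false, and warns
  // The diff's approach: establish non-negativity, then cast explicitly.
  bool fits_checked =
      size >= 0 && static_cast<size_t>(size) <= page_area;  // no warning
  return (fits || fits_checked) ? 0 : 1;
}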
@@ -1171,8 +1171,9 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
       for (auto& chunk : *reservation) {
         AllocationResult allocation;
         int size = chunk.size;
-        DCHECK_LE(size, MemoryAllocator::PageAreaSize(
-                            static_cast<AllocationSpace>(space)));
+        DCHECK_LE(static_cast<size_t>(size),
+                  MemoryAllocator::PageAreaSize(
+                      static_cast<AllocationSpace>(space)));
         if (space == NEW_SPACE) {
           allocation = new_space()->AllocateRawUnaligned(size);
         } else {
@@ -631,18 +631,18 @@ void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
   }
 }
 
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
-                                            intptr_t commit_area_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
+                                            size_t commit_area_size,
                                             Executability executable,
                                             Space* owner) {
-  DCHECK(commit_area_size <= reserve_area_size);
+  DCHECK_LE(commit_area_size, reserve_area_size);
   size_t chunk_size;
   Heap* heap = isolate_->heap();
-  Address base = NULL;
+  Address base = nullptr;
   base::VirtualMemory reservation;
-  Address area_start = NULL;
-  Address area_end = NULL;
+  Address area_start = nullptr;
+  Address area_end = nullptr;
 
   //
   // MemoryChunk layout:
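
Besides the type change, DCHECK(commit_area_size <= reserve_area_size) becomes DCHECK_LE(commit_area_size, reserve_area_size). The comparison macros report both operand values on failure, which the plain boolean form cannot. A toy illustration of the idea (not V8's actual macro implementation):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Toy CHECK_LE-style macro: on failure it prints both operands, so the
// log shows "8192 vs. 4096" instead of just "check failed".
#define TOY_CHECK_LE(a, b)                                            \
  do {                                                                \
    if (!((a) <= (b))) {                                              \
      std::fprintf(stderr, "Check failed: %s <= %s (%ju vs. %ju)\n",  \
                   #a, #b, static_cast<std::uintmax_t>(a),            \
                   static_cast<std::uintmax_t>(b));                   \
      std::abort();                                                   \
    }                                                                 \
  } while (0)

int main() {
  size_t commit_area_size = 8192, reserve_area_size = 4096;
  TOY_CHECK_LE(commit_area_size, reserve_area_size);  // aborts with values
}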
@@ -913,11 +913,11 @@ template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
     MemoryChunk* chunk);
 
 template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                     Executability executable) {
   MemoryChunk* chunk = nullptr;
   if (alloc_mode == kPooled) {
-    DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
+    DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
   }
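
AllocatePage dispatches on a compile-time AllocationMode, so the kPooled branch and its DCHECKs vanish from instantiations that use kRegular. A self-contained sketch of the pattern, with hypothetical AllocateFresh/TakeFromPool stand-ins for the real allocation paths:

#include <cassert>
#include <cstddef>
#include <cstdlib>

enum AllocationMode { kRegular, kPooled };

constexpr size_t kPooledChunkSize = 4096;  // assumed fixed pool size

void* AllocateFresh(size_t size) { return std::malloc(size); }
void* TakeFromPool() { return std::malloc(kPooledChunkSize); }  // stub pool

// alloc_mode is a non-type template parameter: each instantiation sees a
// constant, so the branch below is resolved at compile time.
template <AllocationMode alloc_mode = kRegular>
void* Allocate(size_t size) {
  if (alloc_mode == kPooled) {
    assert(size == kPooledChunkSize);  // pooled chunks come in one size only
    return TakeFromPool();
  }
  return AllocateFresh(size);
}

int main() {
  void* a = Allocate(64);                         // regular path
  void* b = Allocate<kPooled>(kPooledChunkSize);  // pooled path
  std::free(a);
  std::free(b);
}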
@@ -930,15 +930,15 @@ Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
 
 template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
-    intptr_t size, PagedSpace* owner, Executability executable);
+    size_t size, PagedSpace* owner, Executability executable);
 template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
-    intptr_t size, SemiSpace* owner, Executability executable);
+    size_t size, SemiSpace* owner, Executability executable);
 template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
-    intptr_t size, SemiSpace* owner, Executability executable);
+    size_t size, SemiSpace* owner, Executability executable);
 
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+LargePage* MemoryAllocator::AllocateLargePage(size_t size,
                                               LargeObjectSpace* owner,
                                               Executability executable) {
   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
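
Because AllocatePage is defined in the .cc file, it is explicitly instantiated for each (mode, space) combination it is used with, which is why the signature change has to be repeated on all three instantiation lines above. A minimal sketch of the idiom, using an invented Twice function:

#include <cstddef>

// In the header, clients would see only:  template <typename T> T Twice(T);
// The definition stays in the .cc file...
template <typename T>
T Twice(T value) { return value + value; }

// ...so the .cc file must explicitly instantiate every T that callers
// link against. Changing the signature means updating each line.
template int Twice<int>(int value);
template size_t Twice<size_t>(size_t value);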
@@ -999,27 +999,23 @@ void MemoryAllocator::ReportStatistics() {
 }
 #endif
 
-int MemoryAllocator::CodePageGuardStartOffset() {
+size_t MemoryAllocator::CodePageGuardStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
   return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
 }
 
-int MemoryAllocator::CodePageGuardSize() {
+size_t MemoryAllocator::CodePageGuardSize() {
   return static_cast<int>(base::OS::CommitPageSize());
 }
 
-int MemoryAllocator::CodePageAreaStartOffset() {
+size_t MemoryAllocator::CodePageAreaStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
   return CodePageGuardStartOffset() + CodePageGuardSize();
 }
 
-int MemoryAllocator::CodePageAreaEndOffset() {
+size_t MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
   return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
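
For concreteness, here is the guard-page arithmetic above worked through with assumed numbers: a 4 KiB OS commit page, a 256-byte chunk header, and a 512 KiB code page (the real constants are platform-dependent):

#include <cstddef>
#include <cstdio>

constexpr size_t kCommitPageSize = 4 * 1024;  // assumed OS page size
constexpr size_t kObjectStartOffset = 256;    // assumed header size
constexpr size_t kPageSize = 512 * 1024;      // assumed code-page size

constexpr size_t RoundUp(size_t x, size_t m) { return (x + m - 1) / m * m; }

// First OS page after the header is protected as non-writable.
constexpr size_t guard_start =
    RoundUp(kObjectStartOffset, kCommitPageSize);          // 4096
constexpr size_t guard_size = kCommitPageSize;             // 4096
constexpr size_t area_start = guard_start + guard_size;    // 8192
// The last OS page is protected as well.
constexpr size_t area_end = kPageSize - kCommitPageSize;   // 520192

int main() {
  std::printf("usable code area: [%zu, %zu), %zu bytes\n", area_start,
              area_end, area_end - area_start);
}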
@@ -1216,19 +1216,19 @@ class MemoryAllocator {
     kPooledAndQueue,
   };
 
-  static int CodePageGuardStartOffset();
+  static size_t CodePageGuardStartOffset();
 
-  static int CodePageGuardSize();
+  static size_t CodePageGuardSize();
 
-  static int CodePageAreaStartOffset();
+  static size_t CodePageAreaStartOffset();
 
-  static int CodePageAreaEndOffset();
+  static size_t CodePageAreaEndOffset();
 
-  static int CodePageAreaSize() {
+  static size_t CodePageAreaSize() {
     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   }
 
-  static int PageAreaSize(AllocationSpace space) {
+  static size_t PageAreaSize(AllocationSpace space) {
     DCHECK_NE(LO_SPACE, space);
     return (space == CODE_SPACE) ? CodePageAreaSize()
                                  : Page::kAllocatableMemory;
@@ -1248,9 +1248,9 @@ class MemoryAllocator {
   // should be tried first.
   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
             typename SpaceType>
-  Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
+  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
 
-  LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
+  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                                Executability executable);
 
   template <MemoryAllocator::FreeMode mode = kFull>
@@ -1292,8 +1292,7 @@ class MemoryAllocator {
   // Returns a MemoryChunk in which the memory region from commit_area_size to
   // reserve_area_size of the chunk area is reserved but not committed, it
   // could be committed later by calling MemoryChunk::CommitArea.
-  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
-                             intptr_t commit_area_size,
+  MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
                              Executability executable, Space* space);
 
   void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
@@ -2099,7 +2098,7 @@ class PagedSpace : public Space {
   int CountTotalPages();
 
   // Return size of allocatable area on a page in this space.
-  inline int AreaSize() { return area_size_; }
+  inline int AreaSize() { return static_cast<int>(area_size_); }
 
   virtual bool is_local() { return false; }
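
The getter keeps its int return type while the backing field (changed below) becomes size_t, hence the static_cast. A sketch of a checked version of that narrowing, under the assumption that page areas always fit in an int; PagedSpaceLike is a hypothetical stand-in:

#include <cassert>
#include <cstddef>
#include <limits>

// Narrow size_t to int with a debug-time range check; mirrors the spirit
// of the cast in AreaSize() above.
int ToIntChecked(size_t v) {
  assert(v <= static_cast<size_t>(std::numeric_limits<int>::max()));
  return static_cast<int>(v);
}

struct PagedSpaceLike {
  size_t area_size_ = 0;
  int AreaSize() const { return ToIntChecked(area_size_); }
};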
@@ -2162,7 +2161,7 @@ class PagedSpace : public Space {
   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
 
-  int area_size_;
+  size_t area_size_;
 
   // Accounting information for this space.
   AllocationStats accounting_stats_;