Commit b0edf8e6 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr][heap] Fix TODOs about always using proper page allocator

Only read-only pages don't have a properly initialized reservation object.

Bug: v8:8096
Change-Id: I83f4baa414dc2ca5a397a9897088060b6cac4783
Reviewed-on: https://chromium-review.googlesource.com/1216562
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55763}
parent d8bdea05
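
For orientation before the diff: the change makes MemoryAllocator commit, uncommit, and free memory through the page allocator that actually owns each region, instead of falling back to the platform page allocator (the two removed TODOs below). What follows is a minimal, self-contained sketch of that pattern, using hypothetical stand-ins for v8::PageAllocator and v8::internal::VirtualMemory rather than the real V8 classes (POSIX-only, for illustration):

// Sketch only: simplified stand-ins, not the actual V8 implementation.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <sys/mman.h>

// Stand-in for v8::PageAllocator, reduced to POSIX mprotect/munmap.
class PageAllocator {
 public:
  enum Permission { kNoAccess, kReadWrite };
  bool SetPermissions(void* address, size_t size, Permission perm) {
    int prot = (perm == kReadWrite) ? (PROT_READ | PROT_WRITE) : PROT_NONE;
    return mprotect(address, size, prot) == 0;
  }
  bool FreePages(void* address, size_t size) {
    return munmap(address, size) == 0;
  }
};

// Stand-in for v8::internal::VirtualMemory: a region plus the allocator
// that reserved it.
class VirtualMemory {
 public:
  VirtualMemory(PageAllocator* allocator, uintptr_t address, size_t size)
      : page_allocator_(allocator), address_(address), size_(size) {}
  uintptr_t address() const { return address_; }
  size_t size() const { return size_; }
  bool SetPermissions(uintptr_t address, size_t size,
                      PageAllocator::Permission perm) {
    // Always talk to the allocator this reservation was created with.
    return page_allocator_->SetPermissions(reinterpret_cast<void*>(address),
                                           size, perm);
  }

 private:
  PageAllocator* page_allocator_;
  uintptr_t address_;
  size_t size_;
};

// Shape of the new API: commit/uncommit take the whole reservation, so
// callers no longer pass a (base, size) pair that could be mismatched
// with the wrong allocator.
bool CommitMemory(VirtualMemory* reservation) {
  return reservation->SetPermissions(reservation->address(),
                                     reservation->size(),
                                     PageAllocator::kReadWrite);
}

bool UncommitMemory(VirtualMemory* reservation) {
  return reservation->SetPermissions(reservation->address(),
                                     reservation->size(),
                                     PageAllocator::kNoAccess);
}

int main() {
  const size_t kPageSize = 4096;
  void* mem = mmap(nullptr, kPageSize, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return 1;
  PageAllocator allocator;
  VirtualMemory reservation(&allocator, reinterpret_cast<uintptr_t>(mem),
                            kPageSize);
  std::printf("commit: %d\n", CommitMemory(&reservation));
  std::printf("uncommit: %d\n", UncommitMemory(&reservation));
  allocator.FreePages(mem, kPageSize);
  return 0;
}

The design point: a raw (address, size) pair can be handed to the wrong allocator by a caller, while a reservation that carries its own allocator cannot.
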
@@ -370,29 +370,30 @@ size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
   return sum;
 }
 
-bool MemoryAllocator::CommitMemory(Address base, size_t size) {
-  // TODO(ishell): use proper page allocator
-  if (!SetPermissions(GetPlatformPageAllocator(), base, size,
-                      PageAllocator::kReadWrite)) {
+bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
+  Address base = reservation->address();
+  size_t size = reservation->size();
+  if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
+  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
   return true;
 }
 
-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
-                                 Executability executable) {
-  // Executability and page allocator must be in sync.
-  CHECK_EQ(reservation->page_allocator(), page_allocator(executable));
-  reservation->Free();
+bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
+  size_t size = reservation->size();
+  if (!reservation->SetPermissions(reservation->address(), size,
+                                   PageAllocator::kNoAccess)) {
+    return false;
+  }
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  return true;
 }
 
-void MemoryAllocator::FreeMemory(Address base, size_t size,
-                                 Executability executable) {
-  // TODO(ishell): use proper page allocator
-  CHECK(FreePages(page_allocator(executable), reinterpret_cast<void*>(base),
-                  size));
+void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
+                                 Address base, size_t size) {
+  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
 }
 
 Address MemoryAllocator::AllocateAlignedMemory(
@@ -772,7 +773,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
   if ((base + chunk_size) == 0u) {
     CHECK(!last_chunk_.IsReserved());
     last_chunk_.TakeControl(&reservation);
-    UncommitBlock(&last_chunk_, last_chunk_.address(), last_chunk_.size());
+    UncommitMemory(&last_chunk_);
     size_ -= chunk_size;
     if (executable == EXECUTABLE) {
       size_executable_ -= chunk_size;
@@ -972,13 +973,15 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
   VirtualMemory* reservation = chunk->reserved_memory();
   if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
-    UncommitBlock(reservation, reinterpret_cast<Address>(chunk),
-                  MemoryChunk::kPageSize);
+    UncommitMemory(reservation);
   } else {
     if (reservation->IsReserved()) {
-      FreeMemory(reservation, chunk->executable());
+      reservation->Free();
    } else {
+      // Only read-only pages can have non-initialized reservation object.
+      DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
-      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+      FreeMemory(page_allocator(chunk->executable()), chunk->address(),
+                 chunk->size());
     }
   }
 }
@@ -992,8 +995,9 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
       break;
     case kAlreadyPooled:
       // Pooled pages cannot be touched anymore as their memory is uncommitted.
-      FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
-                 Executability::NOT_EXECUTABLE);
+      // Pooled pages are not-executable.
+      FreeMemory(data_page_allocator(), chunk->address(),
+                 static_cast<size_t>(MemoryChunk::kPageSize));
       break;
     case kPooledAndQueue:
       DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
@@ -1061,36 +1065,19 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
   const Address start = reinterpret_cast<Address>(chunk);
   const Address area_start = start + MemoryChunk::kObjectStartOffset;
   const Address area_end = start + size;
-  if (!CommitBlock(start, size)) {
-    return nullptr;
-  }
+  // Pooled pages are always regular data pages.
+  DCHECK_NE(CODE_SPACE, owner->identity());
   VirtualMemory reservation(data_page_allocator(), start, size);
+  if (!CommitMemory(&reservation)) return nullptr;
+  if (Heap::ShouldZapGarbage()) {
+    ZapBlock(start, size, kZapValue);
+  }
   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                           NOT_EXECUTABLE, owner, std::move(reservation));
   size_ += size;
   return chunk;
 }
 
-bool MemoryAllocator::CommitBlock(Address start, size_t size) {
-  if (!CommitMemory(start, size)) return false;
-
-  if (Heap::ShouldZapGarbage()) {
-    ZapBlock(start, size, kZapValue);
-  }
-
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-  return true;
-}
-
-bool MemoryAllocator::UncommitBlock(VirtualMemory* reservation, Address start,
-                                    size_t size) {
-  if (!reservation->SetPermissions(start, size, PageAllocator::kNoAccess)) {
-    return false;
-  }
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-  return true;
-}
-
 void MemoryAllocator::ZapBlock(Address start, size_t size,
                                uintptr_t zap_value) {
   DCHECK_EQ(start % kPointerSize, 0);
...
@@ -1342,10 +1342,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
                                 size_t alignment, Executability executable,
                                 void* hint, VirtualMemory* controller);
 
-  bool CommitMemory(Address addr, size_t size);
-
-  void FreeMemory(VirtualMemory* reservation, Executability executable);
-  void FreeMemory(Address addr, size_t size, Executability executable);
+  void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
 
   // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
   // internally memory is freed from |start_free| to the end of the reservation.
@@ -1354,23 +1351,19 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                          size_t bytes_to_free, Address new_area_end);
 
-  // Commit a contiguous block of memory from the initial chunk. Assumes that
-  // the address is not kNullAddress, the size is greater than zero, and that
-  // the block is contained in the initial chunk. Returns true if it succeeded
-  // and false otherwise.
-  bool CommitBlock(Address start, size_t size);
-
   // Checks if an allocated MemoryChunk was intended to be used for executable
   // memory.
   bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
     return executable_memory_.find(chunk) != executable_memory_.end();
   }
 
-  // Uncommit a contiguous block of memory [start..(start+size)[.
-  // start is not kNullAddress, the size is greater than zero, and the
-  // block is contained in the initial chunk. Returns true if it succeeded
-  // and false otherwise.
-  bool UncommitBlock(VirtualMemory* reservation, Address start, size_t size);
+  // Commit memory region owned by given reservation object. Returns true if
+  // it succeeded and false otherwise.
+  bool CommitMemory(VirtualMemory* reservation);
+
+  // Uncommit memory region owned by given reservation object. Returns true if
+  // it succeeded and false otherwise.
+  bool UncommitMemory(VirtualMemory* reservation);
 
   // Zaps a contiguous block of memory [start..(start+size)[ with
   // a given zap value.
...