Commit 1b4934b9 authored by Michael Lippautz, committed by Commit Bot

Reland "[heap] Use partial free when shrinking instead of uncommitting"

This fixes the counter inconsistencies and makes use of the already existing
mechanism for partially releasing memory.

This reverts commit c8e6cdfd.

Bug: chromium:724947
Change-Id: I2a7b52a28654fd2524df502a353997393d4f53ac
Reviewed-on: https://chromium-review.googlesource.com/530369
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45890}
parent b490fd66
......@@ -633,22 +633,6 @@ void MemoryChunk::Unlink() {
set_next_chunk(NULL);
}
// Uncommits |bytes_to_shrink| bytes from the end of |chunk|'s usable area.
// Precondition: |bytes_to_shrink| is a non-zero multiple of the commit page
// size. Note: the chunk's overall size bookkeeping is deliberately NOT
// adjusted here — only |area_end_| moves (see comment below).
void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
// Start of the region to give back: [free_start, old area_end_).
Address free_start = chunk->area_end_ - bytes_to_shrink;
// Don't adjust the size of the page. The area is just uncommitted but not
// released.
chunk->area_end_ -= bytes_to_shrink;
UncommitBlock(free_start, bytes_to_shrink);
// For executable chunks, re-establish the guard at the new area end —
// presumably so accesses past the shrunk area trap. Uses the chunk's own
// reservation when it has one, otherwise guards via the OS directly.
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (chunk->reservation_.IsReserved())
chunk->reservation_.Guard(chunk->area_end_);
else
base::OS::Guard(chunk->area_end_, GetCommitPageSize());
}
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
......@@ -796,6 +780,11 @@ size_t Page::AvailableInFreeList() {
}
size_t Page::ShrinkToHighWaterMark() {
// Shrinking only makes sense outside of the CodeRange, where we don't care
// about address space fragmentation.
base::VirtualMemory* reservation = reserved_memory();
if (!reservation->IsReserved()) return 0;
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
......@@ -825,6 +814,7 @@ size_t Page::ShrinkToHighWaterMark() {
static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
reinterpret_cast<void*>(this),
......@@ -835,7 +825,8 @@ size_t Page::ShrinkToHighWaterMark() {
filler->address(),
static_cast<int>(area_end() - filler->address() - unused),
ClearRecordedSlots::kNo);
heap()->memory_allocator()->ShrinkChunk(this, unused);
heap()->memory_allocator()->PartialFreeMemory(
this, address() + size() - unused, unused);
CHECK(filler->IsFiller());
CHECK_EQ(filler->address() + filler->Size(), area_end());
}
......@@ -866,26 +857,24 @@ void Page::DestroyBlackArea(Address start, Address end) {
-static_cast<int>(end - start));
}
size_t MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
Address start_free) {
// We do not allow partial shrink for code.
DCHECK(chunk->executable() == NOT_EXECUTABLE);
intptr_t size;
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t bytes_to_free) {
base::VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
size = static_cast<intptr_t>(reservation->size());
size_t to_free_size = size - (start_free - chunk->address());
DCHECK(size_.Value() >= to_free_size);
size_.Decrement(to_free_size);
DCHECK_GE(size_.Value(), bytes_to_free);
size_.Decrement(bytes_to_free);
isolate_->counters()->memory_allocated()->Decrement(
static_cast<int>(to_free_size));
chunk->set_size(size - to_free_size);
static_cast<int>(bytes_to_free));
chunk->size_ -= bytes_to_free;
chunk->area_end_ -= bytes_to_free;
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(chunk->area_end_) %
static_cast<uintptr_t>(GetCommitPageSize()));
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + CodePageGuardSize());
chunk->reservation_.Guard(chunk->area_end_);
}
reservation->ReleasePartial(start_free);
return to_free_size;
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
......@@ -1409,9 +1398,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
// Do not account for the unused space as uncommitted because the counter
// is kept in sync with page size which is also not adjusted for those
// chunks.
AccountUncommitted(unused);
}
}
......@@ -3260,10 +3247,12 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
if ((free_start = current->GetAddressToShrink()) != 0) {
current->ClearOutOfLiveRangeSlots(free_start);
RemoveChunkMapEntries(current, free_start);
const size_t freed =
heap()->memory_allocator()->PartialFreeMemory(current, free_start);
size_ -= freed;
AccountUncommitted(freed);
const size_t bytes_to_free =
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(current, free_start,
bytes_to_free);
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
previous = current;
current = current->next_page();
......
......@@ -1347,8 +1347,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space);
void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
Address ReserveAlignedMemory(size_t requested, size_t alignment,
base::VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
......@@ -1360,8 +1358,12 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
// Returns the size of the freed memory in bytes.
size_t PartialFreeMemory(MemoryChunk* chunk, Address start_free);
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
// internally memory is freed from |start_free| to the end of the reservation.
// Additional memory beyond the page is not accounted though, so
// |bytes_to_free| is computed by the caller.
void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t bytes_to_free);
// Commit a contiguous block of memory from the initial chunk. Assumes that
// the address is not NULL, the size is greater than zero, and that the
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment