Commit d61a5c37 authored by hpayer, committed by Commit bot

[heap] Uncommit unused large object page memory.

As a first step, I uncommit the memory on the main thread, in order to measure the impact and stability of this optimization. In a follow-up CL, the uncommitting should be moved to a concurrent thread.

BUG=

Review-Url: https://codereview.chromium.org/2032393002
Cr-Commit-Position: refs/heads/master@{#36763}
parent c0c3a231
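For illustration, here is a minimal standalone sketch of the underlying mechanism (hypothetical, not code from this CL), assuming a Linux-style POSIX system; it mirrors what the new VirtualMemory::ReleasePartialRegion does with munmap on the POSIX ports:

#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>

int main() {
  const std::size_t page = static_cast<std::size_t>(sysconf(_SC_PAGESIZE));
  const std::size_t total = 256 * page;
  // Reserve and commit a large region, as a stand-in for a large object page.
  void* base = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  // Suppose the object shrank and only the first 16 pages remain in use.
  const std::size_t used = 16 * page;
  char* free_start = static_cast<char*>(base) + used;
  // Unmapping the tail returns it to the OS; [base, base + used) stays mapped.
  int rc = munmap(free_start, total - used);
  assert(rc == 0);
  (void)rc;
  return munmap(base, used) == 0 ? 0 : 1;
}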
@@ -375,6 +375,14 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
                       kMmapFdOffset) != MAP_FAILED;
 }
 
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+                                         void* free_start, size_t free_size) {
+#if defined(LEAK_SANITIZER)
+  __lsan_unregister_root_region(base, size);
+  __lsan_register_root_region(base, size - free_size);
+#endif
+  return munmap(free_start, free_size) == 0;
+}
+
 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
 #if defined(LEAK_SANITIZER)
...
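Note on the LEAK_SANITIZER branch above: LSan tracks the reservation as a root region identified by its base and size, so the old region is unregistered and re-registered with the shrunken size. This assumes the freed bytes are the tail of the region, i.e. free_start == base + (size - free_size); the checks in ReleasePartial (in the header below) enforce exactly that layout.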
@@ -239,6 +239,10 @@ bool VirtualMemory::UncommitRegion(void* address, size_t size) {
                       kMmapFdOffset) != MAP_FAILED;
 }
 
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+                                         void* free_start, size_t free_size) {
+  return munmap(free_start, free_size) == 0;
+}
+
 bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
   return munmap(address, size) == 0;
...
@@ -1290,6 +1290,10 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
   return VirtualFree(base, size, MEM_DECOMMIT) != 0;
 }
 
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+                                         void* free_start, size_t free_size) {
+  return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
+}
+
 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
   return VirtualFree(base, 0, MEM_RELEASE) != 0;
...
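On Windows the partial release uses MEM_DECOMMIT rather than MEM_RELEASE: VirtualFree can only release an entire reservation at once (size 0 plus the original base), so the tail's physical storage is decommitted while its address range stays reserved. The eventual ReleaseRegion still frees the whole reservation, including any pages decommitted here.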
@@ -337,6 +337,23 @@ class VirtualMemory {
   // Creates a single guard page at the given address.
   bool Guard(void* address);
 
+  // Releases the memory after |free_start|.
+  void ReleasePartial(void* free_start) {
+    DCHECK(IsReserved());
+    // Notice: Order is important here. The VirtualMemory object might live
+    // inside the allocated region.
+    size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
+                           reinterpret_cast<size_t>(address_));
+    CHECK(InVM(free_start, size));
+    DCHECK_LT(address_, free_start);
+    DCHECK_LT(free_start, reinterpret_cast<void*>(
+                              reinterpret_cast<size_t>(address_) + size_));
+    bool result = ReleasePartialRegion(address_, size_, free_start, size);
+    USE(result);
+    DCHECK(result);
+    size_ -= size;
+  }
+
   void Release() {
     DCHECK(IsReserved());
     // Notice: Order is important here. The VirtualMemory object might live
@@ -369,6 +386,12 @@ class VirtualMemory {
   // and the same size it was reserved with.
   static bool ReleaseRegion(void* base, size_t size);
 
+  // Must be called with a base pointer that has been returned by ReserveRegion
+  // and the same size it was reserved with.
+  // [free_start, free_start + free_size] is the memory that will be released.
+  static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
+                                   size_t free_size);
+
   // Returns true if OS performs lazy commits, i.e. the memory allocation call
   // defers actual physical memory allocation till the first memory access.
   // Otherwise returns false.
...
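A worked example of ReleasePartial with hypothetical numbers: for address_ = 0x10000, size_ = 0x5000, and free_start = 0x13000, the local size evaluates to 0x5000 - 0x3000 = 0x2000, so the tail [0x13000, 0x15000) is released and size_ drops to 0x3000. Note that the local size is the byte count being freed, while the member size_ passed as the second argument to ReleasePartialRegion is still the full reservation size.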
@@ -584,6 +584,11 @@ bool MemoryChunk::CommitArea(size_t requested) {
   return true;
 }
 
+size_t MemoryChunk::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
+    return size();
+  return high_water_mark_.Value();
+}
 
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   MemoryChunk* other_next = other->next_chunk();
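The LO_SPACE special case above matters for this CL: high_water_mark_ reflects allocation progress and is never lowered when a large page is shrunk, whereas size() is updated by PartialFreeMemory below, so size() is the accurate committed figure for large object pages (and for any OS without lazy commits).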
@@ -751,6 +756,27 @@ void Page::ResetFreeListStatistics() {
   available_in_free_list_ = 0;
 }
 
+void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
+                                        Address start_free) {
+  // We do not allow partial shrink for code.
+  DCHECK(chunk->executable() == NOT_EXECUTABLE);
+
+  intptr_t size;
+  base::VirtualMemory* reservation = chunk->reserved_memory();
+  DCHECK(reservation->IsReserved());
+  size = static_cast<intptr_t>(reservation->size());
+
+  size_t to_free_size = size - (start_free - chunk->address());
+
+  DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size));
+  size_.Increment(-static_cast<intptr_t>(to_free_size));
+  isolate_->counters()->memory_allocated()->Decrement(
+      static_cast<int>(to_free_size));
+  chunk->set_size(size - to_free_size);
+
+  reservation->ReleasePartial(start_free);
+}
+
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
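Walking through the accounting with hypothetical numbers: for a 2 MB reservation whose surviving object needs only the first 64 KB (start_free = chunk->address() + 64 KB), to_free_size is 2 MB - 64 KB. The allocator-wide size_ counter and the memory_allocated counter both drop by that amount, and chunk->set_size records the remaining 64 KB before the reservation itself is shrunk via ReleasePartial.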
@@ -2884,6 +2910,18 @@ void PagedSpace::ResetCodeAndMetadataStatistics(Isolate* isolate) {
 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
 #endif
 
+Address LargePage::GetAddressToShrink() {
+  HeapObject* object = GetObject();
+  if (executable() == EXECUTABLE) {
+    return 0;
+  }
+  size_t used_size = RoundUp((object->address() - address()) + object->Size(),
+                             base::OS::CommitPageSize());
+  if (used_size < CommittedPhysicalMemory()) {
+    return address() + used_size;
+  }
+  return 0;
+}
+
 // -----------------------------------------------------------------------------
 // LargeObjectIterator
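A worked example with hypothetical numbers and a 4 KB commit page size: if the object ends 18,000 bytes past the page's base address, used_size = RoundUp(18000, 4096) = 20480; when more than 20480 bytes are committed, address() + 20480 is returned as the shrink boundary, otherwise 0. Executable large pages always return 0, matching the NOT_EXECUTABLE DCHECK in PartialFreeMemory.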
@@ -3034,7 +3072,6 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
   }
 }
 
-
 void LargeObjectSpace::FreeUnmarkedObjects() {
   LargePage* previous = NULL;
   LargePage* current = first_page_;
@@ -3043,6 +3080,11 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     DCHECK(!Marking::IsGrey(mark_bit));
     if (Marking::IsBlack(mark_bit)) {
+      Address free_start;
+      if ((free_start = current->GetAddressToShrink()) != 0) {
+        // TODO(hpayer): Perform partial free concurrently.
+        heap()->memory_allocator()->PartialFreeMemory(current, free_start);
+      }
       previous = current;
       current = current->next_page();
     } else {
...
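Per the commit message, the partial free above runs synchronously on the main thread during the large-object sweep; the TODO(hpayer) marks where a follow-up CL is expected to move this work to a concurrent thread.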
@@ -622,6 +622,7 @@ class MemoryChunk {
   }
 
   size_t size() const { return size_; }
+  void set_size(size_t size) { size_ = size; }
 
   inline Heap* heap() const { return heap_; }
@@ -654,7 +655,7 @@ class MemoryChunk {
   bool CommitArea(size_t requested);
 
   // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+  size_t CommittedPhysicalMemory();
 
   Address HighWaterMark() { return address() + high_water_mark_.Value(); }
@@ -992,6 +993,10 @@ class LargePage : public MemoryChunk {
   inline void set_next_page(LargePage* page) { set_next_chunk(page); }
 
+  // Uncommit memory that is not in use anymore by the object. If the object
+  // cannot be shrunk, 0 is returned.
+  Address GetAddressToShrink();
+
   // A limit to guarantee that we do not overflow typed slot offset in
   // the old to old remembered set.
   // Note that this limit is higher than what assembler already imposes on
@@ -1443,6 +1448,7 @@ class MemoryAllocator {
   bool CommitMemory(Address addr, size_t size, Executability executable);
 
   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
+  void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
   void FreeMemory(Address addr, size_t size, Executability executable);
 
   // Commit a contiguous block of memory from the initial chunk. Assumes that
...
@@ -6760,5 +6760,28 @@ TEST(Regress615489) {
   CHECK_LE(size_after, size_before);
 }
 
+TEST(UncommitUnusedLargeObjectMemory) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Heap* heap = CcTest::heap();
+  Isolate* isolate = heap->isolate();
+
+  Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
+  MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+  CHECK(chunk->owner()->identity() == LO_SPACE);
+
+  intptr_t size_before = array->Size();
+  size_t committed_memory_before = chunk->CommittedPhysicalMemory();
+
+  array->Shrink(1);
+  CHECK(array->Size() < size_before);
+
+  CcTest::heap()->CollectAllGarbage();
+  CHECK(chunk->CommittedPhysicalMemory() < committed_memory_before);
+  size_t shrinked_size =
+      RoundUp((array->address() - chunk->address()) + array->Size(),
+              base::OS::CommitPageSize());
+  CHECK_EQ(shrinked_size, chunk->CommittedPhysicalMemory());
+}
+
 }  // namespace internal
 }  // namespace v8
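On a 64-bit build, the 200,000-element FixedArray allocated by the new test is roughly 1.6 MB (8 bytes per slot plus header), well above the regular-page object limit, so the LO_SPACE check holds. After Shrink(1) and a full GC, the expected committed size is recomputed with the same RoundUp expression that LargePage::GetAddressToShrink uses, and must match exactly.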