Commit 46a365fa authored by hpayer, committed by Commit bot

[heap] Reland uncommit unused large object page memory.

BUG=

Review-Url: https://codereview.chromium.org/2109943003
Cr-Commit-Position: refs/heads/master@{#37376}
parent fba1a1aa
@@ -61,6 +61,9 @@ class TemplateHashMapImpl {
Entry* LookupOrInsert(void* key, uint32_t hash,
AllocationPolicy allocator = AllocationPolicy());
Entry* InsertNew(void* key, uint32_t hash,
AllocationPolicy allocator = AllocationPolicy());
// Removes the entry with the matching key.
// Returns the value of the deleted entry,
// or null if there is no entry for the key.
@@ -134,6 +137,17 @@ TemplateHashMapImpl<AllocationPolicy>::LookupOrInsert(
return p;
}
return InsertNew(key, hash, allocator);
}
template <class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::InsertNew(void* key, uint32_t hash,
AllocationPolicy allocator) {
// Find a matching entry.
Entry* p = Probe(key, hash);
DCHECK(p->key == NULL);
// No entry found; insert one.
p->key = key;
p->value = NULL;
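For context, a minimal usage sketch contrasting the two insertion paths. It assumes the base::HashMap instantiation of this template and its PointersMatch helper as they existed at the time of this commit; keys and hashes are illustrative only.

#include <cstdint>
#include "src/base/hashmap.h"

void HashMapSketch() {
  using v8::base::HashMap;
  HashMap map(HashMap::PointersMatch);
  int a = 0, b = 0;
  uint32_t hash_a = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&a));
  // LookupOrInsert probes first and reuses an existing entry for the key.
  HashMap::Entry* entry = map.LookupOrInsert(&a, hash_a);
  entry->value = &a;
  // InsertNew skips the existence check: the caller guarantees the key is
  // absent (the DCHECK above enforces it), saving a probe-and-compare on
  // hot paths such as the chunk map registration further down.
  uint32_t hash_b = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&b));
  HashMap::Entry* fresh = map.InsertNew(&b, hash_b);
  fresh->value = &b;
}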
@@ -375,6 +375,14 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
void* free_start, size_t free_size) {
#if defined(LEAK_SANITIZER)
__lsan_unregister_root_region(base, size);
__lsan_register_root_region(base, size - free_size);
#endif
return munmap(free_start, free_size) == 0;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
#if defined(LEAK_SANITIZER)
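The POSIX path relies on munmap accepting any page-aligned subrange of an existing mapping; under LEAK_SANITIZER the root region is first re-registered with the shrunk size. A standalone sketch of the same pattern, with sizes assumed for illustration:

#include <cassert>
#include <sys/mman.h>

int main() {
  const size_t kPage = 4096;  // assumed commit page size
  const size_t kSize = 16 * kPage;
  void* base = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  // Unmap the last eight pages; the first half of the mapping stays valid.
  char* free_start = static_cast<char*>(base) + 8 * kPage;
  int rc = munmap(free_start, kSize - 8 * kPage);
  assert(rc == 0);
  (void)rc;
  static_cast<char*>(base)[0] = 1;  // the surviving prefix is still mapped
  return munmap(base, 8 * kPage);
}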
@@ -239,6 +239,10 @@ bool VirtualMemory::UncommitRegion(void* address, size_t size) {
kMmapFdOffset) != MAP_FAILED;
}
bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
void* free_start, size_t free_size) {
return munmap(free_start, free_size) == 0;
}
bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
@@ -1290,6 +1290,10 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
void* free_start, size_t free_size) {
return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return VirtualFree(base, 0, MEM_RELEASE) != 0;
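On Windows the partial path decommits rather than releases: VirtualFree with MEM_RELEASE only accepts the reservation base and a size of zero, so a subrange of a reservation can be decommitted but not unreserved. A standalone sketch, sizes assumed:

#include <cassert>
#include <windows.h>

int main() {
  SYSTEM_INFO info;
  GetSystemInfo(&info);
  const size_t kPage = info.dwPageSize;
  const size_t kSize = 16 * kPage;
  void* base = VirtualAlloc(nullptr, kSize, MEM_RESERVE | MEM_COMMIT,
                            PAGE_READWRITE);
  assert(base != nullptr);
  // Decommit the tail: physical pages go back to the OS, but the address
  // range stays reserved (MEM_RELEASE cannot take a subrange).
  char* free_start = static_cast<char*>(base) + 8 * kPage;
  BOOL ok = VirtualFree(free_start, kSize - 8 * kPage, MEM_DECOMMIT);
  assert(ok);
  (void)ok;
  // Releasing the reservation itself needs the original base and size 0.
  return VirtualFree(base, 0, MEM_RELEASE) ? 0 : 1;
}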
@@ -337,6 +337,23 @@ class VirtualMemory {
// Creates a single guard page at the given address.
bool Guard(void* address);
// Releases the memory after |free_start|.
void ReleasePartial(void* free_start) {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
reinterpret_cast<size_t>(address_));
CHECK(InVM(free_start, size));
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, reinterpret_cast<void*>(
reinterpret_cast<size_t>(address_) + size_));
bool result = ReleasePartialRegion(address_, size_, free_start, size);
USE(result);
DCHECK(result);
size_ -= size;
}
void Release() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
@@ -369,6 +386,12 @@ class VirtualMemory {
// and the same size it was reserved with.
static bool ReleaseRegion(void* base, size_t size);
// Must be called with a base pointer that has been returned by ReserveRegion
// and the same size it was reserved with.
// [free_start, free_start + free_size] is the memory that will be released.
static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
size_t free_size);
// Returns true if OS performs lazy commits, i.e. the memory allocation call
// defers actual physical memory allocation till the first memory access.
// Otherwise returns false.
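A minimal sketch of how the new member might be exercised, assuming V8-internal headers and the size-only reserving constructor of base::VirtualMemory; sizes are illustrative:

#include <cstdint>
#include "src/base/logging.h"
#include "src/base/platform/platform.h"

void ReleasePartialSketch() {
  using v8::base::OS;
  using v8::base::VirtualMemory;
  const size_t kPage = OS::CommitPageSize();
  VirtualMemory vm(16 * kPage);  // reserve 16 pages
  CHECK(vm.IsReserved());
  // Give back the last eight pages. The object keeps describing the
  // surviving prefix: size() drops from 16 to 8 pages.
  void* free_start = reinterpret_cast<void*>(
      reinterpret_cast<uintptr_t>(vm.address()) + 8 * kPage);
  vm.ReleasePartial(free_start);
  CHECK_EQ(8 * kPage, vm.size());
}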
@@ -570,6 +570,11 @@ bool MemoryChunk::CommitArea(size_t requested) {
return true;
}
size_t MemoryChunk::CommittedPhysicalMemory() {
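// Without lazy commits the whole chunk is committed up front. Large object
// pages are special-cased because shrinking them updates size() directly
// (see MemoryAllocator::PartialFreeMemory) instead of the high water mark.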
if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
return size();
return high_water_mark_.Value();
}
void MemoryChunk::InsertAfter(MemoryChunk* other) {
MemoryChunk* other_next = other->next_chunk();
@@ -737,6 +742,27 @@ void Page::ResetFreeListStatistics() {
available_in_free_list_ = 0;
}
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
Address start_free) {
// We do not allow partial shrink for code.
DCHECK(chunk->executable() == NOT_EXECUTABLE);
intptr_t size;
base::VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
size = static_cast<intptr_t>(reservation->size());
size_t to_free_size = size - (start_free - chunk->address());
DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size));
size_.Increment(-static_cast<intptr_t>(to_free_size));
isolate_->counters()->memory_allocated()->Decrement(
static_cast<int>(to_free_size));
chunk->set_size(size - to_free_size);
reservation->ReleasePartial(start_free);
}
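For concreteness, the accounting with assumed numbers: a reservation of 1,310,720 bytes shrunk at a page-aligned start_free 524,288 bytes past chunk->address() gives to_free_size = 1,310,720 - 524,288 = 786,432; size_ and the memory_allocated counter drop by that amount, chunk->set_size(524288) records the surviving prefix, and ReleasePartial unmaps the tail.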
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
@@ -2908,6 +2934,31 @@ void PagedSpace::ResetCodeAndMetadataStatistics(Isolate* isolate) {
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif
Address LargePage::GetAddressToShrink() {
HeapObject* object = GetObject();
if (executable() == EXECUTABLE) {
return 0;
}
size_t used_size = RoundUp((object->address() - address()) + object->Size(),
base::OS::CommitPageSize());
if (used_size < CommittedPhysicalMemory()) {
return address() + used_size;
}
return 0;
}
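A self-contained sketch of the used_size arithmetic; the 4 KiB commit page size and the 0x88 object start offset are assumed for illustration:

#include <cassert>
#include <cstdint>

constexpr uintptr_t RoundUpTo(uintptr_t x, uintptr_t multiple) {
  return (x + multiple - 1) & ~(multiple - 1);  // multiple is a power of two
}

int main() {
  const uintptr_t kCommitPageSize = 4096;             // assumed
  const uintptr_t page_start = 0x40000000;            // address()
  const uintptr_t object_start = page_start + 0x88;   // assumed header offset
  const uintptr_t object_size = 8 * 1024;             // object shrank to 8 KiB
  uintptr_t used_size =
      RoundUpTo((object_start - page_start) + object_size, kCommitPageSize);
  // 0x88 + 0x2000 rounds up to three full pages: 0x3000.
  assert(used_size == 0x3000);
  // Everything in [page_start + used_size, end of page) can be uncommitted,
  // provided used_size is below the page's committed size.
  return 0;
}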
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
if (old_to_new_slots() != nullptr) {
old_to_new_slots()->RemoveRange(
static_cast<int>(free_start - address()),
static_cast<int>(free_start + size() - address()));
}
if (old_to_old_slots() != nullptr) {
old_to_old_slots()->RemoveRange(
static_cast<int>(free_start - address()),
static_cast<int>(free_start + size() - address()));
}
}
// -----------------------------------------------------------------------------
// LargeObjectIterator
@@ -2981,16 +3032,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
page->set_next_page(first_page_);
first_page_ = page;
// Register all MemoryChunk::kAlignment-aligned chunks covered by
// this large page in the chunk map.
uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
for (uintptr_t key = base; key <= limit; key++) {
base::HashMap::Entry* entry = chunk_map_.LookupOrInsert(
reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
DCHECK(entry != NULL);
entry->value = page;
}
InsertChunkMapEntries(page);
HeapObject* object = page->GetObject();
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
@@ -3056,6 +3098,35 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
}
}
void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
// Register all MemoryChunk::kAlignment-aligned chunks covered by
// this large page in the chunk map.
uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
MemoryChunk::kAlignment;
for (uintptr_t key = start; key <= limit; key++) {
base::HashMap::Entry* entry = chunk_map_.InsertNew(
reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
DCHECK(entry != NULL);
entry->value = page;
}
}
void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
RemoveChunkMapEntries(page, page->address());
}
void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
Address free_start) {
uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start),
MemoryChunk::kAlignment) /
MemoryChunk::kAlignment;
uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
MemoryChunk::kAlignment;
for (uintptr_t key = start; key <= limit; key++) {
chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
}
}
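For intuition, the key arithmetic with an assumed 512 KiB MemoryChunk::kAlignment; the page address and size are illustrative. Every aligned slice overlapped by the large page gets an entry, and on shrinking the RoundUp keeps the slice that still contains live data:

#include <cstdint>
#include <iostream>

int main() {
  const uintptr_t kAlignment = 512 * 1024;      // assumed kAlignment
  const uintptr_t page = 0x40000000;            // kAlignment-aligned page
  const uintptr_t size = 3 * 1024 * 1024 + 16;  // odd-sized large page
  const uintptr_t start = page / kAlignment;
  const uintptr_t limit = (page + (size - 1)) / kAlignment;
  std::cout << "insert keys " << start << ".." << limit << "\n";  // 7 keys
  // Shrink down to one live page: free_start is rounded up, so the slice
  // still containing the object keeps its chunk map entry.
  const uintptr_t free_start = page + 4096;
  const uintptr_t remove_from = (free_start + kAlignment - 1) / kAlignment;
  std::cout << "remove keys " << remove_from << ".." << limit << "\n";  // 6 keys
  return 0;
}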
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* previous = NULL;
@@ -3065,6 +3136,13 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
MarkBit mark_bit = Marking::MarkBitFrom(object);
DCHECK(!Marking::IsGrey(mark_bit));
if (Marking::IsBlack(mark_bit)) {
Address free_start;
if ((free_start = current->GetAddressToShrink()) != 0) {
// TODO(hpayer): Perform partial free concurrently.
current->ClearOutOfLiveRangeSlots(free_start);
RemoveChunkMapEntries(current, free_start);
heap()->memory_allocator()->PartialFreeMemory(current, free_start);
}
previous = current;
current = current->next_page();
} else {
@@ -3083,17 +3161,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
objects_size_ -= object->Size();
page_count_--;
// Remove entries belonging to this page.
// Use variable alignment to help pass length check (<= 80 characters)
// of single line in tools/presubmit.py.
const intptr_t alignment = MemoryChunk::kAlignment;
uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
uintptr_t limit = base + (page->size() - 1) / alignment;
for (uintptr_t key = base; key <= limit; key++) {
chunk_map_.Remove(reinterpret_cast<void*>(key),
static_cast<uint32_t>(key));
}
RemoveChunkMapEntries(page);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
}
@@ -628,6 +628,7 @@ class MemoryChunk {
}
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
inline Heap* heap() const { return heap_; }
@@ -663,7 +664,7 @@
bool CommitArea(size_t requested);
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
size_t CommittedPhysicalMemory();
Address HighWaterMark() { return address() + high_water_mark_.Value(); }
@@ -1003,6 +1004,12 @@ class LargePage : public MemoryChunk {
inline void set_next_page(LargePage* page) { set_next_chunk(page); }
// Uncommit memory that is no longer in use by the object. If the object
// cannot be shrunk, 0 is returned.
Address GetAddressToShrink();
void ClearOutOfLiveRangeSlots(Address free_start);
// A limit to guarantee that we do not overflow typed slot offset in
// the old to old remembered set.
// Note that this limit is higher than what assembler already imposes on
@@ -1454,6 +1461,7 @@ class MemoryAllocator {
bool CommitMemory(Address addr, size_t size, Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
void FreeMemory(Address addr, size_t size, Executability executable);
// Commit a contiguous block of memory from the initial chunk. Assumes that
@@ -3075,6 +3083,10 @@ class LargeObjectSpace : public Space {
// Frees unmarked objects.
void FreeUnmarkedObjects();
void InsertChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page, Address free_start);
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject* obj);
// Checks whether an address is in the object area in this space. Iterates
@@ -6791,5 +6791,28 @@ TEST(Regress618958) {
!heap->incremental_marking()->IsStopped()));
}
TEST(UncommitUnusedLargeObjectMemory) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
CHECK(chunk->owner()->identity() == LO_SPACE);
intptr_t size_before = array->Size();
size_t committed_memory_before = chunk->CommittedPhysicalMemory();
array->Shrink(1);
CHECK(array->Size() < size_before);
CcTest::heap()->CollectAllGarbage();
CHECK(chunk->CommittedPhysicalMemory() < committed_memory_before);
size_t shrunk_size =
RoundUp((array->address() - chunk->address()) + array->Size(),
base::OS::CommitPageSize());
CHECK_EQ(shrunk_size, chunk->CommittedPhysicalMemory());
}
} // namespace internal
} // namespace v8
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --invoke-weak-callbacks --omit-quit --gc-interval=355 --expose-debug-as=debug
var __v_33 = {};
__v_4 = 70000;
function __f_18() {
if ((__v_7 % 50) != 0) {
} else {
return __v_33 + 0.5;
}
}
function __f_17(a) {
for (var __v_7= 0; __v_7 < __v_4; ++__v_7 ) {
a[__v_7] = __f_18();
}
}
for (var __v_7= 0; __v_7 < __v_4; __v_7 += 500 ) {
}
__v_9 = new Array();
__f_17(__v_9);
__v_9.length = 100;
Debug = debug.Debug
function __f_26() {
}
__v_29 = "(function() {\
})()";