Commit 18ad43c7 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Move the chunk map to CodeLargeObjectSpace.

Only Heap::GcSafeFindCodeForInnerPointer requires the chunk map.
Other large object spaces use the more efficient
MemoryChunk::FromAnyPointerAddress.

Additionally, this patch renames Register/Unregister to AddPage/RemovePage
to be consistent with other spaces and makes them virtual.

Bug: chromium:852420
Change-Id: I8d637bb59e15bd61fe452fda7f4a55049d32030c
Reviewed-on: https://chromium-review.googlesource.com/c/1439417
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59207}
parent 99936546
...@@ -3417,11 +3417,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap) ...@@ -3417,11 +3417,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {} : LargeObjectSpace(heap, LO_SPACE) {}
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id), : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}
size_(0),
page_count_(0),
objects_size_(0),
chunk_map_(1024) {}
void LargeObjectSpace::TearDown() { void LargeObjectSpace::TearDown() {
while (!memory_chunk_list_.Empty()) { while (!memory_chunk_list_.Empty()) {
...@@ -3472,7 +3468,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size, ...@@ -3472,7 +3468,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
if (page == nullptr) return nullptr; if (page == nullptr) return nullptr;
DCHECK_GE(page->area_size(), static_cast<size_t>(object_size)); DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
Register(page, object_size); AddPage(page, object_size);
HeapObject object = page->GetObject(); HeapObject object = page->GetObject();
...@@ -3490,29 +3486,17 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() { ...@@ -3490,29 +3486,17 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
return CommittedMemory(); return CommittedMemory();
} }
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
// GC support
Object LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
if (page != nullptr) {
return page->GetObject();
}
return Smi::kZero; // Signaling not found.
}
LargePage* LargeObjectSpace::FindPage(Address a) {
const Address key = MemoryChunk::FromAddress(a)->address(); const Address key = MemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key); auto it = chunk_map_.find(key);
if (it != chunk_map_.end()) { if (it != chunk_map_.end()) {
LargePage* page = it->second; LargePage* page = it->second;
if (page->Contains(a)) { CHECK(page->Contains(a));
return page; return page;
} }
}
return nullptr; return nullptr;
} }
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() { void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state = IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state(); heap()->incremental_marking()->non_atomic_marking_state();
...@@ -3529,10 +3513,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() { ...@@ -3529,10 +3513,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
} }
} }
void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) { void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
// There may be concurrent access on the chunk map. We have to take the lock
// here.
base::MutexGuard guard(&chunk_map_mutex_);
for (Address current = reinterpret_cast<Address>(page); for (Address current = reinterpret_cast<Address>(page);
current < reinterpret_cast<Address>(page) + page->size(); current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) { current += MemoryChunk::kPageSize) {
...@@ -3540,13 +3521,8 @@ void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) { ...@@ -3540,13 +3521,8 @@ void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
} }
} }
void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) { void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
RemoveChunkMapEntries(page, page->address()); for (Address current = page->address();
}
void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
Address free_start) {
for (Address current = ::RoundUp(free_start, MemoryChunk::kPageSize);
current < reinterpret_cast<Address>(page) + page->size(); current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) { current += MemoryChunk::kPageSize) {
chunk_map_.erase(current); chunk_map_.erase(current);
...@@ -3559,32 +3535,27 @@ void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) { ...@@ -3559,32 +3535,27 @@ void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE)); DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE)); DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
size_t object_size = static_cast<size_t>(page->GetObject()->Size()); size_t object_size = static_cast<size_t>(page->GetObject()->Size());
reinterpret_cast<NewLargeObjectSpace*>(page->owner()) static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
->Unregister(page, object_size); AddPage(page, object_size);
Register(page, object_size);
page->ClearFlag(MemoryChunk::FROM_PAGE); page->ClearFlag(MemoryChunk::FROM_PAGE);
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking()); page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->set_owner(this); page->set_owner(this);
} }
void LargeObjectSpace::Register(LargePage* page, size_t object_size) { void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
size_ += static_cast<int>(page->size()); size_ += static_cast<int>(page->size());
AccountCommitted(page->size()); AccountCommitted(page->size());
objects_size_ += object_size; objects_size_ += object_size;
page_count_++; page_count_++;
memory_chunk_list_.PushBack(page); memory_chunk_list_.PushBack(page);
InsertChunkMapEntries(page);
} }
void LargeObjectSpace::Unregister(LargePage* page, size_t object_size) { void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
size_ -= static_cast<int>(page->size()); size_ -= static_cast<int>(page->size());
AccountUncommitted(page->size()); AccountUncommitted(page->size());
objects_size_ -= object_size; objects_size_ -= object_size;
page_count_--; page_count_--;
memory_chunk_list_.Remove(page); memory_chunk_list_.Remove(page);
RemoveChunkMapEntries(page);
} }
void LargeObjectSpace::FreeUnmarkedObjects() { void LargeObjectSpace::FreeUnmarkedObjects() {
...@@ -3593,20 +3564,19 @@ void LargeObjectSpace::FreeUnmarkedObjects() { ...@@ -3593,20 +3564,19 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
heap()->incremental_marking()->non_atomic_marking_state(); heap()->incremental_marking()->non_atomic_marking_state();
// Right-trimming does not update the objects_size_ counter. We are lazily // Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC. // updating it after every GC.
objects_size_ = 0; size_t surviving_object_size = 0;
while (current) { while (current) {
LargePage* next_current = current->next_page(); LargePage* next_current = current->next_page();
HeapObject object = current->GetObject(); HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object)); DCHECK(!marking_state->IsGrey(object));
size_t size = static_cast<size_t>(object->Size());
if (marking_state->IsBlack(object)) { if (marking_state->IsBlack(object)) {
Address free_start; Address free_start;
size_t size = static_cast<size_t>(object->Size()); surviving_object_size += size;
objects_size_ += size;
if ((free_start = current->GetAddressToShrink(object->address(), size)) != if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
0) { 0) {
DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE)); DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start); current->ClearOutOfLiveRangeSlots(free_start);
RemoveChunkMapEntries(current, free_start);
const size_t bytes_to_free = const size_t bytes_to_free =
current->size() - (free_start - current->address()); current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory( heap()->memory_allocator()->PartialFreeMemory(
...@@ -3616,19 +3586,13 @@ void LargeObjectSpace::FreeUnmarkedObjects() { ...@@ -3616,19 +3586,13 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
AccountUncommitted(bytes_to_free); AccountUncommitted(bytes_to_free);
} }
} else { } else {
memory_chunk_list_.Remove(current); RemovePage(current, size);
// Free the chunk.
size_ -= static_cast<int>(current->size());
AccountUncommitted(current->size());
page_count_--;
RemoveChunkMapEntries(current);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>( heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
current); current);
} }
current = next_current; current = next_current;
} }
objects_size_ = surviving_object_size;
} }
bool LargeObjectSpace::Contains(HeapObject object) { bool LargeObjectSpace::Contains(HeapObject object) {
...@@ -3636,11 +3600,18 @@ bool LargeObjectSpace::Contains(HeapObject object) { ...@@ -3636,11 +3600,18 @@ bool LargeObjectSpace::Contains(HeapObject object) {
bool owned = (chunk->owner() == this); bool owned = (chunk->owner() == this);
SLOW_DCHECK(!owned || FindObject(object->address())->IsHeapObject()); SLOW_DCHECK(!owned || ContainsSlow(object->address()));
return owned; return owned;
} }
bool LargeObjectSpace::ContainsSlow(Address addr) {
for (LargePage* page : *this) {
if (page->Contains(addr)) return true;
}
return false;
}
std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() { std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this)); return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
} }
...@@ -3793,7 +3764,7 @@ void NewLargeObjectSpace::FreeAllObjects() { ...@@ -3793,7 +3764,7 @@ void NewLargeObjectSpace::FreeAllObjects() {
LargePage* current = first_page(); LargePage* current = first_page();
while (current) { while (current) {
LargePage* next_current = current->next_page(); LargePage* next_current = current->next_page();
Unregister(current, static_cast<size_t>(current->GetObject()->Size())); RemovePage(current, static_cast<size_t>(current->GetObject()->Size()));
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>( heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
current); current);
current = next_current; current = next_current;
...@@ -3804,11 +3775,22 @@ void NewLargeObjectSpace::FreeAllObjects() { ...@@ -3804,11 +3775,22 @@ void NewLargeObjectSpace::FreeAllObjects() {
} }
CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap) CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, CODE_LO_SPACE) {} : LargeObjectSpace(heap, CODE_LO_SPACE),
chunk_map_(kInitialChunkMapCapacity) {}
AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) { AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE); return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
} }
void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
LargeObjectSpace::AddPage(page, object_size);
InsertChunkMapEntries(page);
}
void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
RemoveChunkMapEntries(page);
LargeObjectSpace::RemovePage(page, object_size);
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -3025,37 +3025,25 @@ class LargeObjectSpace : public Space { ...@@ -3025,37 +3025,25 @@ class LargeObjectSpace : public Space {
int PageCount() { return page_count_; } int PageCount() { return page_count_; }
// Finds an object for a given address, returns a Smi if it is not found.
// The function iterates through all objects in this space, may be slow.
Object FindObject(Address a);
// Finds a large object page containing the given address, returns nullptr
// if such a page doesn't exist.
LargePage* FindPage(Address a);
// Clears the marking state of live objects. // Clears the marking state of live objects.
void ClearMarkingStateOfLiveObjects(); void ClearMarkingStateOfLiveObjects();
// Frees unmarked objects. // Frees unmarked objects.
void FreeUnmarkedObjects(); void FreeUnmarkedObjects();
void InsertChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page, Address free_start);
void PromoteNewLargeObject(LargePage* page); void PromoteNewLargeObject(LargePage* page);
// Checks whether a heap object is in this space; O(1). // Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject obj); bool Contains(HeapObject obj);
// Checks whether an address is in the object area in this space. Iterates // Checks whether an address is in the object area in this space. Iterates
// all objects in the space. May be slow. // all objects in the space. May be slow.
bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); } bool ContainsSlow(Address addr);
// Checks whether the space is empty. // Checks whether the space is empty.
bool IsEmpty() { return first_page() == nullptr; } bool IsEmpty() { return first_page() == nullptr; }
void Register(LargePage* page, size_t object_size); virtual void AddPage(LargePage* page, size_t object_size);
void Unregister(LargePage* page, size_t object_size); virtual void RemovePage(LargePage* page, size_t object_size);
LargePage* first_page() { LargePage* first_page() {
return reinterpret_cast<LargePage*>(Space::first_page()); return reinterpret_cast<LargePage*>(Space::first_page());
...@@ -3069,8 +3057,6 @@ class LargeObjectSpace : public Space { ...@@ -3069,8 +3057,6 @@ class LargeObjectSpace : public Space {
std::unique_ptr<ObjectIterator> GetObjectIterator() override; std::unique_ptr<ObjectIterator> GetObjectIterator() override;
base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
virtual void Verify(Isolate* isolate); virtual void Verify(Isolate* isolate);
#endif #endif
...@@ -3089,13 +3075,6 @@ class LargeObjectSpace : public Space { ...@@ -3089,13 +3075,6 @@ class LargeObjectSpace : public Space {
size_t objects_size_; // size of objects size_t objects_size_; // size of objects
private: private:
// The chunk_map_mutex_ has to be used when the chunk map is accessed
// concurrently.
base::Mutex chunk_map_mutex_;
// Page-aligned addresses to their corresponding LargePage.
std::unordered_map<Address, LargePage*> chunk_map_;
friend class LargeObjectIterator; friend class LargeObjectIterator;
}; };
...@@ -3119,6 +3098,22 @@ class CodeLargeObjectSpace : public LargeObjectSpace { ...@@ -3119,6 +3098,22 @@ class CodeLargeObjectSpace : public LargeObjectSpace {
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRaw(int object_size); AllocateRaw(int object_size);
// Finds a large object page containing the given address, returns nullptr
// if such a page doesn't exist.
LargePage* FindPage(Address a);
protected:
void AddPage(LargePage* page, size_t object_size) override;
void RemovePage(LargePage* page, size_t object_size) override;
private:
static const size_t kInitialChunkMapCapacity = 1024;
void InsertChunkMapEntries(LargePage* page);
void RemoveChunkMapEntries(LargePage* page);
// Page-aligned addresses to their corresponding LargePage.
std::unordered_map<Address, LargePage*> chunk_map_;
}; };
class LargeObjectIterator : public ObjectIterator { class LargeObjectIterator : public ObjectIterator {
......
...@@ -5355,7 +5355,7 @@ TEST(Regress598319) { ...@@ -5355,7 +5355,7 @@ TEST(Regress598319) {
CHECK_EQ(arr.get()->length(), kNumberOfObjects); CHECK_EQ(arr.get()->length(), kNumberOfObjects);
CHECK(heap->lo_space()->Contains(arr.get())); CHECK(heap->lo_space()->Contains(arr.get()));
LargePage* page = heap->lo_space()->FindPage(arr.get()->address()); LargePage* page = LargePage::FromHeapObject(arr.get());
CHECK_NOT_NULL(page); CHECK_NOT_NULL(page);
// GC to cleanup state // GC to cleanup state
......
...@@ -315,8 +315,6 @@ TEST(LargeObjectSpace) { ...@@ -315,8 +315,6 @@ TEST(LargeObjectSpace) {
CHECK(lo->Contains(HeapObject::cast(obj))); CHECK(lo->Contains(HeapObject::cast(obj)));
CHECK(lo->FindObject(ho->address()) == obj);
CHECK(lo->Contains(ho)); CHECK(lo->Contains(ho));
while (true) { while (true) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment