Commit bc13af92 authored by Michael Lippautz, committed by Commit Bot

[heap] LargeObjectSpace: Move chunk map from HashMap to unordered_map

Bug: 
Change-Id: Ied0ef1fc7fbcd9f58d793b9b2ecd87ae6c549dca
Reviewed-on: https://chromium-review.googlesource.com/635590
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47644}
parent a33b0d25
...@@ -3272,7 +3272,7 @@ HeapObject* LargeObjectIterator::Next() { ...@@ -3272,7 +3272,7 @@ HeapObject* LargeObjectIterator::Next() {
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
first_page_(NULL), first_page_(nullptr),
size_(0), size_(0),
page_count_(0), page_count_(0),
objects_size_(0), objects_size_(0),
...@@ -3280,17 +3280,10 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) ...@@ -3280,17 +3280,10 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
LargeObjectSpace::~LargeObjectSpace() {} LargeObjectSpace::~LargeObjectSpace() {}
bool LargeObjectSpace::SetUp() { bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
page_count_ = 0;
objects_size_ = 0;
chunk_map_.Clear();
return true; return true;
} }
void LargeObjectSpace::TearDown() { void LargeObjectSpace::TearDown() {
while (first_page_ != NULL) { while (first_page_ != NULL) {
LargePage* page = first_page_; LargePage* page = first_page_;
...@@ -3372,18 +3365,16 @@ LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) { ...@@ -3372,18 +3365,16 @@ LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
} }
LargePage* LargeObjectSpace::FindPage(Address a) { LargePage* LargeObjectSpace::FindPage(Address a) {
uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; const Address key = MemoryChunk::FromAddress(a)->address();
base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), auto it = chunk_map_.find(reinterpret_cast<Address>(key));
static_cast<uint32_t>(key)); if (it != chunk_map_.end()) {
if (e != NULL) { LargePage* page = it->second;
DCHECK(e->value != NULL);
LargePage* page = reinterpret_cast<LargePage*>(e->value);
DCHECK(LargePage::IsValid(page)); DCHECK(LargePage::IsValid(page));
if (page->Contains(a)) { if (page->Contains(a)) {
return page; return page;
} }
} }
return NULL; return nullptr;
} }
...@@ -3403,19 +3394,13 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() { ...@@ -3403,19 +3394,13 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
} }
void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) { void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
// Register all MemoryChunk::kAlignment-aligned chunks covered by
// this large page in the chunk map.
uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
MemoryChunk::kAlignment;
// There may be concurrent access on the chunk map. We have to take the lock // There may be concurrent access on the chunk map. We have to take the lock
// here. // here.
base::LockGuard<base::Mutex> guard(&chunk_map_mutex_); base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
for (uintptr_t key = start; key <= limit; key++) { for (Address current = reinterpret_cast<Address>(page);
base::HashMap::Entry* entry = chunk_map_.InsertNew( current < reinterpret_cast<Address>(page) + page->size();
reinterpret_cast<void*>(key), static_cast<uint32_t>(key)); current += MemoryChunk::kPageSize) {
DCHECK(entry != NULL); chunk_map_[current] = page;
entry->value = page;
} }
} }
...@@ -3425,13 +3410,11 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) { ...@@ -3425,13 +3410,11 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page, void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
Address free_start) { Address free_start) {
uintptr_t start = ::RoundUp(reinterpret_cast<uintptr_t>(free_start), for (Address current = reinterpret_cast<Address>(::RoundUp(
MemoryChunk::kAlignment) / reinterpret_cast<uintptr_t>(free_start), MemoryChunk::kPageSize));
MemoryChunk::kAlignment; current < reinterpret_cast<Address>(page) + page->size();
uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) / current += MemoryChunk::kPageSize) {
MemoryChunk::kAlignment; chunk_map_.erase(current);
for (uintptr_t key = start; key <= limit; key++) {
chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
} }
} }
......
...@@ -8,13 +8,13 @@ ...@@ -8,13 +8,13 @@
#include <list> #include <list>
#include <map> #include <map>
#include <memory> #include <memory>
#include <unordered_map>
#include <unordered_set> #include <unordered_set>
#include "src/allocation.h" #include "src/allocation.h"
#include "src/base/atomic-utils.h" #include "src/base/atomic-utils.h"
#include "src/base/atomicops.h" #include "src/base/atomicops.h"
#include "src/base/bits.h" #include "src/base/bits.h"
#include "src/base/hashmap.h"
#include "src/base/iterator.h" #include "src/base/iterator.h"
#include "src/base/platform/mutex.h" #include "src/base/platform/mutex.h"
#include "src/cancelable-task.h" #include "src/cancelable-task.h"
...@@ -2954,8 +2954,8 @@ class LargeObjectSpace : public Space { ...@@ -2954,8 +2954,8 @@ class LargeObjectSpace : public Space {
// The chunk_map_mutex_ has to be used when the chunk map is accessed // The chunk_map_mutex_ has to be used when the chunk map is accessed
// concurrently. // concurrently.
base::Mutex chunk_map_mutex_; base::Mutex chunk_map_mutex_;
// Map MemoryChunk::kAlignment-aligned chunks to large pages covering them // Page-aligned addresses to their corresponding LargePage.
base::HashMap chunk_map_; std::unordered_map<Address, LargePage*> chunk_map_;
friend class LargeObjectIterator; friend class LargeObjectIterator;
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment