Commit b7b2fd4c authored by vegorov@chromium.org

Implement a hash-based look-up to speed up the containing-address check in
large object space. Previously this was a linked-list based look-up, which
made the function show up as somewhat hot in profiles.

BUG=v8:853
TEST=

Review URL: https://chromiumcodereview.appspot.com/9634005
Patch from Zhongping Wang <kewpie.w.zp@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11084 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent cfb0ab5a
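To make the idea in the commit message concrete before the diff: the patch keys a hash map by MemoryChunk::kAlignment-sized address slots and registers every slot a large page covers, so the containing-page query becomes one hash lookup plus a bounds check instead of a walk over the linked list of pages. Below is a minimal, self-contained sketch of that scheme, not V8 code: std::unordered_map stands in for V8's internal HashMap, and the names FakePage, ChunkMap and the kAlignment value are illustrative assumptions.

// A minimal sketch of the chunk-map scheme used by this patch (illustrative
// only, not V8 code): every kAlignment-sized slot covered by a large page
// maps to that page, so "which page contains this address?" is a single
// hash lookup. std::unordered_map stands in for V8's HashMap; FakePage,
// ChunkMap and the kAlignment value are assumptions.
#include <cassert>
#include <cstdint>
#include <unordered_map>

namespace {

const uintptr_t kAlignment = 1 << 20;  // stand-in for MemoryChunk::kAlignment

struct FakePage {
  uintptr_t start;
  uintptr_t size;
  bool Contains(uintptr_t a) const { return a >= start && a < start + size; }
};

class ChunkMap {
 public:
  // Register every kAlignment-aligned slot covered by the page, mirroring
  // what LargeObjectSpace::AllocateRaw does after linking in a new page.
  void RegisterPage(FakePage* page) {
    uintptr_t base = page->start / kAlignment;
    uintptr_t limit = base + (page->size - 1) / kAlignment;
    for (uintptr_t key = base; key <= limit; key++) map_[key] = page;
  }

  // Drop the page's slots again, as FreeUnmarkedObjects does before freeing.
  void UnregisterPage(FakePage* page) {
    uintptr_t base = page->start / kAlignment;
    uintptr_t limit = base + (page->size - 1) / kAlignment;
    for (uintptr_t key = base; key <= limit; key++) map_.erase(key);
  }

  // Expected O(1): hash lookup plus a bounds check, replacing the old
  // linked-list walk.
  FakePage* FindPage(uintptr_t a) const {
    std::unordered_map<uintptr_t, FakePage*>::const_iterator it =
        map_.find(a / kAlignment);
    if (it != map_.end() && it->second->Contains(a)) return it->second;
    return NULL;
  }

 private:
  std::unordered_map<uintptr_t, FakePage*> map_;
};

}  // namespace

int main() {
  ChunkMap chunk_map;
  FakePage page = { 4 * kAlignment, 3 * kAlignment + 128 };
  chunk_map.RegisterPage(&page);
  assert(chunk_map.FindPage(page.start + 42) == &page);        // inside page
  assert(chunk_map.FindPage(page.start + page.size) == NULL);  // just past it
  chunk_map.UnregisterPage(&page);
  assert(chunk_map.FindPage(page.start + 42) == NULL);         // removed
  return 0;
}

Because a large page can span several alignment units, it gets several map entries; this is why both AllocateRaw and FreeUnmarkedObjects in the diff below loop from base to limit when registering and removing a page.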
@@ -1302,7 +1302,7 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
     Address inner_pointer) {
   Heap* heap = isolate_->heap();
   // Check if the inner pointer points into a large object chunk.
-  LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
+  LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
   if (large_page != NULL) {
     return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
   }
@@ -2525,6 +2525,10 @@ HeapObject* LargeObjectIterator::Next() {
 // -----------------------------------------------------------------------------
 // LargeObjectSpace

+static bool ComparePointers(void* key1, void* key2) {
+  return key1 == key2;
+}
+
 LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                    intptr_t max_capacity,
@@ -2534,7 +2538,8 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap,
       first_page_(NULL),
       size_(0),
       page_count_(0),
-      objects_size_(0) {}
+      objects_size_(0),
+      chunk_map_(ComparePointers, 1024) {}

 bool LargeObjectSpace::SetUp() {
@@ -2542,6 +2547,7 @@ bool LargeObjectSpace::SetUp() {
   size_ = 0;
   page_count_ = 0;
   objects_size_ = 0;
+  chunk_map_.Clear();
   return true;
 }
@@ -2585,6 +2591,17 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
   page->set_next_page(first_page_);
   first_page_ = page;

+  // Register all MemoryChunk::kAlignment-aligned chunks covered by
+  // this large page in the chunk map.
+  uintptr_t base = reinterpret_cast<uintptr_t>(page)/MemoryChunk::kAlignment;
+  uintptr_t limit = base + (page->size()-1)/MemoryChunk::kAlignment;
+  for (uintptr_t key = base; key <= limit; key++) {
+    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                              key, true);
+    ASSERT(entry != NULL);
+    entry->value = page;
+  }
+
   HeapObject* object = page->GetObject();

 #ifdef DEBUG
@@ -2601,27 +2618,24 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
 // GC support
 MaybeObject* LargeObjectSpace::FindObject(Address a) {
-  for (LargePage* page = first_page_;
-       page != NULL;
-       page = page->next_page()) {
-    Address page_address = page->address();
-    if (page_address <= a && a < page_address + page->size()) {
-      return page->GetObject();
-    }
+  LargePage* page = FindPage(a);
+  if (page != NULL) {
+    return page->GetObject();
   }
   return Failure::Exception();
 }

-LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
-  // TODO(853): Change this implementation to only find executable
-  // chunks and use some kind of hash-based approach to speed it up.
-  for (LargePage* chunk = first_page_;
-       chunk != NULL;
-       chunk = chunk->next_page()) {
-    Address chunk_address = chunk->address();
-    if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
-      return chunk;
+LargePage* LargeObjectSpace::FindPage(Address a) {
+  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
+  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                        key, false);
+  if (e != NULL) {
+    ASSERT(e->value != NULL);
+    LargePage* page = reinterpret_cast<LargePage*>(e->value);
+    ASSERT(page->is_valid());
+    if (page->Contains(a)) {
+      return page;
     }
   }
   return NULL;
@@ -2659,6 +2673,16 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       objects_size_ -= object->Size();
       page_count_--;

+      // Remove entries belonging to this page.
+      // Use variable alignment to help pass length check (<= 80 characters)
+      // of single line in tools/presubmit.py.
+      const intptr_t alignment = MemoryChunk::kAlignment;
+      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
+      uintptr_t limit = base + (page->size()-1)/alignment;
+      for (uintptr_t key = base; key <= limit; key++) {
+        chunk_map_.Remove(reinterpret_cast<void*>(key), key);
+      }
+
       if (is_pointer_object) {
         heap()->QueueMemoryChunkForFree(page);
       } else {
@@ -29,6 +29,7 @@
 #define V8_SPACES_H_

 #include "allocation.h"
+#include "hashmap.h"
 #include "list.h"
 #include "log.h"
@@ -2499,9 +2500,9 @@ class LargeObjectSpace : public Space {
   // space, may be slow.
   MaybeObject* FindObject(Address a);

-  // Finds a large object page containing the given pc, returns NULL
+  // Finds a large object page containing the given address, returns NULL
   // if such a page doesn't exist.
-  LargePage* FindPageContainingPc(Address pc);
+  LargePage* FindPage(Address a);

   // Frees unmarked objects.
   void FreeUnmarkedObjects();
@@ -2536,6 +2537,8 @@ class LargeObjectSpace : public Space {
   intptr_t size_;  // allocated bytes
   int page_count_;  // number of chunks
   intptr_t objects_size_;  // size of objects
+  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
+  HashMap chunk_map_;

   friend class LargeObjectIterator;