Commit 1ea41a46 authored by mlippautz, committed by Commit bot

KeyedLookupCache and DescriptorLookupCache -> lookup-cache{-inl.h,.cc,.h}

R=verwaest@chromium.org
BUG=

Review-Url: https://codereview.chromium.org/2316503004
Cr-Commit-Position: refs/heads/master@{#39212}
parent 2e316502
@@ -1438,7 +1438,9 @@ v8_source_set("v8_base") {
"src/log-utils.h",
"src/log.cc",
"src/log.h",
"src/lookup-inl.h",
"src/lookup-cache-inl.h",
"src/lookup-cache.cc",
"src/lookup-cache.h",
"src/lookup.cc",
"src/lookup.h",
"src/machine-type.cc",
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/lookup.h"
#include "src/lookup-cache.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/lookup-cache.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}

int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
  DisallowHeapAllocation no_gc;
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}

int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
  DisallowHeapAllocation no_gc;
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == *map) && key.name->Equals(*name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}

void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
                              int field_offset) {
  DisallowHeapAllocation no_gc;
  if (!name->IsUniqueName()) {
    if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
                                                Handle<String>::cast(name))
             .ToHandle(&name)) {
      return;
    }
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = *map;
      key.name = *name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }

  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = *map;
  key.name = *name;
  field_offsets_[index] = field_offset;
}

void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}
} // namespace internal
} // namespace v8
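The update policy in KeyedLookupCache::Update above (hash into a four-entry bucket, take a free slot if one exists, otherwise shift the bucket down one and write the new entry at position zero) is easier to follow outside of V8's heap types. Below is a minimal standalone sketch of that bucketed scheme; SimpleKeyedCache, its const void*/uint32_t key fields, and main() are illustrative stand-ins for this review, not part of the change or of V8's API.

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the (map, name) -> field_offset cache above:
// 256 slots, direct-mapped buckets of 4 entries each.
class SimpleKeyedCache {
 public:
  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kEntriesPerBucket = 4;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kNotFound = -1;

  SimpleKeyedCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = nullptr;
      keys_[i].name_hash = 0;
      offsets_[i] = kNotFound;
    }
  }

  int Lookup(const void* map, uint32_t name_hash) const {
    int index = Hash(map, name_hash) & kHashMask;
    for (int i = 0; i < kEntriesPerBucket; i++) {
      const Key& key = keys_[index + i];
      if (key.map == map && key.name_hash == name_hash) {
        return offsets_[index + i];
      }
    }
    return kNotFound;
  }

  void Update(const void* map, uint32_t name_hash, int offset) {
    int index = Hash(map, name_hash) & kHashMask;
    // Prefer a free slot within the bucket.
    for (int i = 0; i < kEntriesPerBucket; i++) {
      Key& key = keys_[index + i];
      if (key.map == nullptr) {
        key.map = map;
        key.name_hash = name_hash;
        offsets_[index + i] = offset;
        return;
      }
    }
    // Bucket full: shift entries down one and write the new entry first.
    for (int i = kEntriesPerBucket - 1; i > 0; i--) {
      keys_[index + i] = keys_[index + i - 1];
      offsets_[index + i] = offsets_[index + i - 1];
    }
    keys_[index].map = map;
    keys_[index].name_hash = name_hash;
    offsets_[index] = offset;
  }

 private:
  struct Key {
    const void* map;
    uint32_t name_hash;
  };

  // Mixes the map address with the name hash, in the spirit of
  // KeyedLookupCache::Hash (shift the pointer, xor, mask to capacity).
  static int Hash(const void* map, uint32_t name_hash) {
    uintptr_t addr_hash = reinterpret_cast<uintptr_t>(map) >> 5;
    return static_cast<int>((static_cast<uint32_t>(addr_hash) ^ name_hash) &
                            kCapacityMask);
  }

  Key keys_[kLength];
  int offsets_[kLength];
};

int main() {
  SimpleKeyedCache cache;
  int fake_map;  // Any stable address works as a "map" for the sketch.
  cache.Update(&fake_map, /*name_hash=*/0x1234, /*offset=*/8);
  std::printf("hit: %d\n", cache.Lookup(&fake_map, 0x1234));   // Prints 8.
  std::printf("miss: %d\n", cache.Lookup(&fake_map, 0x9999));  // Prints -1.
  return 0;
}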
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_LOOKUP_CACHE_H_
#define V8_LOOKUP_CACHE_H_
#include "src/objects.h"
namespace v8 {
namespace internal {
// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// Descriptor index equals kNotFound means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (map, name).
  // If absent, kAbsent is returned.
  inline int Lookup(Map* source, Name* name);

  // Update an element in the cache.
  inline void Update(Map* source, Name* name, int result);

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;

 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  static inline int Hash(Object* source, Name* name);

  static const int kLength = 64;
  struct Key {
    Map* source;
    Name* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};

// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  int Lookup(Handle<Map> map, Handle<Name> name);

  // Update an element in the cache.
  void Update(Handle<Map> map, Handle<Name> name, int field_offset);

  // Clear the cache.
  void Clear();

  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 5;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kEntriesPerBucket = 4;
  static const int kEntryLength = 2;
  static const int kMapIndex = 0;
  static const int kKeyIndex = 1;
  static const int kNotFound = -1;

  // kEntriesPerBucket should be a power of 2.
  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);

 private:
  KeyedLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = kNotFound;
    }
  }

  static inline int Hash(Handle<Map> map, Handle<Name> name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  Address keys_address() { return reinterpret_cast<Address>(&keys_); }

  Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    Name* name;
  };

  Key keys_[kLength];
  int field_offsets_[kLength];

  friend class ExternalReference;
  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
} // namespace internal
} // namespace v8
#endif // V8_LOOKUP_CACHE_H_
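DescriptorLookupCache's Lookup and Update are declared inline; their definitions live in the new src/lookup-cache-inl.h, which this excerpt does not show. As a rough sketch of how a direct-mapped, one-entry-per-slot cache of this shape is typically consulted (check the cache, fall back to a slow path on kAbsent, then remember the answer), here is an illustrative standalone version; SimpleDescriptorCache and FindDescriptorSlow are hypothetical names introduced for this review, not V8 code.

#include <cstdint>
#include <cstdio>

// Illustrative direct-mapped cache: one (source, name) pair per slot,
// mirroring the shape of DescriptorLookupCache above.
class SimpleDescriptorCache {
 public:
  static const int kAbsent = -2;
  static const int kLength = 64;

  SimpleDescriptorCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = nullptr;
      keys_[i].name = nullptr;
      results_[i] = kAbsent;
    }
  }

  int Lookup(const void* source, const void* name) const {
    int index = Hash(source, name);
    const Key& key = keys_[index];
    if (key.source == source && key.name == name) return results_[index];
    return kAbsent;
  }

  void Update(const void* source, const void* name, int result) {
    int index = Hash(source, name);
    keys_[index].source = source;
    keys_[index].name = name;
    results_[index] = result;
  }

 private:
  struct Key {
    const void* source;
    const void* name;
  };

  // Combines both pointer identities into a slot index.
  static int Hash(const void* source, const void* name) {
    uint32_t a = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source) >> 3);
    uint32_t b = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name) >> 3);
    return static_cast<int>((a ^ b) % kLength);
  }

  Key keys_[kLength];
  int results_[kLength];
};

// Hypothetical slow path standing in for a real descriptor-array search.
static int FindDescriptorSlow(const void* /*source*/, const void* /*name*/) {
  return 7;  // Pretend the property lives at descriptor index 7.
}

int main() {
  SimpleDescriptorCache cache;
  int map_obj, name_obj;  // Addresses stand in for Map* / Name* identities.

  int result = cache.Lookup(&map_obj, &name_obj);
  if (result == SimpleDescriptorCache::kAbsent) {
    result = FindDescriptorSlow(&map_obj, &name_obj);
    cache.Update(&map_obj, &name_obj, result);  // Remember for next time.
  }
  std::printf("descriptor index: %d\n", result);  // 7, served from cache later.
  return 0;
}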
@@ -13,77 +13,6 @@
namespace v8 {
namespace internal {
void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}

int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
  DisallowHeapAllocation no_gc;
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}

int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
  DisallowHeapAllocation no_gc;
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == *map) && key.name->Equals(*name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}

void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
                              int field_offset) {
  DisallowHeapAllocation no_gc;
  if (!name->IsUniqueName()) {
    if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
                                                Handle<String>::cast(name))
             .ToHandle(&name)) {
      return;
    }
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = *map;
      key.name = *name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }

  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = *map;
  key.name = *name;
  field_offsets_[index] = field_offset;
}

void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}
// static
LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
                                                 Handle<Object> receiver,
@@ -12,107 +12,6 @@
namespace v8 {
namespace internal {
// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// Descriptor index equals kNotFound means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (map, name).
  // If absent, kAbsent is returned.
  inline int Lookup(Map* source, Name* name);

  // Update an element in the cache.
  inline void Update(Map* source, Name* name, int result);

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;

 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  static inline int Hash(Object* source, Name* name);

  static const int kLength = 64;
  struct Key {
    Map* source;
    Name* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};

// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  int Lookup(Handle<Map> map, Handle<Name> name);

  // Update an element in the cache.
  void Update(Handle<Map> map, Handle<Name> name, int field_offset);

  // Clear the cache.
  void Clear();

  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 5;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kEntriesPerBucket = 4;
  static const int kEntryLength = 2;
  static const int kMapIndex = 0;
  static const int kKeyIndex = 1;
  static const int kNotFound = -1;

  // kEntriesPerBucket should be a power of 2.
  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);

 private:
  KeyedLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = kNotFound;
    }
  }

  static inline int Hash(Handle<Map> map, Handle<Name> name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  Address keys_address() { return reinterpret_cast<Address>(&keys_); }

  Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    Name* name;
  };

  Key keys_[kLength];
  int field_offsets_[kLength];

  friend class ExternalReference;
  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};

class LookupIterator final BASE_EMBEDDED {
 public:
  enum Configuration {
@@ -27,7 +27,8 @@
#include "src/isolate.h"
#include "src/keys.h"
#include "src/layout-descriptor-inl.h"
#include "src/lookup-inl.h"
#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
#include "src/property.h"
#include "src/prototype.h"
@@ -1034,7 +1034,9 @@
'log-utils.h',
'log.cc',
'log.h',
'lookup-inl.h',
'lookup-cache-inl.h',
'lookup-cache.cc',
'lookup-cache.h',
'lookup.cc',
'lookup.h',
'macro-assembler.h',