Commit cfc79faa authored by Sathya Gunasekaran's avatar Sathya Gunasekaran Committed by Commit Bot

[hashtable] Move data table to the beginning

TBR: hpayer@chromium.org
Bug: v8:6443, v8:7569
Change-Id: Idd952ed0a832c469b76f1cbc919f700e09dc975d
Reviewed-on: https://chromium-review.googlesource.com/1031559
Commit-Queue: Sathya Gunasekaran <gsathya@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52930}
parent a9e2b2ce
......@@ -441,7 +441,7 @@ Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
CHECK_LE(capacity, SmallOrderedHashSet::kMaxCapacity);
DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
int size = SmallOrderedHashSet::Size(capacity);
int size = SmallOrderedHashSet::SizeFor(capacity);
Map* map = *small_ordered_hash_set_map();
HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result),
......@@ -456,7 +456,7 @@ Handle<SmallOrderedHashMap> Factory::NewSmallOrderedHashMap(
CHECK_LE(capacity, SmallOrderedHashMap::kMaxCapacity);
DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
int size = SmallOrderedHashMap::Size(capacity);
int size = SmallOrderedHashMap::SizeFor(capacity);
Map* map = *small_ordered_hash_map_map();
HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result),
......
......@@ -181,7 +181,8 @@ class SmallOrderedHashTable<Derived>::BodyDescriptor final
public:
static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
Derived* table = reinterpret_cast<Derived*>(obj);
if (offset < table->GetDataTableStartOffset()) return false;
if (offset < kDataTableStartOffset) return false;
if (offset >= table->GetBucketsStartOffset()) return false;
return IsValidSlotImpl(map, obj, offset);
}
......@@ -189,7 +190,7 @@ class SmallOrderedHashTable<Derived>::BodyDescriptor final
static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
Derived* table = reinterpret_cast<Derived*>(obj);
int start = table->GetDataTableStartOffset();
int start = kDataTableStartOffset;
for (int i = 0; i < table->Capacity(); i++) {
IteratePointer(obj, start + (i * kPointerSize), v);
}
......@@ -197,7 +198,7 @@ class SmallOrderedHashTable<Derived>::BodyDescriptor final
static inline int SizeOf(Map* map, HeapObject* obj) {
Derived* table = reinterpret_cast<Derived*>(obj);
return table->Size();
return table->SizeFor(table->Capacity());
}
};
......
......@@ -2252,14 +2252,16 @@ int HeapObject::SizeFromMap(Map* map) const {
instance_type);
}
if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
return reinterpret_cast<const SmallOrderedHashSet*>(this)->Size();
return SmallOrderedHashSet::SizeFor(
reinterpret_cast<const SmallOrderedHashSet*>(this)->Capacity());
}
if (instance_type == PROPERTY_ARRAY_TYPE) {
return PropertyArray::SizeFor(
reinterpret_cast<const PropertyArray*>(this)->synchronized_length());
}
if (instance_type == SMALL_ORDERED_HASH_MAP_TYPE) {
return reinterpret_cast<const SmallOrderedHashMap*>(this)->Size();
return SmallOrderedHashMap::SizeFor(
reinterpret_cast<const SmallOrderedHashMap*>(this)->Capacity());
}
if (instance_type == FEEDBACK_VECTOR_TYPE) {
return FeedbackVector::SizeFor(
......@@ -2609,9 +2611,10 @@ void Foreign::set_foreign_address(Address value) {
template <class Derived>
void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
Object* value) {
int entry_offset = GetDataEntryOffset(entry, relative_index);
Address entry_offset =
kHeaderSize + GetDataEntryOffset(entry, relative_index);
RELAXED_WRITE_FIELD(this, entry_offset, value);
WRITE_BARRIER(GetHeap(), this, entry_offset, value);
WRITE_BARRIER(GetHeap(), this, static_cast<int>(entry_offset), value);
}
ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
......
......@@ -3573,18 +3573,6 @@ void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
os << value();
}
#define FIELD_ADDR(p, offset) \
(reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
#define READ_INT32_FIELD(p, offset) \
(*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
#define READ_INT64_FIELD(p, offset) \
(*reinterpret_cast<const int64_t*>(FIELD_ADDR(p, offset)))
#define READ_BYTE_FIELD(p, offset) \
(*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))
String* JSReceiver::class_name() {
if (IsFunction()) return GetHeap()->Function_string();
if (IsJSArgumentsObject()) return GetHeap()->Arguments_string();
......@@ -18570,6 +18558,7 @@ SmallOrderedHashTable<SmallOrderedHashMap>::Allocate(Isolate* isolate,
template <class Derived>
void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
int capacity) {
DisallowHeapAllocation no_gc;
int num_buckets = capacity / kLoadFactor;
int num_chains = capacity;
......@@ -18577,12 +18566,12 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
SetNumberOfElements(0);
SetNumberOfDeletedElements(0);
byte* hashtable_start =
FIELD_ADDR(this, kHeaderSize + (kBucketsStartOffset * kOneByteSize));
memset(hashtable_start, kNotFound, num_buckets + num_chains);
Address hashtable_start = GetHashTableStartAddress(capacity);
memset(reinterpret_cast<byte*>(hashtable_start), kNotFound,
num_buckets + num_chains);
if (isolate->heap()->InNewSpace(this)) {
MemsetPointer(RawField(this, GetDataTableStartOffset()),
MemsetPointer(RawField(this, kHeaderSize + kDataTableStartOffset),
isolate->heap()->the_hole_value(),
capacity * Derived::kEntrySize);
} else {
......@@ -18601,6 +18590,12 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
for (int i = 0; i < num_chains; ++i) {
DCHECK_EQ(kNotFound, GetNextEntry(i));
}
for (int i = 0; i < capacity; ++i) {
for (int j = 0; j < Derived::kEntrySize; j++) {
DCHECK_EQ(isolate->heap()->the_hole_value(), GetDataEntry(i, j));
}
}
#endif // DEBUG
}
......@@ -19567,10 +19562,5 @@ MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
return MaybeHandle<Name>();
}
#undef FIELD_ADDR
#undef READ_INT32_FIELD
#undef READ_INT64_FIELD
#undef READ_BYTE_FIELD
} // namespace internal
} // namespace v8
......@@ -586,35 +586,45 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
// that the DataTable entries start aligned. A bucket or chain value
// of 255 is used to denote an unknown entry.
//
// Memory layout: [ Header ] [ HashTable ] [ Chains ] [ Padding ] [ DataTable ]
// Memory layout: [ Header ] [ Padding ] [ DataTable ] [ HashTable ] [ Chains ]
//
// On a 64 bit machine with capacity = 4 and 2 entries,
// The index are represented as bytes, on a 64 bit machine with
// kEntrySize = 1, capacity = 4 and entries = 2:
//
// [ Header ] :
// [0 .. 7] : Number of elements
// [8 .. 15] : Number of deleted elements
// [16 .. 23] : Number of buckets
//
// [ HashTable ] :
// [24 .. 31] : First chain-link for bucket 1
// [32 .. 40] : First chain-link for bucket 2
//
// [ Chains ] :
// [40 .. 47] : Next chain link for entry 1
// [48 .. 55] : Next chain link for entry 2
// [56 .. 63] : Next chain link for entry 3
// [64 .. 71] : Next chain link for entry 4
// [0] : Number of elements
// [1] : Number of deleted elements
// [2] : Number of buckets
//
// [ Padding ] :
// [72 .. 127] : Padding
// [3 .. 7] : Padding
//
// [ DataTable ] :
// [128 .. 128 + kEntrySize - 1] : Entry 1
// [128 + kEntrySize .. 128 + kEntrySize + kEntrySize - 1] : Entry 2
// [8 .. 15] : Entry 1
// [16 .. 23] : Entry 2
// [24 .. 31] : empty
// [32 .. 39] : empty
//
// [ HashTable ] :
// [40] : First chain-link for bucket 1
// [41] : empty
//
// [ Chains ] :
// [42] : Next chain link for bucket 1
// [43] : empty
// [44] : empty
// [45] : empty
//
template <class Derived>
class SmallOrderedHashTable : public HeapObject {
public:
// Offset points to a relative location in the table
typedef int Offset;
// ByteIndex points to an index in the table that needs to be
// converted to an Offset.
typedef int ByteIndex;
void Initialize(Isolate* isolate, int capacity);
static Handle<Derived> Allocate(Isolate* isolate, int capacity,
......@@ -635,54 +645,126 @@ class SmallOrderedHashTable : public HeapObject {
static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);
void SetDataEntry(int entry, int relative_index, Object* value);
// Returns total size in bytes required for a table of given
// capacity.
static int SizeFor(int capacity) {
DCHECK_GE(capacity, kMinCapacity);
DCHECK_LE(capacity, kMaxCapacity);
static int GetDataTableStartOffset(int capacity) {
int nof_buckets = capacity / kLoadFactor;
int nof_chain_entries = capacity;
int data_table_size = DataTableSizeFor(capacity);
int hash_table_size = capacity / kLoadFactor;
int chain_table_size = capacity;
int total_size = kHeaderSize + kDataTableStartOffset + data_table_size +
hash_table_size + chain_table_size;
int padding_index = kBucketsStartOffset + nof_buckets + nof_chain_entries;
int padding_offset = padding_index * kBitsPerByte;
return ((total_size + kPointerSize - 1) / kPointerSize) * kPointerSize;
}
// Returns the number of elements that can fit into the allocated table.
int Capacity() const {
int capacity = NumberOfBuckets() * kLoadFactor;
DCHECK_GE(capacity, kMinCapacity);
DCHECK_LE(capacity, kMaxCapacity);
return ((padding_offset + kPointerSize - 1) / kPointerSize) * kPointerSize;
return capacity;
}
int GetDataTableStartOffset() const {
return GetDataTableStartOffset(Capacity());
// Returns the number of elements that are present in the table.
int NumberOfElements() const {
int nof_elements = getByte(0, kNumberOfElementsByteIndex);
DCHECK_LE(nof_elements, Capacity());
return nof_elements;
}
static int Size(int capacity) {
int data_table_start = GetDataTableStartOffset(capacity);
int data_table_size = capacity * Derived::kEntrySize * kBitsPerPointer;
return data_table_start + data_table_size;
int NumberOfDeletedElements() const {
int nof_deleted_elements = getByte(0, kNumberOfDeletedElementsByteIndex);
DCHECK_LE(nof_deleted_elements, Capacity());
return nof_deleted_elements;
}
int Size() const { return Size(Capacity()); }
int NumberOfBuckets() const { return getByte(0, kNumberOfBucketsByteIndex); }
DECL_VERIFIER(SmallOrderedHashTable)
static const int kMinCapacity = 4;
static const byte kNotFound = 0xFF;
// We use the value 255 to indicate kNotFound for chain and bucket
// values, which means that this value can't be used as a valid
// index.
static const int kMaxCapacity = 254;
STATIC_ASSERT(kMaxCapacity < kNotFound);
// The load factor is used to derive the number of buckets from
// capacity during Allocation. We also depend on this to calculate
// the capacity from number of buckets after allocation. If we
// decide to change kLoadFactor to something other than 2, capacity
// should be stored as another field of this object.
static const int kLoadFactor = 2;
protected:
void SetDataEntry(int entry, int relative_index, Object* value);
// TODO(gsathya): Calculate all the various possible values for this
// at compile time since capacity can only be 4 different values.
Offset GetBucketsStartOffset() const {
int capacity = Capacity();
int data_table_size = DataTableSizeFor(capacity);
return kDataTableStartOffset + data_table_size;
}
Address GetHashTableStartAddress(int capacity) const {
return FIELD_ADDR(
this, kHeaderSize + kDataTableStartOffset + DataTableSizeFor(capacity));
}
void SetFirstEntry(int bucket, byte value) {
set(kBucketsStartOffset + bucket, value);
DCHECK_LE(static_cast<unsigned>(bucket), NumberOfBuckets());
setByte(GetBucketsStartOffset(), bucket, value);
}
int GetFirstEntry(int bucket) const {
return get(kBucketsStartOffset + bucket);
DCHECK_LE(static_cast<unsigned>(bucket), NumberOfBuckets());
return getByte(GetBucketsStartOffset(), bucket);
}
// TODO(gsathya): Calculate all the various possible values for this
// at compile time since capacity can only be 4 different values.
Offset GetChainTableOffset() const {
int nof_buckets = NumberOfBuckets();
int capacity = nof_buckets * kLoadFactor;
DCHECK_EQ(Capacity(), capacity);
int data_table_size = DataTableSizeFor(capacity);
int hash_table_size = nof_buckets;
return kDataTableStartOffset + data_table_size + hash_table_size;
}
void SetNextEntry(int entry, int next_entry) {
set(GetChainTableOffset() + entry, next_entry);
DCHECK_LT(static_cast<unsigned>(entry), Capacity());
DCHECK_GE(static_cast<unsigned>(next_entry), 0);
DCHECK(next_entry <= Capacity() || next_entry == kNotFound);
setByte(GetChainTableOffset(), entry, next_entry);
}
int GetNextEntry(int entry) const {
return get(GetChainTableOffset() + entry);
DCHECK_LT(entry, Capacity());
return getByte(GetChainTableOffset(), entry);
}
Object* GetDataEntry(int entry, int relative_index) {
int entry_offset = GetDataEntryOffset(entry, relative_index);
return READ_FIELD(this, entry_offset);
DCHECK_LT(entry, Capacity());
DCHECK_LE(static_cast<unsigned>(relative_index), Derived::kEntrySize);
Offset entry_offset = GetDataEntryOffset(entry, relative_index);
return READ_FIELD(this, kHeaderSize + entry_offset);
}
Object* KeyAt(int entry) const {
int entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
return READ_FIELD(this, entry_offset);
DCHECK_LT(entry, Capacity());
Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
return READ_FIELD(this, kHeaderSize + entry_offset);
}
int HashToBucket(int hash) const { return hash & (NumberOfBuckets() - 1); }
......@@ -690,51 +772,33 @@ class SmallOrderedHashTable : public HeapObject {
int HashToFirstEntry(int hash) const {
int bucket = HashToBucket(hash);
int entry = GetFirstEntry(bucket);
DCHECK(entry < Capacity() || entry == kNotFound);
return entry;
}
int GetChainTableOffset() const {
return kBucketsStartOffset + NumberOfBuckets();
void SetNumberOfBuckets(int num) {
setByte(0, kNumberOfBucketsByteIndex, num);
}
void SetNumberOfBuckets(int num) { set(kNumberOfBucketsOffset, num); }
void SetNumberOfElements(int num) { set(kNumberOfElementsOffset, num); }
void SetNumberOfElements(int num) {
DCHECK_LE(static_cast<unsigned>(num), Capacity());
setByte(0, kNumberOfElementsByteIndex, num);
}
void SetNumberOfDeletedElements(int num) {
set(kNumberOfDeletedElementsOffset, num);
DCHECK_LE(static_cast<unsigned>(num), Capacity());
setByte(0, kNumberOfDeletedElementsByteIndex, num);
}
int NumberOfElements() const { return get(kNumberOfElementsOffset); }
static const int kNumberOfElementsByteIndex = 0;
static const int kNumberOfDeletedElementsByteIndex = 1;
static const int kNumberOfBucketsByteIndex = 2;
int NumberOfDeletedElements() const {
return get(kNumberOfDeletedElementsOffset);
static const Offset kDataTableStartOffset = kPointerSize;
static constexpr int DataTableSizeFor(int capacity) {
return capacity * Derived::kEntrySize * kPointerSize;
}
int NumberOfBuckets() const { return get(kNumberOfBucketsOffset); }
static const byte kNotFound = 0xFF;
static const int kMinCapacity = 4;
// We use the value 255 to indicate kNotFound for chain and bucket
// values, which means that this value can't be used as a valid
// index.
static const int kMaxCapacity = 254;
STATIC_ASSERT(kMaxCapacity < kNotFound);
static const int kNumberOfElementsOffset = 0;
static const int kNumberOfDeletedElementsOffset = 1;
static const int kNumberOfBucketsOffset = 2;
static const int kBucketsStartOffset = 3;
// The load factor is used to derive the number of buckets from
// capacity during Allocation. We also depend on this to calculate
// the capacity from number of buckets after allocation. If we
// decide to change kLoadFactor to something other than 2, capacity
// should be stored as another field of this object.
static const int kLoadFactor = 2;
static const int kBitsPerPointer = kPointerSize * kBitsPerByte;
// Our growth strategy involves doubling the capacity until we reach
// kMaxCapacity, but since the kMaxCapacity is always less than 256,
// we will never fully utilize this table. We special case for 256,
......@@ -742,31 +806,31 @@ class SmallOrderedHashTable : public HeapObject {
// SmallOrderedHashTable::Grow.
static const int kGrowthHack = 256;
DECL_VERIFIER(SmallOrderedHashTable)
protected:
// This is used for accessing the non |DataTable| part of the
// structure.
byte get(int index) const {
return READ_BYTE_FIELD(this, kHeaderSize + (index * kOneByteSize));
byte getByte(Offset offset, ByteIndex index) const {
DCHECK(offset < kDataTableStartOffset || offset >= GetBucketsStartOffset());
return READ_BYTE_FIELD(this, kHeaderSize + offset + (index * kOneByteSize));
}
void set(int index, byte value) {
WRITE_BYTE_FIELD(this, kHeaderSize + (index * kOneByteSize), value);
void setByte(Offset offset, ByteIndex index, byte value) {
DCHECK(offset < kDataTableStartOffset || offset >= GetBucketsStartOffset());
WRITE_BYTE_FIELD(this, kHeaderSize + offset + (index * kOneByteSize),
value);
}
int GetDataEntryOffset(int entry, int relative_index) const {
int datatable_start = GetDataTableStartOffset();
Offset GetDataEntryOffset(int entry, int relative_index) const {
DCHECK_LT(entry, Capacity());
int offset_in_datatable = entry * Derived::kEntrySize * kPointerSize;
int offset_in_entry = relative_index * kPointerSize;
return datatable_start + offset_in_datatable + offset_in_entry;
return kDataTableStartOffset + offset_in_datatable + offset_in_entry;
}
// Returns the number of elements that can fit into the allocated buffer.
int Capacity() const { return NumberOfBuckets() * kLoadFactor; }
int UsedCapacity() const {
return NumberOfElements() + NumberOfDeletedElements();
int used = NumberOfElements() + NumberOfDeletedElements();
DCHECK_LE(used, Capacity());
return used;
}
};
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment