// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_

#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

class SmallMapList;

// The stub cache is used for megamorphic property accesses.
// It maps (map, name) pairs to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
// handlers verify the chain.


class SCTableReference {
 public:
  Address address() const { return address_; }

 private:
  explicit SCTableReference(Address address) : address_(address) {}

  Address address_;

  friend class StubCache;
};


class StubCache {
 public:
  struct Entry {
    Name* key;
    Code* value;
    Map* map;
  };

  void Initialize();
  // Access the cache entry for hash(name, map).
  Code* Set(Name* name, Map* map, Code* code);
  Code* Get(Name* name, Map* map);
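  //
  // A minimal fill/probe sketch, in comment form only. The accessor
  // load_stub_cache() and CompileHandler() are illustrative assumptions
  // (they are not declared in this file), and a miss is assumed to
  // return NULL:
  //
  //   StubCache* cache = isolate->load_stub_cache();
  //   Code* handler = cache->Get(*name, *map);
  //   if (handler == NULL) {
  //     handler = CompileHandler(...);  // hypothetical handler source
  //     cache->Set(*name, *map, handler);
  //   }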
  // Clear the lookup table (at mark-compact collection).
  void Clear();
  // Collect all maps that match the name.
  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                           Handle<Context> native_context, Zone* zone);
  // Generate code for probing the stub cache table.
  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
  // registers. Set to no_reg if not needed.
  void GenerateProbe(MacroAssembler* masm, Register receiver, Register name,
                     Register scratch, Register extra, Register extra2 = no_reg,
                     Register extra3 = no_reg);
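  //
  // A typical probe emission, sketched with illustrative register names
  // (the real call sites are the per-architecture IC stubs):
  //
  //   stub_cache->GenerateProbe(masm, receiver_reg, name_reg, scratch_reg,
  //                             extra_reg);  // extra2/extra3 stay no_reg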

  enum Table { kPrimary, kSecondary };

  SCTableReference key_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->key));
  }

  SCTableReference map_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->map));
  }

  SCTableReference value_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->value));
  }

  StubCache::Entry* first_entry(StubCache::Table table) {
    switch (table) {
      case StubCache::kPrimary:
        return StubCache::primary_;
      case StubCache::kSecondary:
        return StubCache::secondary_;
    }
    UNREACHABLE();
    return NULL;
  }

  Isolate* isolate() { return isolate_; }
  Code::Kind ic_kind() const { return ic_kind_; }

  // Setting the entry size such that the index is shifted by Name::kHashShift
  // is convenient; shifting down the length field (to extract the hash code)
  // automatically discards the hash bit field.
  static const int kCacheIndexShift = Name::kHashShift;

  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);

  // Some magic number used in primary and secondary hash computations.
  static const int kPrimaryMagic = 0x3d532433;
  static const int kSecondaryMagic = 0xb16b00b5;

  static int PrimaryOffsetForTesting(Name* name, Map* map) {
    return PrimaryOffset(name, map);
  }

  static int SecondaryOffsetForTesting(Name* name, int seed) {
    return SecondaryOffset(name, seed);
  }

  // The constructor is made public only for the purposes of testing.
  StubCache(Isolate* isolate, Code::Kind ic_kind);

 private:
  // The stub cache has a primary and secondary level.  The two levels have
  // different hashing algorithms in order to avoid simultaneous collisions
  // in both caches.  Unlike a probing strategy (quadratic or otherwise), the
  // update strategy is fairly clear and simple:  Any existing entry
  // in the primary cache is moved to the secondary cache, and secondary cache
  // entries are overwritten.
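  //
  // Sketched as pseudocode (the authoritative version is the Set()
  // implementation; field checks are elided here):
  //
  //   offset = PrimaryOffset(name, map);
  //   primary = entry(primary_, offset);
  //   if (primary->value holds a live handler) {
  //     // Demote the colliding entry before overwriting it.
  //     *entry(secondary_, SecondaryOffset(primary->key, offset)) = *primary;
  //   }
  //   *primary = {name, code, map};  // new entries always enter the primary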

  // Hash algorithm for the primary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int PrimaryOffset(Name* name, Map* map) {
    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
    // Compute the hash of the name (use entire hash field).
    DCHECK(name->HasHashCode());
    uint32_t field = name->hash_field();
    // Using only the low bits in 64-bit mode is unlikely to increase the
    // risk of collision even if the heap is spread over an area larger than
    // 4Gb (and not at all if it isn't).
    uint32_t map_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
    // Base the offset on a simple combination of name and map.
    uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  }
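
  // A worked example with made-up values: for hash_field == 0x0000abcd and
  // a map at address 0x12345678,
  //
  //   key == (0x12345678 + 0x0000abcd) ^ kPrimaryMagic == 0x2f662676
  //
  // and the mask (kPrimaryTableSize - 1) << kCacheIndexShift keeps 11 bits
  // of it; assuming kCacheIndexShift == 2, that mask is 0x1ffc and the
  // resulting offset is 0x674.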

  // Hash algorithm for the secondary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int SecondaryOffset(Name* name, int seed) {
    // Use the seed from the primary cache in the secondary cache.
    uint32_t name_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
    uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
  }
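
  // Continuing the made-up example: with the name object at 0x87654320 and
  // seed == 0x674 (the primary offset above),
  //
  //   key == (0x674 - 0x87654320) + kSecondaryMagic  // mod 2^32
  //
  // and the same masking applies, now against kSecondaryTableSize.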

  // Compute the entry for a given offset in exactly the same way as
  // we do in generated code.  We generate a hash code that already
  // ends in Name::kHashShift 0s.  Then we multiply it so it is a multiple
  // of sizeof(Entry).  This makes it easier to avoid making mistakes
  // in the hashed offset computations.
  static Entry* entry(Entry* table, int offset) {
    const int multiplier = sizeof(*table) >> Name::kHashShift;
    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
                                    offset * multiplier);
  }
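
  // For example, on a 64-bit target sizeof(Entry) == 24 (three pointers),
  // so with Name::kHashShift == 2 (an assumption about name.h, not stated
  // here) multiplier == 6, and an offset of index << 2 resolves to
  // table + index * 24, i.e. the index-th Entry.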

 private:
  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
  Isolate* isolate_;
  Code::Kind ic_kind_;

  friend class Isolate;
  friend class SCTableReference;

  DISALLOW_COPY_AND_ASSIGN(StubCache);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_STUB_CACHE_H_