Commit 38cb7830 authored by Jacob.Bramley@arm.com

Unravel kHeapObjectTagSize from the stub cache.

The stub cache used kHeapObjectTagSize to scale indices, but there
doesn't appear to be a direct need for this. Instead, the stub cache now
defines its own kCacheIndexShift constant.

BUG=
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/401613003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22466 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent de8cac44
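
For orientation, here is a standalone sketch (not part of this commit) of the hash
computation the diff touches; it mirrors the StubCache::PrimaryOffset and
SecondaryOffset logic shown in the stub-cache.h hunks below. kCacheIndexShift == 2
(the "two bits" the comments refer to) and the table sizes are illustrative
stand-ins, not values copied from V8 headers.

#include <stdint.h>
#include <stdio.h>

static const int kCacheIndexShift = 2;           // == Name::kHashShift here
static const uint32_t kPrimaryTableSize = 2048;  // illustrative
static const uint32_t kSecondaryTableSize = 512; // illustrative

// iflags stands for flags with the not-used-in-lookup bits already cleared.
int PrimaryOffset(uint32_t hash_field, uint32_t iflags, uint32_t map_bits) {
  uint32_t key = (map_bits + hash_field) ^ iflags;
  // The mask zeroes the low kCacheIndexShift bits, so the returned index is
  // already scaled by 1 << kCacheIndexShift.
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

int SecondaryOffset(uint32_t name_bits, uint32_t iflags, int seed) {
  uint32_t key = (seed - name_bits) + iflags;
  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}

int main() {
  int primary = PrimaryOffset(0x12345678u, 0x00beef00u, 0x0badf00du);
  int secondary = SecondaryOffset(0x0cafe004u, 0x00beef00u, primary);
  printf("primary=%d secondary=%d\n", primary, secondary);  // both multiples of 4
  return 0;
}

Each architecture below replicates this computation in assembler, either on the
pre-scaled index (ia32/x64) or on the unscaled index after shifting (arm/mips).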
@@ -202,10 +202,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   uint32_t mask = kPrimaryTableSize - 1;
   // We shift out the last two bits because they are not part of the hash and
   // they are always 01 for maps.
-  __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
+  __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
   // Mask down the eor argument to the minimum to keep the immediate
   // ARM-encodable.
-  __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+  __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
   // Prefer and_ to ubfx here because ubfx takes 2 cycles.
   __ and_(scratch, scratch, Operand(mask));
@@ -222,9 +222,9 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
              extra3);
   // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+  __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
   uint32_t mask2 = kSecondaryTableSize - 1;
-  __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+  __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
   __ and_(scratch, scratch, Operand(mask2));
   // Probe the secondary table.
......
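
A note on the "ARM-encodable" comment above: the eor operand is a C++ expression
evaluated when the stub is assembled, so masking it first keeps the constant that
must be encoded as an immediate small. A minimal sketch of that host-side
computation, with illustrative constants:

#include <stdint.h>

static const int kCacheIndexShift = 2;
static const uint32_t kPrimaryTableSize = 2048;  // illustrative

uint32_t EorImmediate(uint32_t flags) {
  uint32_t mask = kPrimaryTableSize - 1;
  // Only the low log2(kPrimaryTableSize) bits survive, so the immediate
  // never needs more bits than the table index itself.
  return (flags >> kCacheIndexShift) & mask;
}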
@@ -177,7 +177,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ Add(scratch, scratch, extra);
   __ Eor(scratch, scratch, flags);
   // We shift out the last two bits because they are not part of the hash.
-  __ Ubfx(scratch, scratch, kHeapObjectTagSize,
+  __ Ubfx(scratch, scratch, kCacheIndexShift,
           CountTrailingZeros(kPrimaryTableSize, 64));
   // Probe the primary table.
@@ -185,8 +185,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
              scratch, extra, extra2, extra3);
   // Primary miss: Compute hash for secondary table.
-  __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
-  __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
+  __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+  __ Add(scratch, scratch, flags >> kCacheIndexShift);
   __ And(scratch, scratch, kSecondaryTableSize - 1);
   // Probe the secondary table.
......
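
The Ubfx above folds the shift and mask into one bitfield extract: taking
CountTrailingZeros(kPrimaryTableSize) bits starting at bit kCacheIndexShift is
the same as shifting right and masking with (size - 1). A sketch of the
equivalence, assuming an illustrative power-of-two table size:

#include <stdint.h>

static const int kCacheIndexShift = 2;
static const uint64_t kPrimaryTableSize = 2048;  // illustrative

uint64_t UbfxEquivalent(uint64_t hash) {
  // __builtin_ctzll gives log2 of a power of two (GCC/Clang builtin).
  int width = __builtin_ctzll(kPrimaryTableSize);
  uint64_t mask = (1ull << width) - 1;
  return (hash >> kCacheIndexShift) & mask;  // what the single Ubfx computes
}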
@@ -205,10 +205,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ xor_(offset, flags);
   // We mask out the last two bits because they are not part of the hash and
   // they are always 01 for maps. Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
   // ProbeTable expects the offset to be pointer scaled, which it is, because
   // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
+  ASSERT(kCacheIndexShift == kPointerSizeLog2);
   // Probe the primary table.
   ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
@@ -217,10 +217,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
   __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(offset, flags);
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
   __ sub(offset, name);
   __ add(offset, Immediate(flags));
-  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
   // Probe the secondary table.
   ProbeTable(
......
@@ -196,8 +196,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   uint32_t mask = kPrimaryTableSize - 1;
   // We shift out the last two bits because they are not part of the hash and
   // they are always 01 for maps.
-  __ srl(scratch, scratch, kHeapObjectTagSize);
-  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+  __ srl(scratch, scratch, kCacheIndexShift);
+  __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
   __ And(scratch, scratch, Operand(mask));
   // Probe the primary table.
@@ -213,10 +213,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
              extra3);
   // Primary miss: Compute hash for secondary probe.
-  __ srl(at, name, kHeapObjectTagSize);
+  __ srl(at, name, kCacheIndexShift);
   __ Subu(scratch, scratch, at);
   uint32_t mask2 = kSecondaryTableSize - 1;
-  __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+  __ Addu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
   __ And(scratch, scratch, Operand(mask2));
   // Probe the secondary table.
......
@@ -197,8 +197,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   uint64_t mask = kPrimaryTableSize - 1;
   // We shift out the last two bits because they are not part of the hash and
   // they are always 01 for maps.
-  __ dsrl(scratch, scratch, kHeapObjectTagSize);
-  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+  __ dsrl(scratch, scratch, kCacheIndexShift);
+  __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
   __ And(scratch, scratch, Operand(mask));
   // Probe the primary table.
@@ -214,10 +214,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
              extra3);
   // Primary miss: Compute hash for secondary probe.
-  __ dsrl(at, name, kHeapObjectTagSize);
+  __ dsrl(at, name, kCacheIndexShift);
   __ Dsubu(scratch, scratch, at);
   uint64_t mask2 = kSecondaryTableSize - 1;
-  __ Daddu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+  __ Daddu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
   __ And(scratch, scratch, Operand(mask2));
   // Probe the secondary table.
......
@@ -183,6 +183,11 @@ class StubCache {
   static const int kInterceptorArgsHolderIndex = 3;
   static const int kInterceptorArgsLength = 4;
 
+  // Setting the entry size such that the index is shifted by Name::kHashShift
+  // is convenient; shifting down the length field (to extract the hash code)
+  // automatically discards the hash bit field.
+  static const int kCacheIndexShift = Name::kHashShift;
+
  private:
   explicit StubCache(Isolate* isolate);
@@ -195,13 +200,9 @@
   // Hash algorithm for the primary table. This algorithm is replicated in
   // assembler for every architecture. Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
+  // is scaled by 1 << kCacheIndexShift.
   static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
-    // This works well because the heap object tag size and the hash
-    // shift are equal. Shifting down the length field to get the
-    // hash code would effectively throw away two bits of the hash
-    // code.
-    STATIC_ASSERT(kHeapObjectTagSize == Name::kHashShift);
+    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
     // Compute the hash of the name (use entire hash field).
     ASSERT(name->HasHashCode());
     uint32_t field = name->hash_field();
@@ -216,12 +217,12 @@
         (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
     // Base the offset on a simple combination of name, flags, and map.
     uint32_t key = (map_low32bits + field) ^ iflags;
-    return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
+    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
   }
 
   // Hash algorithm for the secondary table. This algorithm is replicated in
   // assembler for every architecture. Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
+  // is scaled by 1 << kCacheIndexShift.
   static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
     uint32_t name_low32bits =
@@ -231,7 +232,7 @@
     uint32_t iflags =
         (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
     uint32_t key = (seed - name_low32bits) + iflags;
-    return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
+    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
   }
 
   // Compute the entry for a given offset in exactly the same way as
......
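
The header hunks above mask the key with ((size - 1) << kCacheIndexShift), while
the arm/mips probes shift first and then mask with (size - 1); the two forms
agree up to the scale factor. A small sketch of that equivalence, with
illustrative constants:

#include <assert.h>
#include <stdint.h>

static const int kCacheIndexShift = 2;
static const uint32_t kPrimaryTableSize = 2048;  // illustrative

void Equivalence(uint32_t key) {
  // stub-cache.h form: index pre-scaled by 1 << kCacheIndexShift.
  uint32_t scaled = key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  // arm/mips form: shift first, then mask with (size - 1); unscaled index.
  uint32_t unscaled = (key >> kCacheIndexShift) & (kPrimaryTableSize - 1);
  assert(scaled == (unscaled << kCacheIndexShift));
}

The scaled form also explains why PrimaryOffset can use the raw hash field: the
mask zeroes the low kCacheIndexShift bits, so the hash bit field never reaches
the index.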
@@ -24,13 +24,13 @@ static void ProbeTable(Isolate* isolate,
                        Register receiver,
                        Register name,
                        // The offset is scaled by 4, based on
-                       // kHeapObjectTagSize, which is two bits
+                       // kCacheIndexShift, which is two bits
                        Register offset) {
   // We need to scale up the pointer by 2 when the offset is scaled by less
   // than the pointer size.
   ASSERT(kPointerSize == kInt64Size
-         ? kPointerSizeLog2 == kHeapObjectTagSize + 1
-         : kPointerSizeLog2 == kHeapObjectTagSize);
+         ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
+         : kPointerSizeLog2 == StubCache::kCacheIndexShift);
   ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
   ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
@@ -175,7 +175,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ xorp(scratch, Immediate(flags));
   // We mask out the last two bits because they are not part of the hash and
   // they are always 01 for maps. Also in the two 'and' instructions below.
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
   // Probe the primary table.
   ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
@@ -184,10 +184,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
   __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xorp(scratch, Immediate(flags));
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
   __ subl(scratch, name);
   __ addl(scratch, Immediate(flags));
-  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
   // Probe the secondary table.
   ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
......
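
The ProbeTable hunk above encodes a scaling argument: offsets arrive pre-scaled
by 1 << kCacheIndexShift (4), but x64 table slots are pointer sized (8 bytes),
so the probe doubles the offset (times_2) where ia32 uses it as-is (times_1).
A sketch of just that scale-factor choice, not of full entry addressing;
constants are illustrative stand-ins:

#include <stdint.h>

static const int kCacheIndexShift = 2;
static const int kPointerSizeLog2 = 3;  // 64-bit target: 8-byte pointers

uintptr_t PointerScaledOffset(uint32_t offset) {
  // kPointerSizeLog2 == kCacheIndexShift + 1 here, matching the ASSERT in
  // the diff; the extra shift of 1 is what the times_2 ScaleFactor encodes.
  int extra_shift = kPointerSizeLog2 - kCacheIndexShift;
  return static_cast<uintptr_t>(offset) << extra_shift;
}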
@@ -205,10 +205,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ xor_(offset, flags);
   // We mask out the last two bits because they are not part of the hash and
   // they are always 01 for maps. Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
   // ProbeTable expects the offset to be pointer scaled, which it is, because
   // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
+  ASSERT(kCacheIndexShift == kPointerSizeLog2);
   // Probe the primary table.
   ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
@@ -217,10 +217,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
   __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(offset, flags);
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
   __ sub(offset, name);
   __ add(offset, Immediate(flags));
-  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
   // Probe the secondary table.
   ProbeTable(
......