Commit 8aeb7439 authored by ishell, committed by Commit bot

[ic] [stubs] Don't use Code::flags in megamorphic stub cache hash computations.

BUG=chromium:618701

Review-Url: https://codereview.chromium.org/2167493003
Cr-Commit-Position: refs/heads/master@{#37946}
parent d9eb3358
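
For orientation (not part of the commit): after this change the megamorphic stub cache offsets are derived only from the name's hash field and the low map-pointer bits, mixed with fixed magic constants instead of the handler's Code::Flags. Below is a minimal standalone C++ sketch of the arithmetic, mirroring the formulas added to stub-cache.h later in this diff. The two-bit kCacheIndexShift and kSecondaryTableBits = 9 are taken from the diff; the primary table size is an illustrative assumption.

#include <cstdint>

// Constants as they appear in the diff; kPrimaryTableSize is assumed for
// illustration (only kSecondaryTableBits = 9 is visible in this change).
constexpr int kCacheIndexShift = 2;          // offsets are scaled by 4
constexpr int kPrimaryTableSize = 1 << 11;   // assumed size
constexpr int kSecondaryTableSize = 1 << 9;  // from kSecondaryTableBits = 9
constexpr uint32_t kPrimaryMagic = 0x3d532433;
constexpr uint32_t kSecondaryMagic = 0xb16b00b5;

// Mirrors the new StubCache::PrimaryOffset(): name hash plus low map bits,
// XORed with a fixed constant rather than the handler's Code::Flags.
uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_low32bits) {
  uint32_t key = (map_low32bits + name_hash_field) ^ kPrimaryMagic;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

// Mirrors the new StubCache::SecondaryOffset(): the primary offset seeds the
// secondary hash, again mixed with a constant instead of flags.
uint32_t SecondaryOffset(uint32_t name_low32bits, uint32_t seed) {
  uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
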
......@@ -2864,7 +2864,6 @@ void CodeStubAssembler::HandlePolymorphicCase(
}
compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
Code::Flags flags,
compiler::Node* map) {
// See v8::internal::StubCache::PrimaryOffset().
STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
......@@ -2878,28 +2877,20 @@ compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
Node* hash = Int32Add(hash_field, map);
// We always set the in_loop bit to zero when generating the lookup code
// so do it here too so the hash codes match.
uint32_t iflags =
(static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
// Base the offset on a simple combination of name, flags, and map.
hash = Word32Xor(hash, Int32Constant(iflags));
// Base the offset on a simple combination of name and map.
hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
uint32_t mask = (StubCache::kPrimaryTableSize - 1)
<< StubCache::kCacheIndexShift;
return Word32And(hash, Int32Constant(mask));
}
compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
compiler::Node* name, Code::Flags flags, compiler::Node* seed) {
compiler::Node* name, compiler::Node* seed) {
// See v8::internal::StubCache::SecondaryOffset().
// Use the seed from the primary cache in the secondary cache.
Node* hash = Int32Sub(seed, name);
// We always set the in_loop bit to zero when generating the lookup code
// so do it here too so the hash codes match.
uint32_t iflags =
(static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
hash = Int32Add(hash, Int32Constant(iflags));
hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
int32_t mask = (StubCache::kSecondaryTableSize - 1)
<< StubCache::kCacheIndexShift;
return Word32And(hash, Int32Constant(mask));
......@@ -2912,9 +2903,8 @@ enum CodeStubAssembler::StubCacheTable : int {
void CodeStubAssembler::TryProbeStubCacheTable(
StubCache* stub_cache, StubCacheTable table_id,
compiler::Node* entry_offset, compiler::Node* name, Code::Flags flags,
compiler::Node* map, Label* if_handler, Variable* var_handler,
Label* if_miss) {
compiler::Node* entry_offset, compiler::Node* name, compiler::Node* map,
Label* if_handler, Variable* var_handler, Label* if_miss) {
StubCache::Table table = static_cast<StubCache::Table>(table_id);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
......@@ -2944,19 +2934,11 @@ void CodeStubAssembler::TryProbeStubCacheTable(
Int32Add(entry_offset, Int32Constant(kPointerSize * 2)));
GotoIf(WordNotEqual(map, entry_map), if_miss);
// Check that the flags match what we're looking for.
DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
stub_cache->key_reference(table).address());
Node* code = Load(MachineType::Pointer(), key_base,
Int32Add(entry_offset, Int32Constant(kPointerSize)));
Node* code_flags =
LoadObjectField(code, Code::kFlagsOffset, MachineType::Uint32());
GotoIf(Word32NotEqual(Int32Constant(flags),
Word32And(code_flags,
Int32Constant(~Code::kFlagsNotUsedInLookup))),
if_miss);
// We found the handler.
var_handler->Bind(code);
Goto(if_handler);
......@@ -2965,9 +2947,6 @@ void CodeStubAssembler::TryProbeStubCacheTable(
void CodeStubAssembler::TryProbeStubCache(
StubCache* stub_cache, compiler::Node* receiver, compiler::Node* name,
Label* if_handler, Variable* var_handler, Label* if_miss) {
Code::Flags flags = Code::RemoveHolderFromFlags(
Code::ComputeHandlerFlags(stub_cache->ic_kind()));
Label try_secondary(this), miss(this);
Counters* counters = isolate()->counters();
......@@ -2979,17 +2958,16 @@ void CodeStubAssembler::TryProbeStubCache(
Node* receiver_map = LoadMap(receiver);
// Probe the primary table.
Node* primary_offset = StubCachePrimaryOffset(name, flags, receiver_map);
TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name, flags,
Node* primary_offset = StubCachePrimaryOffset(name, receiver_map);
TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name,
receiver_map, if_handler, var_handler, &try_secondary);
Bind(&try_secondary);
{
// Probe the secondary table.
Node* secondary_offset =
StubCacheSecondaryOffset(name, flags, primary_offset);
Node* secondary_offset = StubCacheSecondaryOffset(name, primary_offset);
TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
flags, receiver_map, if_handler, var_handler, &miss);
receiver_map, if_handler, var_handler, &miss);
}
Bind(&miss);
......
......@@ -450,11 +450,9 @@ class CodeStubAssembler : public compiler::CodeAssembler {
int unroll_count);
compiler::Node* StubCachePrimaryOffset(compiler::Node* name,
Code::Flags flags,
compiler::Node* map);
compiler::Node* StubCacheSecondaryOffset(compiler::Node* name,
Code::Flags flags,
compiler::Node* seed);
// This enum is used here as a replacement for StubCache::Table to avoid
......@@ -463,9 +461,9 @@ class CodeStubAssembler : public compiler::CodeAssembler {
void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
compiler::Node* entry_offset,
compiler::Node* name, Code::Flags flags,
compiler::Node* map, Label* if_handler,
Variable* var_handler, Label* if_miss);
compiler::Node* name, compiler::Node* map,
Label* if_handler, Variable* var_handler,
Label* if_miss);
void TryProbeStubCache(StubCache* stub_cache, compiler::Node* receiver,
compiler::Node* name, Label* if_handler,
......
......@@ -1386,6 +1386,7 @@ class FunctionPrototypeStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
// TODO(mvstanton): only the receiver register is accessed. When this is
// translated to a hydrogen code stub, a new CallInterfaceDescriptor
......@@ -1404,6 +1405,7 @@ class LoadIndexedStringStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::KEYED_LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(LoadIndexedString, PlatformCodeStub);
......@@ -2693,6 +2695,7 @@ class StoreFastElementStub : public HydrogenCodeStub {
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
class ElementsKindBits : public BitField<ElementsKind, 3, 8> {};
......@@ -2890,6 +2893,7 @@ class StoreElementStub : public PlatformCodeStub {
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
ElementsKind elements_kind() const {
......@@ -2994,6 +2998,7 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override;
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
class FromBits : public BitField<ElementsKind, 3, 8> {};
......
......@@ -15,8 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Flags flags, StubCache::Table table,
Register receiver, Register name,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
Register offset, Register scratch, Register scratch2,
......@@ -63,18 +62,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
scratch2 = no_reg;
__ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
Register flags_reg = base_addr;
base_addr = no_reg;
__ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
// It's a nice optimization if this constant is encodable in the bic insn.
uint32_t mask = Code::kFlagsNotUsedInLookup;
DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
__ bic(flags_reg, flags_reg, Operand(mask));
__ cmp(flags_reg, Operand(flags));
__ b(ne, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
......@@ -93,9 +80,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
// Make sure that code is valid. The multiplying code relies on the
......@@ -140,23 +124,23 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
__ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
__ eor(scratch, scratch, Operand(flags));
__ eor(scratch, scratch, Operand(kPrimaryMagic));
__ mov(ip, Operand(kPrimaryTableSize - 1));
__ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
// Probe the primary table.
ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name));
__ add(scratch, scratch, Operand(flags));
__ add(scratch, scratch, Operand(kSecondaryMagic));
__ mov(ip, Operand(kSecondaryTableSize - 1));
__ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
// Probe the secondary table.
ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -23,8 +23,7 @@ namespace internal {
//
// 'receiver', 'name' and 'offset' registers are preserved on miss.
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Flags flags, StubCache::Table table,
Register receiver, Register name,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
Register offset, Register scratch, Register scratch2,
......@@ -69,12 +68,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
// Get the code entry from the cache.
__ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
__ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
__ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
__ Cmp(scratch2.W(), flags);
__ B(ne, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ B(&miss);
......@@ -94,9 +87,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
// Make sure that there are no register conflicts.
......@@ -136,23 +126,23 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
__ Ldr(scratch.W(), FieldMemOperand(name, Name::kHashFieldOffset));
__ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Add(scratch, scratch, extra);
__ Eor(scratch, scratch, flags);
__ Eor(scratch, scratch, kPrimaryMagic);
__ And(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
extra3);
// Primary miss: Compute hash for secondary table.
__ Sub(scratch, scratch, Operand(name));
__ Add(scratch, scratch, Operand(flags));
__ Add(scratch, scratch, Operand(kSecondaryMagic));
__ And(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -15,7 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register name, Register receiver,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
......@@ -27,6 +26,7 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
Code::Kind ic_kind = stub_cache->ic_kind();
bool is_vector_store =
IC::ICUseVector(ic_kind) &&
(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
......@@ -47,12 +47,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
......@@ -102,12 +96,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
......@@ -145,9 +133,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
......@@ -178,7 +163,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
// Get the map of the receiver and compute the hash.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ xor_(offset, kPrimaryMagic);
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
......@@ -187,21 +172,19 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
ProbeTable(this, masm, ic_kind_, flags, kPrimary, name, receiver, offset,
extra);
ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ xor_(offset, kPrimaryMagic);
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
__ sub(offset, name);
__ add(offset, Immediate(flags));
__ add(offset, Immediate(kSecondaryMagic));
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
ProbeTable(this, masm, ic_kind_, flags, kSecondary, name, receiver, offset,
extra);
ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -1043,8 +1043,7 @@ Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
// cache (which just missed) is different from the cached handler.
if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
Code* megamorphic_cached_code =
stub_cache()->Get(*lookup->name(), map, code->flags());
Code* megamorphic_cached_code = stub_cache()->Get(*lookup->name(), map);
if (megamorphic_cached_code != *code) {
TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
return code;
......
......@@ -15,8 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Flags flags, StubCache::Table table,
Register receiver, Register name,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
Register offset, Register scratch, Register scratch2,
......@@ -62,13 +61,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
scratch2 = no_reg;
__ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
Register flags_reg = base_addr;
base_addr = no_reg;
__ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
__ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
__ Branch(&miss, ne, flags_reg, Operand(flags));
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
......@@ -88,9 +80,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
// Make sure that code is valid. The multiplying code relies on the
......@@ -135,23 +124,23 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
__ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Addu(scratch, scratch, at);
__ Xor(scratch, scratch, Operand(flags));
__ Xor(scratch, scratch, Operand(kPrimaryMagic));
__ And(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ Subu(scratch, scratch, name);
__ Addu(scratch, scratch, Operand(flags));
__ Addu(scratch, scratch, Operand(kSecondaryMagic));
__ And(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -15,8 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Flags flags, StubCache::Table table,
Register receiver, Register name,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
Register offset, Register scratch, Register scratch2,
......@@ -65,13 +64,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
__ ld(code, MemOperand(base_addr,
static_cast<int32_t>(value_off_addr - key_off_addr)));
// Check that the flags match what we're looking for.
Register flags_reg = base_addr;
base_addr = no_reg;
__ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
__ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
__ Branch(&miss, ne, flags_reg, Operand(flags));
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
......@@ -91,9 +83,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
// Make sure that code is valid. The multiplying code relies on the
......@@ -139,23 +128,23 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
__ lwu(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Addu(scratch, scratch, at);
__ Xor(scratch, scratch, Operand(flags));
__ Xor(scratch, scratch, Operand(kPrimaryMagic));
__ And(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ Subu(scratch, scratch, name);
__ Addu(scratch, scratch, flags);
__ Addu(scratch, scratch, kSecondaryMagic);
__ And(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -15,8 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Flags flags, StubCache::Table table,
Register receiver, Register name,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
Register offset, Register scratch, Register scratch2,
......@@ -73,18 +72,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
scratch2 = no_reg;
__ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
Register flags_reg = base_addr;
base_addr = no_reg;
__ lwz(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
DCHECK(!r0.is(flags_reg));
__ li(r0, Operand(Code::kFlagsNotUsedInLookup));
__ andc(flags_reg, flags_reg, r0);
__ mov(r0, Operand(flags));
__ cmpl(flags_reg, r0);
__ bne(&miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ b(&miss);
......@@ -105,9 +92,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
#if V8_TARGET_ARCH_PPC64
......@@ -158,24 +142,24 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
__ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, ip);
__ xori(scratch, scratch, Operand(flags));
__ xori(scratch, scratch, Operand(kPrimaryMagic));
// The mask omits the last two bits because they are not part of the hash.
__ andi(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, name);
__ addi(scratch, scratch, Operand(flags));
__ addi(scratch, scratch, Operand(kSecondaryMagic));
__ andi(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -15,8 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Flags flags, StubCache::Table table,
Register receiver, Register name,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
Register offset, Register scratch, Register scratch2,
......@@ -71,16 +70,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
scratch2 = no_reg;
__ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
Register flags_reg = base_addr;
base_addr = no_reg;
__ LoadlW(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
DCHECK(!r0.is(flags_reg));
__ AndP(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
__ CmpLogicalP(flags_reg, Operand(flags));
__ bne(&miss, Label::kNear);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ b(&miss, Label::kNear);
......@@ -101,9 +90,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
#if V8_TARGET_ARCH_S390X
......@@ -154,24 +140,24 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
__ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ AddP(scratch, scratch, ip);
__ XorP(scratch, scratch, Operand(flags));
__ XorP(scratch, scratch, Operand(kPrimaryMagic));
// The mask omits the last two bits because they are not part of the hash.
__ AndP(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ SubP(scratch, scratch, name);
__ AddP(scratch, scratch, Operand(flags));
__ AddP(scratch, scratch, Operand(kSecondaryMagic));
__ AndP(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch, extra,
extra2, extra3);
ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -19,33 +19,34 @@ void StubCache::Initialize() {
Clear();
}
#ifdef DEBUG
namespace {
static Code::Flags CommonStubCacheChecks(Name* name, Map* map,
Code::Flags flags) {
flags = Code::RemoveHolderFromFlags(flags);
bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
Code* code) {
// Validate that the name does not move on scavenge, and that we
// can use identity checks instead of structural equality checks.
DCHECK(!name->GetHeap()->InNewSpace(name));
DCHECK(name->IsUniqueName());
// The state bits are not important to the hash function because the stub
// cache only contains handlers. Make sure that the bits are the least
// significant so they will be the ones masked out.
DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
// Make sure that the cache holder are not included in the hash.
DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
return flags;
DCHECK(name->HasHashCode());
if (code) {
Code::Flags expected_flags = Code::RemoveHolderFromFlags(
Code::ComputeHandlerFlags(stub_cache->ic_kind()));
Code::Flags flags = Code::RemoveHolderFromFlags(code->flags());
DCHECK_EQ(expected_flags, flags);
DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
}
return true;
}
} // namespace
#endif
Code* StubCache::Set(Name* name, Map* map, Code* code) {
Code::Flags flags = CommonStubCacheChecks(name, map, code->flags());
DCHECK(CommonStubCacheChecks(this, name, map, code));
// Compute the primary entry.
int primary_offset = PrimaryOffset(name, flags, map);
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
Code* old_code = primary->value;
......@@ -53,9 +54,8 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
// secondary cache before overwriting it.
if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
Map* old_map = primary->map;
Code::Flags old_flags = Code::RemoveHolderFromFlags(old_code->flags());
int seed = PrimaryOffset(primary->key, old_flags, old_map);
int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
int seed = PrimaryOffset(primary->key, old_map);
int secondary_offset = SecondaryOffset(primary->key, seed);
Entry* secondary = entry(secondary_, secondary_offset);
*secondary = *primary;
}
......@@ -68,19 +68,16 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
return code;
}
Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
flags = CommonStubCacheChecks(name, map, flags);
int primary_offset = PrimaryOffset(name, flags, map);
Code* StubCache::Get(Name* name, Map* map) {
DCHECK(CommonStubCacheChecks(this, name, map, nullptr));
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
if (primary->key == name && primary->map == map &&
flags == Code::RemoveHolderFromFlags(primary->value->flags())) {
if (primary->key == name && primary->map == map) {
return primary->value;
}
int secondary_offset = SecondaryOffset(name, flags, primary_offset);
int secondary_offset = SecondaryOffset(name, primary_offset);
Entry* secondary = entry(secondary_, secondary_offset);
if (secondary->key == name && secondary->map == map &&
flags == Code::RemoveHolderFromFlags(secondary->value->flags())) {
if (secondary->key == name && secondary->map == map) {
return secondary->value;
}
return NULL;
......@@ -103,7 +100,6 @@ void StubCache::Clear() {
void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
Code::Flags flags,
Handle<Context> native_context,
Zone* zone) {
for (int i = 0; i < kPrimaryTableSize; i++) {
......@@ -113,7 +109,7 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
// with a primitive receiver.
if (map == NULL) continue;
int offset = PrimaryOffset(*name, flags, map);
int offset = PrimaryOffset(*name, map);
if (entry(primary_, offset) == &primary_[i] &&
TypeFeedbackOracle::IsRelevantFeedback(map, *native_context)) {
types->AddMapIfMissing(Handle<Map>(map), zone);
......@@ -129,10 +125,10 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
if (map == NULL) continue;
// Lookup in primary table and skip duplicates.
int primary_offset = PrimaryOffset(*name, flags, map);
int primary_offset = PrimaryOffset(*name, map);
// Lookup in secondary table and add matches.
int offset = SecondaryOffset(*name, flags, primary_offset);
int offset = SecondaryOffset(*name, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
TypeFeedbackOracle::IsRelevantFeedback(map, *native_context)) {
types->AddMapIfMissing(Handle<Map>(map), zone);
......
......@@ -41,13 +41,12 @@ class StubCache {
void Initialize();
// Access cache for entry hash(name, map).
Code* Set(Name* name, Map* map, Code* code);
Code* Get(Name* name, Map* map, Code::Flags flags);
Code* Get(Name* name, Map* map);
// Clear the lookup table (@ mark compact collection).
void Clear();
// Collect all maps that match the name and flags.
// Collect all maps that match the name.
void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
Code::Flags flags, Handle<Context> native_context,
Zone* zone);
Handle<Context> native_context, Zone* zone);
// Generate code for probing the stub cache table.
// Arguments extra, extra2 and extra3 may be used to pass additional scratch
// registers. Set to no_reg if not needed.
......@@ -97,13 +96,16 @@ class StubCache {
static const int kSecondaryTableBits = 9;
static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
static int PrimaryOffsetForTesting(Name* name, Code::Flags flags, Map* map) {
return PrimaryOffset(name, flags, map);
// Some magic number used in primary and secondary hash computations.
static const int kPrimaryMagic = 0x3d532433;
static const int kSecondaryMagic = 0xb16b00b5;
static int PrimaryOffsetForTesting(Name* name, Map* map) {
return PrimaryOffset(name, map);
}
static int SecondaryOffsetForTesting(Name* name, Code::Flags flags,
int seed) {
return SecondaryOffset(name, flags, seed);
static int SecondaryOffsetForTesting(Name* name, int seed) {
return SecondaryOffset(name, seed);
}
// The constructor is made public only for the purposes of testing.
......@@ -120,7 +122,7 @@ class StubCache {
// Hash algorithm for the primary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
static int PrimaryOffset(Name* name, Map* map) {
STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
DCHECK(name->HasHashCode());
......@@ -130,27 +132,19 @@ class StubCache {
// 4Gb (and not at all if it isn't).
uint32_t map_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
// We always set the in_loop bit to zero when generating the lookup code
// so do it here too so the hash codes match.
uint32_t iflags =
(static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
// Base the offset on a simple combination of name, flags, and map.
uint32_t key = (map_low32bits + field) ^ iflags;
// Base the offset on a simple combination of name and map.
uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}
// Hash algorithm for the secondary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
static int SecondaryOffset(Name* name, int seed) {
// Use the seed from the primary cache in the secondary cache.
uint32_t name_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
// We always set the in_loop bit to zero when generating the lookup code
// so do it here too so the hash codes match.
uint32_t iflags =
(static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
uint32_t key = (seed - name_low32bits) + iflags;
uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
......
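
To make the probe order explicit (again a sketch, not the committed code): with the flags comparison removed, a hit in either table is determined solely by (name, map) identity. The entry layout and array-based tables below are simplified assumptions; offsets are the scaled values produced by the hash sketch shown after the commit header.

#include <cstdint>

struct Entry { void* key; void* value; void* map; };  // simplified layout

// Probe the primary table first, then the secondary table whose offset was
// seeded with the primary offset. No Code::Flags check remains on either path.
void* Probe(Entry* primary, Entry* secondary, void* name, void* map,
            uint32_t primary_offset, uint32_t secondary_offset) {
  Entry* p = &primary[primary_offset >> 2];  // offsets are scaled by 4
  if (p->key == name && p->map == map) return p->value;
  Entry* s = &secondary[secondary_offset >> 2];
  if (s->key == name && s->map == map) return s->value;
  return nullptr;  // miss: caller falls through to the runtime
}
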
......@@ -15,8 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Flags flags, StubCache::Table table,
Register receiver, Register name,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
Register offset) {
......@@ -57,12 +56,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
__ LoadAddress(kScratchRegister, value_offset);
__ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
__ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
......@@ -81,9 +74,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
USE(extra2); // The register extra2 is not used on the X64 platform.
......@@ -129,25 +119,25 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xorp(scratch, Immediate(flags));
__ xorp(scratch, Immediate(kPrimaryMagic));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch);
ProbeTable(this, masm, kPrimary, receiver, name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xorp(scratch, Immediate(flags));
__ xorp(scratch, Immediate(kPrimaryMagic));
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
__ subl(scratch, name);
__ addl(scratch, Immediate(flags));
__ addl(scratch, Immediate(kSecondaryMagic));
__ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch);
ProbeTable(this, masm, kSecondary, receiver, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -15,7 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register name, Register receiver,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
......@@ -27,6 +26,7 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
Code::Kind ic_kind = stub_cache->ic_kind();
bool is_vector_store =
IC::ICUseVector(ic_kind) &&
(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
......@@ -47,12 +47,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
......@@ -102,12 +96,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
......@@ -145,9 +133,6 @@ static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_));
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
......@@ -178,7 +163,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
// Get the map of the receiver and compute the hash.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ xor_(offset, kPrimaryMagic);
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
......@@ -187,21 +172,19 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
ProbeTable(this, masm, ic_kind_, flags, kPrimary, name, receiver, offset,
extra);
ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ xor_(offset, kPrimaryMagic);
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
__ sub(offset, name);
__ add(offset, Immediate(flags));
__ add(offset, Immediate(kSecondaryMagic));
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
ProbeTable(this, masm, ic_kind_, flags, kSecondary, name, receiver, offset,
extra);
ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
......@@ -280,8 +280,7 @@ void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorSlot slot,
receiver_types->Clear();
if (!slot.IsInvalid()) {
LoadICNexus nexus(feedback_vector_, slot);
Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
CollectReceiverTypes(isolate()->load_stub_cache(), &nexus, name, flags,
CollectReceiverTypes(isolate()->load_stub_cache(), &nexus, name,
receiver_types);
}
}
......@@ -307,8 +306,7 @@ void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
CollectReceiverTypes(isolate()->store_stub_cache(), slot, name, flags,
CollectReceiverTypes(isolate()->store_stub_cache(), slot, name,
receiver_types);
}
......@@ -331,22 +329,19 @@ void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorSlot slot,
void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
FeedbackVectorSlot slot,
Handle<Name> name,
Code::Flags flags,
SmallMapList* types) {
StoreICNexus nexus(feedback_vector_, slot);
CollectReceiverTypes(stub_cache, &nexus, name, flags, types);
CollectReceiverTypes(stub_cache, &nexus, name, types);
}
void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
FeedbackNexus* nexus,
Handle<Name> name,
Code::Flags flags,
SmallMapList* types) {
if (FLAG_collect_megamorphic_maps_from_stub_cache &&
nexus->ic_state() == MEGAMORPHIC) {
types->Reserve(4, zone());
stub_cache->CollectMatchingMaps(types, name, flags, native_context_,
zone());
stub_cache->CollectMatchingMaps(types, name, native_context_, zone());
} else {
CollectReceiverTypes(nexus, types);
}
......
......@@ -97,11 +97,9 @@ class TypeFeedbackOracle: public ZoneObject {
private:
void CollectReceiverTypes(StubCache* stub_cache, FeedbackVectorSlot slot,
Handle<Name> name, Code::Flags flags,
SmallMapList* types);
Handle<Name> name, SmallMapList* types);
void CollectReceiverTypes(StubCache* stub_cache, FeedbackNexus* nexus,
Handle<Name> name, Code::Flags flags,
SmallMapList* types);
Handle<Name> name, SmallMapList* types);
// Returns true if there is at least one string map and if
// all maps are string maps.
......
......@@ -1103,24 +1103,21 @@ TEST(TestOutOfScopeVariable) {
namespace {
void TestStubCacheOffsetCalculation(StubCache::Table table,
Code::Kind handler_kind) {
void TestStubCacheOffsetCalculation(StubCache::Table table) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 2;
CodeStubAssemblerTester m(isolate, kNumParams);
Code::Flags code_flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(handler_kind));
{
Node* name = m.Parameter(0);
Node* map = m.Parameter(1);
Node* primary_offset = m.StubCachePrimaryOffset(name, code_flags, map);
Node* primary_offset = m.StubCachePrimaryOffset(name, map);
Node* result;
if (table == StubCache::kPrimary) {
result = primary_offset;
} else {
CHECK_EQ(StubCache::kSecondary, table);
result = m.StubCacheSecondaryOffset(name, code_flags, primary_offset);
result = m.StubCacheSecondaryOffset(name, primary_offset);
}
m.Return(m.SmiFromWord32(result));
}
......@@ -1163,13 +1160,12 @@ void TestStubCacheOffsetCalculation(StubCache::Table table,
int expected_result;
{
int primary_offset =
StubCache::PrimaryOffsetForTesting(*name, code_flags, *map);
int primary_offset = StubCache::PrimaryOffsetForTesting(*name, *map);
if (table == StubCache::kPrimary) {
expected_result = primary_offset;
} else {
expected_result = StubCache::SecondaryOffsetForTesting(
*name, code_flags, primary_offset);
expected_result =
StubCache::SecondaryOffsetForTesting(*name, primary_offset);
}
}
Handle<Object> result = ft.Call(name, map).ToHandleChecked();
......@@ -1182,20 +1178,12 @@ void TestStubCacheOffsetCalculation(StubCache::Table table,
} // namespace
TEST(StubCachePrimaryOffsetLoadIC) {
TestStubCacheOffsetCalculation(StubCache::kPrimary, Code::LOAD_IC);
TEST(StubCachePrimaryOffset) {
TestStubCacheOffsetCalculation(StubCache::kPrimary);
}
TEST(StubCachePrimaryOffsetStoreIC) {
TestStubCacheOffsetCalculation(StubCache::kPrimary, Code::STORE_IC);
}
TEST(StubCacheSecondaryOffsetLoadIC) {
TestStubCacheOffsetCalculation(StubCache::kSecondary, Code::LOAD_IC);
}
TEST(StubCacheSecondaryOffsetStoreIC) {
TestStubCacheOffsetCalculation(StubCache::kSecondary, Code::STORE_IC);
TEST(StubCacheSecondaryOffset) {
TestStubCacheOffsetCalculation(StubCache::kSecondary);
}
namespace {
......@@ -1217,9 +1205,6 @@ TEST(TryProbeStubCache) {
CodeStubAssemblerTester m(isolate, kNumParams);
Code::Kind ic_kind = Code::LOAD_IC;
Code::Flags flags_to_query =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind));
StubCache stub_cache(isolate, ic_kind);
stub_cache.Clear();
......@@ -1299,25 +1284,8 @@ TEST(TryProbeStubCache) {
// Generate some number of handlers.
for (int i = 0; i < 30; i++) {
Code::Kind code_kind;
switch (rand_gen.NextInt(4)) {
case 0:
code_kind = Code::LOAD_IC;
break;
case 1:
code_kind = Code::KEYED_LOAD_IC;
break;
case 2:
code_kind = Code::STORE_IC;
break;
case 3:
code_kind = Code::KEYED_STORE_IC;
break;
default:
UNREACHABLE();
}
Code::Flags flags =
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(code_kind));
Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind));
handlers.push_back(CreateCodeWithFlags(flags));
}
......@@ -1342,7 +1310,7 @@ TEST(TryProbeStubCache) {
int index = rand_gen.NextInt();
Handle<Name> name = names[index % names.size()];
Handle<JSObject> receiver = receivers[index % receivers.size()];
Code* handler = stub_cache.Get(*name, receiver->map(), flags_to_query);
Code* handler = stub_cache.Get(*name, receiver->map());
if (handler == nullptr) {
queried_non_existing = true;
} else {
......@@ -1358,7 +1326,7 @@ TEST(TryProbeStubCache) {
int index2 = rand_gen.NextInt();
Handle<Name> name = names[index1 % names.size()];
Handle<JSObject> receiver = receivers[index2 % receivers.size()];
Code* handler = stub_cache.Get(*name, receiver->map(), flags_to_query);
Code* handler = stub_cache.Get(*name, receiver->map());
if (handler == nullptr) {
queried_non_existing = true;
} else {
......