Commit f1ffe311 authored by ishell's avatar ishell Committed by Commit bot

[stubs] Introducing LoadICTFStub and LoadICTrampolineTFStub and a switch to...

[stubs] Introducing LoadICTFStub and LoadICTrampolineTFStub and a switch to enable them instead of respective platform stubs.

The stubs do not increase respective counters as they are in the snapshot and --native-code-counters is off during snapshot creation anyway.

Review-Url: https://codereview.chromium.org/2031753003
Cr-Commit-Position: refs/heads/master@{#36754}
parent 2ecd866d
......@@ -13,6 +13,10 @@ namespace internal {
// static
Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode) {
  // Prefer the TurboFan-generated trampoline when the experimental flag is
  // on; otherwise fall back to the platform-specific stub. Both variants use
  // the same Load call interface descriptor.
  if (FLAG_tf_load_ic_stub) {
    LoadICTrampolineTFStub tf_stub(isolate, LoadICState(typeof_mode));
    return Callable(tf_stub.GetCode(), LoadDescriptor(isolate));
  }
  LoadICTrampolineStub platform_stub(isolate, LoadICState(typeof_mode));
  return Callable(platform_stub.GetCode(), LoadDescriptor(isolate));
}
......
......@@ -4,6 +4,9 @@
#include "src/code-stub-assembler.h"
#include "src/code-factory.h"
#include "src/frames-inl.h"
#include "src/frames.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
......@@ -470,6 +473,17 @@ Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
// Loads a value of representation {rep} located {offset} bytes from the
// current frame pointer.
compiler::Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
  return Load(rep, LoadFramePointer(), IntPtrConstant(offset));
}
// Loads a value of representation {rep} located {offset} bytes from the
// parent frame pointer (i.e. the frame of this stub's caller).
compiler::Node* CodeStubAssembler::LoadFromParentFrame(int offset,
                                                       MachineType rep) {
  return Load(rep, LoadParentFramePointer(), IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
MachineType rep) {
return Load(rep, buffer, IntPtrConstant(offset));
......@@ -556,6 +570,10 @@ Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
return LoadObjectField(object, JSValue::kValueOffset);
}
// Reads the payload of a WeakCell (its value field).
Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell) {
  Node* cell_value = LoadObjectField(weak_cell, WeakCell::kValueOffset);
  return cell_value;
}
Node* CodeStubAssembler::AllocateUninitializedFixedArray(Node* length) {
Node* header_size = IntPtrConstant(FixedArray::kHeaderSize);
Node* data_size = WordShl(length, IntPtrConstant(kPointerSizeLog2));
......@@ -1472,16 +1490,13 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
}
template <typename Dictionary>
void CodeStubAssembler::NameDictionaryLookup(
Node* dictionary, Node* unique_name, Label* if_found_, Variable* var_entry,
Label* if_not_found, int inlined_probes) {
void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* unique_name, Label* if_found,
Variable* var_entry,
Label* if_not_found,
int inlined_probes) {
DCHECK_EQ(MachineRepresentation::kWord32, var_entry->rep());
// TODO(ishell): Remove this trampoline block once crbug/615621 is fixed.
// This trampoline block is currently necessary here to generate a correct
// phi for |var_entry|.
Label if_found(this, var_entry);
const int kElementsStartOffset =
Dictionary::kElementsStartIndex * kPointerSize;
......@@ -1500,7 +1515,7 @@ void CodeStubAssembler::NameDictionaryLookup(
Node* current =
LoadFixedArrayElement(dictionary, index, kElementsStartOffset);
var_entry->Bind(entry);
GotoIf(WordEqual(current, unique_name), &if_found);
GotoIf(WordEqual(current, unique_name), if_found);
// See Dictionary::NextProbe().
count = Int32Constant(i + 1);
......@@ -1525,7 +1540,7 @@ void CodeStubAssembler::NameDictionaryLookup(
Node* current =
LoadFixedArrayElement(dictionary, index, kElementsStartOffset);
GotoIf(WordEqual(current, undefined), if_not_found);
GotoIf(WordEqual(current, unique_name), &if_found);
GotoIf(WordEqual(current, unique_name), if_found);
// See Dictionary::NextProbe().
count = Int32Add(count, Int32Constant(1));
......@@ -1535,8 +1550,6 @@ void CodeStubAssembler::NameDictionaryLookup(
var_entry->Bind(entry);
Goto(&loop);
}
Bind(&if_found);
Goto(if_found_);
}
// Instantiate template methods to workaround GCC compilation issue.
......@@ -1983,5 +1996,277 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
: WordShr(index_node, IntPtrConstant(-element_size_shift)));
}
// Loads the type feedback vector belonging to the JSFunction that called this
// stub. Trampoline stubs don't receive the vector as a parameter, so it is
// recovered from the caller's frame:
//   parent frame -> JSFunction -> literals array -> feedback vector.
compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
  Node* function =
      LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset);
  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
  return LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
}
// Returns the map used for IC dispatch on {receiver}. Smis carry no map
// pointer, so the heap number map is substituted for them.
// NOTE(review): mapping Smi receivers to the heap number map looks
// intentional (so number feedback covers both representations) — confirm.
compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
  Variable var_receiver_map(this, MachineRepresentation::kTagged);
  // TODO(ishell): defer blocks when it works.
  Label load_smi_map(this /*, Label::kDeferred*/), load_receiver_map(this),
      if_result(this);

  Branch(WordIsSmi(receiver), &load_smi_map, &load_receiver_map);
  Bind(&load_smi_map);
  {
    var_receiver_map.Bind(LoadRoot(Heap::kHeapNumberMapRootIndex));
    Goto(&if_result);
  }
  Bind(&load_receiver_map);
  {
    var_receiver_map.Bind(LoadMap(receiver));
    Goto(&if_result);
  }
  // Merge point: the variable holds whichever map was bound above.
  Bind(&if_result);
  return var_receiver_map.value();
}
// Handles the monomorphic IC case: the feedback slot holds a WeakCell whose
// value is the expected receiver map, and the next slot holds the handler.
// On a hit, binds {var_handler} and jumps to {if_handler}; otherwise jumps to
// {if_miss}. Returns the raw {feedback} slot value so callers can continue
// with the polymorphic/megamorphic checks.
compiler::Node* CodeStubAssembler::TryMonomorphicCase(
    const LoadICParameters* p, compiler::Node* receiver_map, Label* if_handler,
    Variable* var_handler, Label* if_miss) {
  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());

  // TODO(ishell): add helper class that hides offset computations for a series
  // of loads.
  int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
  // {p->slot} is a Smi index into the feedback vector; turn it into a byte
  // offset relative to the (untagged) vector start.
  Node* offset = ElementOffsetFromIndex(p->slot, FAST_HOLEY_ELEMENTS,
                                        SMI_PARAMETERS, header_size);
  Node* feedback = Load(MachineType::AnyTagged(), p->vector, offset);

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  GotoUnless(WordEqual(receiver_map, LoadWeakCellValue(feedback)), if_miss);

  // The handler lives in the slot immediately after the weak cell.
  Node* handler = Load(MachineType::AnyTagged(), p->vector,
                       IntPtrAdd(offset, IntPtrConstant(kPointerSize)));
  var_handler->Bind(handler);
  Goto(if_handler);
  return feedback;
}
// Searches the polymorphic {feedback} FixedArray of (map weak cell, handler)
// pairs for {receiver_map}. The first {unroll_count} entries are checked with
// straight-line (unrolled) code; any remaining entries are checked in a loop.
// On a hit, binds {var_handler} and jumps to {if_handler}; if the whole array
// is exhausted, jumps to {if_miss}.
void CodeStubAssembler::HandlePolymorphicCase(
    const LoadICParameters* p, compiler::Node* receiver_map,
    compiler::Node* feedback, Label* if_handler, Variable* var_handler,
    Label* if_miss, int unroll_count) {
  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());

  // Iterate {feedback} array.
  const int kEntrySize = 2;  // {map weak cell, handler} per logical entry.

  // Unrolled checks for the first {unroll_count} entries.
  for (int i = 0; i < unroll_count; i++) {
    Label next_entry(this);
    Node* cached_map = LoadWeakCellValue(
        LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize)));
    GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);

    // Found, now call handler.
    Node* handler =
        LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize + 1));
    var_handler->Bind(handler);
    Goto(if_handler);

    Bind(&next_entry);
  }
  Node* length = SmiToWord32(LoadFixedArrayBaseLength(feedback));

  // Loop from {unroll_count}*kEntrySize to {length}.
  Variable var_index(this, MachineRepresentation::kWord32);
  Label loop(this, &var_index);
  var_index.Bind(Int32Constant(unroll_count * kEntrySize));
  Goto(&loop);
  Bind(&loop);
  {
    Node* index = var_index.value();
    // Past the end of the feedback array: no map matched.
    GotoIf(Int32GreaterThanOrEqual(index, length), if_miss);

    Node* cached_map =
        LoadWeakCellValue(LoadFixedArrayElement(feedback, index));

    Label next_entry(this);
    GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);

    // Found, now call handler.
    // The handler is one pointer-size past the weak cell element.
    Node* handler = LoadFixedArrayElement(feedback, index, kPointerSize);
    var_handler->Bind(handler);
    Goto(if_handler);

    Bind(&next_entry);
    var_index.Bind(Int32Add(index, Int32Constant(kEntrySize)));
    Goto(&loop);
  }
}
// Computes the primary stub cache table offset for (name, flags, map).
// Must stay in sync with v8::internal::StubCache::PrimaryOffset(), otherwise
// generated probes will miss entries inserted by the runtime.
compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
                                                          Code::Flags flags,
                                                          compiler::Node* map) {
  // See v8::internal::StubCache::PrimaryOffset().
  STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
  // Compute the hash of the name (use entire hash field).
  Node* hash_field = LoadNameHashField(name);
  // The name must already have a computed hash (hash-not-computed bit clear).
  // NOTE(review): WordEqual is a word-sized compare applied to 32-bit
  // operands here — confirm Word32Equal isn't required.
  Assert(WordEqual(
      Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
      Int32Constant(0)));

  // Using only the low bits in 64-bit mode is unlikely to increase the
  // risk of collision even if the heap is spread over an area larger than
  // 4Gb (and not at all if it isn't).
  Node* hash = Int32Add(hash_field, map);

  // We always set the in_loop bit to zero when generating the lookup code
  // so do it here too so the hash codes match.
  uint32_t iflags =
      (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
  // Base the offset on a simple combination of name, flags, and map.
  hash = Word32Xor(hash, Int32Constant(iflags));
  uint32_t mask = (StubCache::kPrimaryTableSize - 1)
                  << StubCache::kCacheIndexShift;
  return Word32And(hash, Int32Constant(mask));
}
// Computes the secondary stub cache table offset. {seed} is the primary
// offset, so the two tables hash differently and don't collide in lockstep.
// Must stay in sync with v8::internal::StubCache::SecondaryOffset().
compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
    compiler::Node* name, Code::Flags flags, compiler::Node* seed) {
  // See v8::internal::StubCache::SecondaryOffset().
  // Use the seed from the primary cache in the secondary cache.
  Node* hash = Int32Sub(seed, name);

  // We always set the in_loop bit to zero when generating the lookup code
  // so do it here too so the hash codes match.
  uint32_t iflags =
      (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
  hash = Int32Add(hash, Int32Constant(iflags));
  int32_t mask = (StubCache::kSecondaryTableSize - 1)
                 << StubCache::kCacheIndexShift;
  return Word32And(hash, Int32Constant(mask));
}
// Definition of the opaque enum declared in the header. It mirrors
// StubCache::Table one-to-one; the header declares it opaquely so that it
// doesn't have to include the stub cache header.
enum CodeStubAssembler::StubCacheTable : int {
  kPrimary = static_cast<int>(StubCache::kPrimary),
  kSecondary = static_cast<int>(StubCache::kSecondary)
};
// Probes a single stub cache table (primary or secondary) at {entry_offset}.
// An entry matches when its key equals {name}, its map equals {map}, and the
// cached code's lookup-relevant flag bits equal {flags}. On a hit, binds
// {var_handler} to the cached code object and jumps to {if_handler};
// otherwise jumps to {if_miss}.
void CodeStubAssembler::TryProbeStubCacheTable(
    StubCache* stub_cache, StubCacheTable table_id,
    compiler::Node* entry_offset, compiler::Node* name, Code::Flags flags,
    compiler::Node* map, Label* if_handler, Variable* var_handler,
    Label* if_miss) {
  StubCache::Table table = static_cast<StubCache::Table>(table_id);
#ifdef DEBUG
  // These test flags force a miss in one of the tables so that the other one
  // gets exercised in isolation.
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    Goto(if_miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    Goto(if_miss);
  }
#endif
  // The {table_offset} holds the entry offset times four (due to masking
  // and shifting optimizations).
  const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
  entry_offset = Int32Mul(entry_offset, Int32Constant(kMultiplier));

  // Check that the key in the entry matches the name.
  Node* key_base =
      ExternalConstant(ExternalReference(stub_cache->key_reference(table)));
  Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
  GotoIf(WordNotEqual(name, entry_key), if_miss);

  // Get the map entry from the cache.
  // Entry layout is {key, value, map}: the map field lives two words after
  // the key, as the DCHECK below verifies.
  DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
                                  stub_cache->key_reference(table).address());
  Node* entry_map =
      Load(MachineType::Pointer(), key_base,
           Int32Add(entry_offset, Int32Constant(kPointerSize * 2)));
  GotoIf(WordNotEqual(map, entry_map), if_miss);

  // Check that the flags match what we're looking for.
  // The value (code) field lives one word after the key.
  DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
                              stub_cache->key_reference(table).address());
  Node* code = Load(MachineType::Pointer(), key_base,
                    Int32Add(entry_offset, Int32Constant(kPointerSize)));

  // Compare only the flag bits that participate in lookup.
  Node* code_flags =
      LoadObjectField(code, Code::kFlagsOffset, MachineType::Uint32());
  GotoIf(Word32NotEqual(Int32Constant(flags),
                        Word32And(code_flags,
                                  Int32Constant(~Code::kFlagsNotUsedInLookup))),
         if_miss);

  // We found the handler.
  var_handler->Bind(code);
  Goto(if_handler);
}
// Probes the stub cache for a handler matching ({receiver}'s map, {name},
// {flags}): first the primary table, then the secondary one. Smi receivers
// always miss. On a hit, binds {var_handler} and jumps to {if_handler};
// otherwise jumps to {if_miss}.
void CodeStubAssembler::TryProbeStubCache(
    StubCache* stub_cache, Code::Flags flags, compiler::Node* receiver,
    compiler::Node* name, Label* if_handler, Variable* var_handler,
    Label* if_miss) {
  Label try_secondary(this);

  // Check that the {receiver} isn't a smi.
  GotoIf(WordIsSmi(receiver), if_miss);

  Node* receiver_map = LoadMap(receiver);

  // Probe the primary table.
  Node* primary_offset = StubCachePrimaryOffset(name, flags, receiver_map);
  TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name, flags,
                         receiver_map, if_handler, var_handler, &try_secondary);

  Bind(&try_secondary);
  {
    // Probe the secondary table, seeding its hash with the primary offset.
    Node* secondary_offset =
        StubCacheSecondaryOffset(name, flags, primary_offset);
    TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
                           flags, receiver_map, if_handler, var_handler,
                           if_miss);
  }
}
// Main LoadIC dispatch: tries the feedback vector states in order —
// monomorphic, polymorphic (FixedArray of pairs), megamorphic (global stub
// cache) — and tail calls the found handler. Jumps to {if_miss} when no
// cached handler applies (the caller then goes to the runtime).
void CodeStubAssembler::LoadIC(const LoadICParameters* p, Label* if_miss) {
  Variable var_handler(this, MachineRepresentation::kTagged);
  // TODO(ishell): defer blocks when it works.
  Label if_handler(this, &var_handler), try_polymorphic(this),
      try_megamorphic(this /*, Label::kDeferred*/);

  Node* receiver_map = LoadReceiverMap(p->receiver);

  // Check monomorphic case.
  Node* feedback = TryMonomorphicCase(p, receiver_map, &if_handler,
                                      &var_handler, &try_polymorphic);
  Bind(&if_handler);
  {
    // A handler was found: tail call it with the load-with-vector signature.
    LoadWithVectorDescriptor descriptor(isolate());
    TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
                 p->name, p->slot, p->vector);
  }

  Bind(&try_polymorphic);
  {
    // Check polymorphic case: feedback is a FixedArray of (map, handler)
    // pairs iff its map is the fixed array map.
    GotoUnless(
        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
        &try_megamorphic);
    HandlePolymorphicCase(p, receiver_map, feedback, &if_handler, &var_handler,
                          if_miss, 2);
  }

  Bind(&try_megamorphic);
  {
    // Check megamorphic case: the slot holds the megamorphic sentinel symbol,
    // so fall back to probing the global stub cache.
    GotoUnless(
        WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
        if_miss);

    Code::Flags code_flags =
        Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));

    TryProbeStubCache(isolate()->stub_cache(), code_flags, p->receiver, p->name,
                      &if_handler, &var_handler, if_miss);
  }
}
} // namespace internal
} // namespace v8
......@@ -12,6 +12,8 @@ namespace v8 {
namespace internal {
class CallInterfaceDescriptor;
class StatsCounter;
class StubCache;
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
......@@ -102,6 +104,13 @@ class CodeStubAssembler : public compiler::CodeAssembler {
BranchIfFloat64Equal(value, value, if_false, if_true);
}
// Load value from current frame by given offset in bytes.
compiler::Node* LoadFromFrame(int offset,
MachineType rep = MachineType::AnyTagged());
// Load value from current parent frame by given offset in bytes.
compiler::Node* LoadFromParentFrame(
int offset, MachineType rep = MachineType::AnyTagged());
// Load an object pointer from a buffer that isn't in the heap.
compiler::Node* LoadBufferObject(compiler::Node* buffer, int offset,
MachineType rep = MachineType::AnyTagged());
......@@ -146,6 +155,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadStringLength(compiler::Node* object);
// Load value field of a JSValue object.
compiler::Node* LoadJSValueValue(compiler::Node* object);
// Load value field of a WeakCell object.
compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell);
compiler::Node* AllocateUninitializedFixedArray(compiler::Node* length);
......@@ -280,6 +291,65 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* callable,
compiler::Node* object);
// LoadIC helpers.
struct LoadICParameters {
LoadICParameters(compiler::Node* context, compiler::Node* receiver,
compiler::Node* name, compiler::Node* slot,
compiler::Node* vector)
: context(context),
receiver(receiver),
name(name),
slot(slot),
vector(vector) {}
compiler::Node* context;
compiler::Node* receiver;
compiler::Node* name;
compiler::Node* slot;
compiler::Node* vector;
};
// Load type feedback vector from the stub caller's frame.
compiler::Node* LoadTypeFeedbackVectorForStub();
compiler::Node* LoadReceiverMap(compiler::Node* receiver);
// Checks monomorphic case. Returns {feedback} entry of the vector.
compiler::Node* TryMonomorphicCase(const LoadICParameters* p,
compiler::Node* receiver_map,
Label* if_handler, Variable* var_handler,
Label* if_miss);
void HandlePolymorphicCase(const LoadICParameters* p,
compiler::Node* receiver_map,
compiler::Node* feedback, Label* if_handler,
Variable* var_handler, Label* if_miss,
int unroll_count);
compiler::Node* StubCachePrimaryOffset(compiler::Node* name,
Code::Flags flags,
compiler::Node* map);
compiler::Node* StubCacheSecondaryOffset(compiler::Node* name,
Code::Flags flags,
compiler::Node* seed);
// This enum is used here as a replacement for StubCache::Table to avoid
// including stub cache header.
enum StubCacheTable : int;
void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
compiler::Node* entry_offset,
compiler::Node* name, Code::Flags flags,
compiler::Node* map, Label* if_handler,
Variable* var_handler, Label* if_miss);
void TryProbeStubCache(StubCache* stub_cache, Code::Flags flags,
compiler::Node* receiver, compiler::Node* name,
Label* if_handler, Variable* var_handler,
Label* if_miss);
void LoadIC(const LoadICParameters* p, Label* if_miss);
private:
compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
ElementsKind kind, ParameterMode mode,
......@@ -299,5 +369,4 @@ class CodeStubAssembler : public compiler::CodeAssembler {
} // namespace internal
} // namespace v8
#endif // V8_CODE_STUB_ASSEMBLER_H_
......@@ -443,7 +443,6 @@ void CompareICStub::Generate(MacroAssembler* masm) {
}
}
Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
Zone zone(isolate()->allocator());
......@@ -454,6 +453,45 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
return assembler.GenerateCode();
}
// Generates the TurboFan LoadIC trampoline. Parameters follow the Load call
// interface descriptor (receiver, name, slot, context); the feedback vector
// is not a parameter and is instead loaded from the caller's frame.
void LoadICTrampolineTFStub::GenerateAssembly(
    CodeStubAssembler* assembler) const {
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Label Label;

  Node* receiver = assembler->Parameter(0);
  Node* name = assembler->Parameter(1);
  Node* slot = assembler->Parameter(2);
  Node* context = assembler->Parameter(3);
  Node* vector = assembler->LoadTypeFeedbackVectorForStub();

  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
  Label miss(assembler);
  assembler->LoadIC(&p, &miss);

  // No cached handler applied: defer to the runtime.
  assembler->Bind(&miss);
  assembler->TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
                             slot, vector);
}
// Generates the TurboFan LoadIC body. Parameters follow the LoadWithVector
// call interface descriptor (receiver, name, slot, vector, context) — unlike
// the trampoline, the feedback vector is passed in explicitly.
void LoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Label Label;

  Node* receiver = assembler->Parameter(0);
  Node* name = assembler->Parameter(1);
  Node* slot = assembler->Parameter(2);
  Node* vector = assembler->Parameter(3);
  Node* context = assembler->Parameter(4);

  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
  Label miss(assembler);
  assembler->LoadIC(&p, &miss);

  // No cached handler applied: defer to the runtime.
  assembler->Bind(&miss);
  assembler->TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
                             slot, vector);
}
void AllocateHeapNumberStub::GenerateAssembly(
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
......
......@@ -139,6 +139,8 @@ namespace internal {
V(ToInteger) \
V(ToLength) \
V(HasProperty) \
V(LoadICTrampolineTF) \
V(LoadICTF) \
/* IC Handler stubs */ \
V(ArrayBufferViewLoadField) \
V(KeyedLoadSloppyArguments) \
......@@ -2384,6 +2386,31 @@ class LoadICTrampolineStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
};
// TurboFan counterpart of LoadICTrampolineStub: same Load call interface,
// but the body is generated via the CodeStubAssembler and the feedback
// vector is recovered from the caller's frame.
class LoadICTrampolineTFStub : public TurboFanCodeStub {
 public:
  LoadICTrampolineTFStub(Isolate* isolate, const LoadICState& state)
      : TurboFanCodeStub(isolate) {
    // The LoadICState is packed into the stub's minor key.
    minor_key_ = state.GetExtraICState();
  }

  void GenerateAssembly(CodeStubAssembler* assembler) const override;

  Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }

  // This stub handles all states itself, so it reports GENERIC.
  InlineCacheState GetICState() const final { return GENERIC; }

  ExtraICState GetExtraICState() const final {
    return static_cast<ExtraICState>(minor_key_);
  }

 protected:
  // Unpacks the LoadICState stored in the minor key.
  LoadICState state() const {
    return LoadICState(static_cast<ExtraICState>(minor_key_));
  }

  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
  DEFINE_CODE_STUB(LoadICTrampolineTF, TurboFanCodeStub);
};
class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
public:
......@@ -2481,6 +2508,24 @@ class LoadICStub : public PlatformCodeStub {
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
// TurboFan counterpart of LoadICStub: uses the LoadWithVector descriptor,
// i.e. the feedback vector is passed as an explicit parameter (used from
// optimized code, see LoadIC::initialize_stub_in_optimized_code).
class LoadICTFStub : public TurboFanCodeStub {
 public:
  explicit LoadICTFStub(Isolate* isolate, const LoadICState& state)
      : TurboFanCodeStub(isolate) {
    // The LoadICState is packed into the stub's minor key.
    minor_key_ = state.GetExtraICState();
  }

  void GenerateAssembly(CodeStubAssembler* assembler) const override;

  Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }

  // This stub handles all states itself, so it reports GENERIC.
  InlineCacheState GetICState() const final { return GENERIC; }

  ExtraICState GetExtraICState() const final {
    return static_cast<ExtraICState>(minor_key_);
  }

  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
  DEFINE_CODE_STUB(LoadICTF, TurboFanCodeStub);
};
class KeyedLoadICStub : public PlatformCodeStub {
public:
......
......@@ -200,6 +200,10 @@ StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
case Code::BYTECODE_HANDLER:
case Code::HANDLER:
case Code::BUILTIN:
case Code::LOAD_IC:
case Code::KEYED_LOAD_IC:
case Code::STORE_IC:
case Code::KEYED_STORE_IC:
return StackFrame::STUB;
case Code::WASM_FUNCTION:
return StackFrame::WASM;
......
......@@ -529,6 +529,25 @@ Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
return raw_assembler_->TailCallN(call_descriptor, target, args);
}
// Tail calls a stub taking four arguments plus context. Mirrors the smaller
// TailCallStub overloads: the context is appended as the last element of the
// argument array.
Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
                                  Node* target, Node* context, Node* arg1,
                                  Node* arg2, Node* arg3, Node* arg4,
                                  size_t result_size) {
  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
      MachineType::AnyTagged(), result_size);

  // 4 arguments + context.
  Node** args = zone()->NewArray<Node*>(5);
  args[0] = arg1;
  args[1] = arg2;
  args[2] = arg3;
  args[3] = arg4;
  args[4] = context;

  return raw_assembler_->TailCallN(call_descriptor, target, args);
}
Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& interface_descriptor,
Node* code_target_address, Node** args) {
......
......@@ -323,6 +323,9 @@ class CodeAssembler {
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3,
size_t result_size = 1);
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3,
Node* arg4, size_t result_size = 1);
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
Node* code_target_address, Node** args);
......
......@@ -764,6 +764,7 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
DEFINE_BOOL(tf_load_ic_stub, false, "use TF LoadIC stub")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
......
......@@ -807,6 +807,9 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
// Returns the LoadIC stub embedded in optimized code: the TurboFan version
// when --tf-load-ic-stub is enabled, the platform stub otherwise.
Handle<Code> LoadIC::initialize_stub_in_optimized_code(
    Isolate* isolate, ExtraICState extra_state) {
  if (FLAG_tf_load_ic_stub) {
    return LoadICTFStub(isolate, LoadICState(extra_state)).GetCode();
  }
  return LoadICStub(isolate, LoadICState(extra_state)).GetCode();
}
......@@ -2245,7 +2248,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
DCHECK(args.length() == 4);
DCHECK_EQ(4, args.length());
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
......
......@@ -75,12 +75,14 @@ Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
flags = CommonStubCacheChecks(name, map, flags);
int primary_offset = PrimaryOffset(name, flags, map);
Entry* primary = entry(primary_, primary_offset);
if (primary->key == name && primary->map == map) {
if (primary->key == name && primary->map == map &&
flags == Code::RemoveHolderFromFlags(primary->value->flags())) {
return primary->value;
}
int secondary_offset = SecondaryOffset(name, flags, primary_offset);
Entry* secondary = entry(secondary_, secondary_offset);
if (secondary->key == name && secondary->map == map) {
if (secondary->key == name && secondary->map == map &&
flags == Code::RemoveHolderFromFlags(secondary->value->flags())) {
return secondary->value;
}
return NULL;
......
......@@ -92,9 +92,24 @@ class StubCache {
// automatically discards the hash bit field.
static const int kCacheIndexShift = Name::kHashShift;
private:
static const int kPrimaryTableBits = 11;
static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
static const int kSecondaryTableBits = 9;
static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
static int PrimaryOffsetForTesting(Name* name, Code::Flags flags, Map* map) {
return PrimaryOffset(name, flags, map);
}
static int SecondaryOffsetForTesting(Name* name, Code::Flags flags,
int seed) {
return SecondaryOffset(name, flags, seed);
}
// The constructor is made public only for the purposes of testing.
explicit StubCache(Isolate* isolate);
private:
// The stub cache has a primary and secondary level. The two levels have
// different hashing algorithms in order to avoid simultaneous collisions
// in both caches. Unlike a probing strategy (quadratic or otherwise) the
......@@ -150,11 +165,6 @@ class StubCache {
offset * multiplier);
}
static const int kPrimaryTableBits = 11;
static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
static const int kSecondaryTableBits = 9;
static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
private:
Entry primary_[kPrimaryTableSize];
Entry secondary_[kSecondaryTableSize];
......
......@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/base/utils/random-number-generator.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"
#include "src/isolate.h"
#include "test/cctest/compiler/function-tester.h"
......@@ -41,6 +42,17 @@ class CodeStubAssemblerTester : private ZoneHolder, public CodeStubAssembler {
Code::ComputeFlags(Code::FUNCTION), "test"),
scope_(isolate) {}
// This constructor is intended to be used for creating code objects with
// specific flags.
CodeStubAssemblerTester(Isolate* isolate, Code::Flags flags)
: ZoneHolder(isolate),
CodeStubAssembler(isolate, ZoneHolder::zone(), 0, flags, "test"),
scope_(isolate) {}
Handle<Code> GenerateCodeCloseAndEscape() {
return scope_.CloseAndEscape(GenerateCode());
}
private:
HandleScope scope_;
LocalContext context_;
......@@ -1119,5 +1131,275 @@ TEST(TestOutOfScopeVariable) {
CHECK(!m.GenerateCode().is_null());
}
namespace {
// Checks that the CodeStubAssembler's stub cache offset computation agrees
// with the C++ reference implementation (StubCache::PrimaryOffsetForTesting /
// SecondaryOffsetForTesting) across a mix of symbols, strings, and maps.
void TestStubCacheOffsetCalculation(StubCache::Table table,
                                    Code::Kind handler_kind) {
  Isolate* isolate(CcTest::InitIsolateOnce());
  const int param_count = 2;
  CodeStubAssemblerTester m(isolate, param_count);

  Code::Flags code_flags =
      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(handler_kind));
  {
    // Build a code object computing (name, map) -> offset via the CSA path.
    Node* name = m.Parameter(0);
    Node* map = m.Parameter(1);
    Node* primary_offset = m.StubCachePrimaryOffset(name, code_flags, map);
    Node* result;
    if (table == StubCache::kPrimary) {
      result = primary_offset;
    } else {
      CHECK_EQ(StubCache::kSecondary, table);
      result = m.StubCacheSecondaryOffset(name, code_flags, primary_offset);
    }
    m.Return(m.SmiFromWord32(result));
  }

  Handle<Code> code = m.GenerateCode();
  FunctionTester ft(code, param_count);

  // A spread of name kinds: symbols, short strings, private symbols.
  Factory* factory = isolate->factory();
  Handle<Name> names[] = {
      factory->NewSymbol(),
      factory->InternalizeUtf8String("a"),
      factory->InternalizeUtf8String("bb"),
      factory->InternalizeUtf8String("ccc"),
      factory->NewPrivateSymbol(),
      factory->InternalizeUtf8String("dddd"),
      factory->InternalizeUtf8String("eeeee"),
      factory->InternalizeUtf8String("name"),
      factory->NewSymbol(),
      factory->NewPrivateSymbol(),
  };

  // A spread of maps, including freshly created ones.
  Handle<Map> maps[] = {
      Handle<Map>(nullptr, isolate),
      factory->cell_map(),
      Map::Create(isolate, 0),
      factory->meta_map(),
      factory->code_map(),
      Map::Create(isolate, 0),
      factory->hash_table_map(),
      factory->symbol_map(),
      factory->string_map(),
      Map::Create(isolate, 0),
      factory->sloppy_arguments_elements_map(),
  };

  // Compare generated-code results against the C++ implementation for every
  // (name, map) combination.
  for (int name_index = 0; name_index < arraysize(names); name_index++) {
    Handle<Name> name = names[name_index];
    for (int map_index = 0; map_index < arraysize(maps); map_index++) {
      Handle<Map> map = maps[map_index];

      int expected_result;
      {
        int primary_offset =
            StubCache::PrimaryOffsetForTesting(*name, code_flags, *map);
        if (table == StubCache::kPrimary) {
          expected_result = primary_offset;
        } else {
          expected_result = StubCache::SecondaryOffsetForTesting(
              *name, code_flags, primary_offset);
        }
      }
      Handle<Object> result = ft.Call(name, map).ToHandleChecked();

      // The stub returns a Smi, so compare within Smi range.
      Smi* expected = Smi::FromInt(expected_result & Smi::kMaxValue);
      CHECK_EQ(expected, Smi::cast(*result));
    }
  }
}
} // namespace
// Primary-table offsets must match the C++ implementation for LOAD_IC flags.
TEST(StubCachePrimaryOffsetLoadIC) {
  TestStubCacheOffsetCalculation(StubCache::kPrimary, Code::LOAD_IC);
}
// Primary-table offsets must match the C++ implementation for STORE_IC flags.
TEST(StubCachePrimaryOffsetStoreIC) {
  TestStubCacheOffsetCalculation(StubCache::kPrimary, Code::STORE_IC);
}
// Secondary-table offsets must match the C++ implementation for LOAD_IC flags.
TEST(StubCacheSecondaryOffsetLoadIC) {
  TestStubCacheOffsetCalculation(StubCache::kSecondary, Code::LOAD_IC);
}
// Secondary-table offsets must match the C++ implementation for STORE_IC
// flags.
TEST(StubCacheSecondaryOffsetStoreIC) {
  TestStubCacheOffsetCalculation(StubCache::kSecondary, Code::STORE_IC);
}
namespace {
// Compiles a trivial stub (returns undefined) whose Code object carries the
// given {flags}; used below to manufacture handlers for stub cache entries.
Handle<Code> CreateCodeWithFlags(Code::Flags flags) {
  Isolate* isolate(CcTest::InitIsolateOnce());
  CodeStubAssemblerTester m(isolate, flags);
  m.Return(m.UndefinedConstant());
  return m.GenerateCodeCloseAndEscape();
}
} // namespace
// End-to-end test of CodeStubAssembler::TryProbeStubCache: populates a
// private StubCache instance with random (name, map, handler) triples, then
// checks that the generated probe code agrees with StubCache::Get() — a hit
// must produce the same handler, a miss must reach the miss label (signalled
// by passing a null expected handler).
TEST(TryProbeStubCache) {
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;
  Isolate* isolate(CcTest::InitIsolateOnce());
  const int param_count = 3;
  CodeStubAssemblerTester m(isolate, param_count);

  Code::Flags flags_to_query =
      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));

  StubCache stub_cache(isolate);
  stub_cache.Clear();

  {
    // The generated function returns true iff the probe outcome matches
    // {expected_handler} (with 0 meaning "expect a miss").
    Node* receiver = m.Parameter(0);
    Node* name = m.Parameter(1);
    Node* expected_handler = m.Parameter(2);

    Label passed(&m), failed(&m);

    Variable var_handler(&m, MachineRepresentation::kTagged);
    Label if_handler(&m), if_miss(&m);

    m.TryProbeStubCache(&stub_cache, flags_to_query, receiver, name,
                        &if_handler, &var_handler, &if_miss);
    m.Bind(&if_handler);
    m.BranchIfWordEqual(expected_handler, var_handler.value(), &passed,
                        &failed);

    m.Bind(&if_miss);
    m.BranchIfWordEqual(expected_handler, m.IntPtrConstant(0), &passed,
                        &failed);

    m.Bind(&passed);
    m.Return(m.BooleanConstant(true));

    m.Bind(&failed);
    m.Return(m.BooleanConstant(false));
  }

  Handle<Code> code = m.GenerateCode();
  FunctionTester ft(code, param_count);

  std::vector<Handle<Name>> names;
  std::vector<Handle<JSObject>> receivers;
  std::vector<Handle<Code>> handlers;

  base::RandomNumberGenerator rand_gen(FLAG_random_seed);

  Factory* factory = isolate->factory();

  // Generate some number of names.
  for (int i = 0; i < StubCache::kPrimaryTableSize / 7; i++) {
    Handle<Name> name;
    switch (rand_gen.NextInt(3)) {
      case 0: {
        // Generate string.
        std::stringstream ss;
        ss << "s" << std::hex
           << (rand_gen.NextInt(Smi::kMaxValue) % StubCache::kPrimaryTableSize);
        name = factory->InternalizeUtf8String(ss.str().c_str());
        break;
      }
      case 1: {
        // Generate number string.
        std::stringstream ss;
        ss << (rand_gen.NextInt(Smi::kMaxValue) % StubCache::kPrimaryTableSize);
        name = factory->InternalizeUtf8String(ss.str().c_str());
        break;
      }
      case 2: {
        // Generate symbol.
        name = factory->NewSymbol();
        break;
      }
      default:
        UNREACHABLE();
    }
    names.push_back(name);
  }

  // Generate some number of receiver maps and receivers.
  for (int i = 0; i < StubCache::kSecondaryTableSize / 2; i++) {
    Handle<Map> map = Map::Create(isolate, 0);
    receivers.push_back(factory->NewJSObjectFromMap(map));
  }

  // Generate some number of handlers with a mix of IC kinds, so that only a
  // subset matches {flags_to_query}.
  for (int i = 0; i < StubCache::kSecondaryTableSize; i++) {
    Code::Kind code_kind;
    switch (rand_gen.NextInt(4)) {
      case 0:
        code_kind = Code::LOAD_IC;
        break;
      case 1:
        code_kind = Code::KEYED_LOAD_IC;
        break;
      case 2:
        code_kind = Code::STORE_IC;
        break;
      case 3:
        code_kind = Code::KEYED_STORE_IC;
        break;
      default:
        UNREACHABLE();
    }
    Code::Flags flags =
        Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(code_kind));
    handlers.push_back(CreateCodeWithFlags(flags));
  }

  // Ensure that GC does not happen because from now on we are going to fill
  // our own stub cache instance with raw values.
  DisallowHeapAllocation no_gc;

  // Populate {stub_cache}.
  const int N = StubCache::kPrimaryTableSize + StubCache::kSecondaryTableSize;
  for (int i = 0; i < N; i++) {
    int index = rand_gen.NextInt();
    Handle<Name> name = names[index % names.size()];
    Handle<JSObject> receiver = receivers[index % receivers.size()];
    Handle<Code> handler = handlers[index % handlers.size()];
    stub_cache.Set(*name, receiver->map(), *handler);
  }

  // Perform some queries.
  bool queried_existing = false;
  bool queried_non_existing = false;
  // First pass: correlated (name, receiver) pairs — mostly hits.
  for (int i = 0; i < N; i++) {
    int index = rand_gen.NextInt();
    Handle<Name> name = names[index % names.size()];
    Handle<JSObject> receiver = receivers[index % receivers.size()];
    Code* handler = stub_cache.Get(*name, receiver->map(), flags_to_query);
    if (handler == nullptr) {
      queried_non_existing = true;
    } else {
      queried_existing = true;
    }

    Handle<Code> expected_handler(handler, isolate);
    ft.CheckTrue(receiver, name, expected_handler);
  }

  // Second pass: independently chosen (name, receiver) pairs — mostly misses.
  for (int i = 0; i < N; i++) {
    int index1 = rand_gen.NextInt();
    int index2 = rand_gen.NextInt();
    Handle<Name> name = names[index1 % names.size()];
    Handle<JSObject> receiver = receivers[index2 % receivers.size()];
    Code* handler = stub_cache.Get(*name, receiver->map(), flags_to_query);
    if (handler == nullptr) {
      queried_non_existing = true;
    } else {
      queried_existing = true;
    }

    Handle<Code> expected_handler(handler, isolate);
    ft.CheckTrue(receiver, name, expected_handler);
  }
  // Ensure we performed both kind of queries.
  CHECK(queried_existing && queried_non_existing);
}
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment