Commit 263131cf authored by oth, committed by Commit bot

[interpreter] Update ForInPrepare to conditionally use runtime.

Copies the behaviour of FullCode in attempting to get the state for
ForInPrepare inline and falling back to the runtime if necessary.

BUG=v8:4280
LOG=N

Review-Url: https://codereview.chromium.org/2155153002
Cr-Commit-Position: refs/heads/master@{#37874}
parent a3f598fc
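For orientation, a minimal standalone sketch of the check the new code performs before trusting the enum cache (toy structs and an assumed kInvalidEnumCacheSentinel value, not V8's actual object model); anything that fails this check falls back to Runtime::kForInPrepare:

#include <cstdio>

namespace toy {

// Assumed stand-in for V8's kInvalidEnumCacheSentinel; the exact value is
// irrelevant here, it only marks "no enum cache available".
constexpr int kInvalidEnumCacheSentinel = -1;

struct Map {
  int enum_length;  // kInvalidEnumCacheSentinel means the cache is not set up
};

struct JSObject {
  const Map* map;
  bool has_elements;             // stand-in for elements != empty_fixed_array
  bool has_slow_empty_elements;  // stand-in for the empty slow element dictionary
  const JSObject* prototype;     // nullptr plays the role of the null prototype
};

// Mirrors the shape of CodeStubAssembler::CheckEnumCache below: returns true
// if the enum-cache fast path may be used for |receiver|, false if the
// bytecode handler must call Runtime::kForInPrepare instead.
bool CheckEnumCache(const JSObject* receiver) {
  const JSObject* current = receiver;
  // The receiver's map must have an initialized enum length.
  if (current->map->enum_length == kInvalidEnumCacheSentinel) return false;
  while (true) {
    // The current object may not have elements (the empty slow element
    // dictionary is tolerated as a second chance).
    if (current->has_elements && !current->has_slow_empty_elements) return false;
    current = current->prototype;
    if (current == nullptr) return true;  // reached the end of the chain
    // Every prototype must have an empty enum cache.
    if (current->map->enum_length != 0) return false;
  }
}

}  // namespace toy

int main() {
  toy::Map proto_map{0}, receiver_map{2};
  toy::JSObject proto{&proto_map, false, false, nullptr};
  toy::JSObject receiver{&receiver_map, false, false, &proto};
  std::printf("use enum cache: %s\n",
              toy::CheckEnumCache(&receiver) ? "yes" : "no (use runtime)");
  return 0;
}

The CodeStubAssembler version below expresses the same prototype-chain walk with Variables, Labels and branch nodes rather than plain C++ control flow.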
@@ -3137,5 +3137,72 @@ void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
}
}
Node* CodeStubAssembler::EnumLength(Node* map) {
Node* bitfield_3 = LoadMapBitField3(map);
Node* enum_length = BitFieldDecode<Map::EnumLengthBits>(bitfield_3);
return SmiTag(enum_length);
}
void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
Label* use_runtime) {
Variable current_js_object(this, MachineRepresentation::kTagged);
current_js_object.Bind(receiver);
Variable current_map(this, MachineRepresentation::kTagged);
current_map.Bind(LoadMap(current_js_object.value()));
// These variables are updated in the loop below.
Variable* loop_vars[2] = {&current_js_object, &current_map};
Label loop(this, 2, loop_vars), next(this);
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
{
Node* invalid_enum_cache_sentinel =
SmiConstant(Smi::FromInt(kInvalidEnumCacheSentinel));
Node* enum_length = EnumLength(current_map.value());
BranchIfWordEqual(enum_length, invalid_enum_cache_sentinel, use_runtime,
&loop);
}
// Check that there are no elements. |current_js_object| contains
// the current JS object we've reached through the prototype chain.
Bind(&loop);
{
Label if_elements(this), if_no_elements(this);
Node* elements = LoadElements(current_js_object.value());
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
// Check that there are no elements.
BranchIfWordEqual(elements, empty_fixed_array, &if_no_elements,
&if_elements);
Bind(&if_elements);
{
// Second chance, the object may be using the empty slow element
// dictionary.
Node* slow_empty_dictionary =
LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
BranchIfWordNotEqual(elements, slow_empty_dictionary, use_runtime,
&if_no_elements);
}
Bind(&if_no_elements);
{
// Update map prototype.
current_js_object.Bind(LoadMapPrototype(current_map.value()));
BranchIfWordEqual(current_js_object.value(), NullConstant(), use_cache,
&next);
}
}
Bind(&next);
{
// For all objects but the receiver, check that the cache is empty.
current_map.Bind(LoadMap(current_js_object.value()));
Node* enum_length = EnumLength(current_map.value());
Node* zero_constant = SmiConstant(Smi::FromInt(0));
BranchIf(WordEqual(enum_length, zero_constant), &loop, use_runtime);
}
}
}  // namespace internal
}  // namespace v8

@@ -474,6 +474,15 @@ class CodeStubAssembler : public compiler::CodeAssembler {
void LoadIC(const LoadICParameters* p);
void LoadGlobalIC(const LoadICParameters* p);
// Get the enumerable length from |map| and return the result as a Smi.
compiler::Node* EnumLength(compiler::Node* map);
// Check the cache validity for |receiver|. Branch to |use_cache| if
// the cache is valid, otherwise branch to |use_runtime|.
void CheckEnumCache(compiler::Node* receiver,
CodeStubAssembler::Label* use_cache,
CodeStubAssembler::Label* use_runtime);
private:
compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
                                       ElementsKind kind, ParameterMode mode,
...
@@ -1840,6 +1840,17 @@ void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
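// Writes the ForInPrepare result triple (cache_type, cache_array,
// cache_length) into three consecutive registers starting at
// |output_register|.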
void Interpreter::BuildForInPrepareResult(Node* output_register,
Node* cache_type, Node* cache_array,
Node* cache_length,
InterpreterAssembler* assembler) {
__ StoreRegister(cache_type, output_register);
output_register = __ NextRegister(output_register);
__ StoreRegister(cache_array, output_register);
output_register = __ NextRegister(output_register);
__ StoreRegister(cache_length, output_register);
}
// ForInPrepare <cache_info_triple>
//
// Returns state for for..in loop execution based on the object in the
@@ -1849,17 +1860,92 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
Node* object = __ GetAccumulator();
Node* context = __ GetContext();
Node* const zero_smi = __ SmiConstant(Smi::FromInt(0));

Label test_if_null(assembler), test_if_undefined(assembler),
    nothing_to_iterate(assembler, Label::kDeferred),
    convert_to_receiver(assembler, Label::kDeferred),
    already_receiver(assembler), check_enum_cache(assembler);

Variable receiver(assembler, MachineRepresentation::kTagged);

// Test if object is already a receiver, no conversion necessary if so.
Node* instance_type = __ LoadInstanceType(object);
Node* first_receiver_type = __ Int32Constant(FIRST_JS_RECEIVER_TYPE);
__ BranchIfInt32GreaterThanOrEqual(instance_type, first_receiver_type,
&already_receiver, &test_if_null);
__ Bind(&test_if_null);
{
__ BranchIfWordEqual(object, assembler->NullConstant(), &nothing_to_iterate,
&test_if_undefined);
}
__ Bind(&test_if_undefined);
{
__ BranchIfWordEqual(object, assembler->UndefinedConstant(),
&nothing_to_iterate, &convert_to_receiver);
}
__ Bind(&convert_to_receiver);
{
Callable callable = CodeFactory::ToObject(assembler->isolate());
Node* target = __ HeapConstant(callable.code());
Node* result = __ CallStub(callable.descriptor(), target, context, object);
receiver.Bind(result);
__ Goto(&check_enum_cache);
}
__ Bind(&already_receiver);
{
receiver.Bind(object);
__ Goto(&check_enum_cache);
}
Label use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
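// Decide between the enum cache fast path and the runtime fallback.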
__ Bind(&check_enum_cache);
{ __ CheckEnumCache(receiver.value(), &use_enum_cache, &use_runtime); }
__ Bind(&use_enum_cache);
{
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Node* cache_type = __ LoadMap(receiver.value());
Node* cache_length = __ EnumLength(cache_type);
__ GotoIf(assembler->WordEqual(cache_length, zero_smi),
&nothing_to_iterate);
Node* descriptors = __ LoadMapDescriptors(cache_type);
Node* cache_offset =
__ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
Node* cache_array = __ LoadObjectField(
cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
Node* output_register = __ BytecodeOperandReg(0);
BuildForInPrepareResult(output_register, cache_type, cache_array,
cache_length, assembler);
__ Dispatch();
}
__ Bind(&use_runtime);
{
Node* result_triple =
__ CallRuntime(Runtime::kForInPrepare, context, object);
Node* cache_type = __ Projection(0, result_triple);
Node* cache_array = __ Projection(1, result_triple);
Node* cache_length = __ Projection(2, result_triple);
Node* output_register = __ BytecodeOperandReg(0);
BuildForInPrepareResult(output_register, cache_type, cache_array,
cache_length, assembler);
__ Dispatch();
}
__ Bind(&nothing_to_iterate);
{
// Receiver is null or undefined or descriptors are zero length.
Node* output_register = __ BytecodeOperandReg(0);
BuildForInPrepareResult(output_register, zero_smi, zero_smi, zero_smi,
assembler);
__ Dispatch();
}
}
// ForInNext <receiver> <index> <cache_info_pair>
@@ -1882,8 +1968,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
__ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
__ Bind(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
...
@@ -143,6 +143,15 @@ class Interpreter {
compiler::Node* BuildLoadKeyedProperty(Callable ic,
                                       InterpreterAssembler* assembler);
// Generates code to prepare the result for ForInPrepare. Cache data
// is placed into three consecutive registers starting at
// |output_register|.
void BuildForInPrepareResult(compiler::Node* output_register,
compiler::Node* cache_type,
compiler::Node* cache_array,
compiler::Node* cache_length,
InterpreterAssembler* assembler);
uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
// Get dispatch table index of bytecode.
...