Commit 0c76b0ae authored by caitp, committed by Commit bot

Reland [builtins] implement Array.prototype.includes in TurboFan

BUG=v8:5162
R=bmeurer@chromium.org, cbruni@chromium.org

Review-Url: https://codereview.chromium.org/2205883003
Cr-Commit-Position: refs/heads/master@{#38266}
parent 2eb75b62
This diff is collapsed.
......@@ -192,6 +192,8 @@ namespace internal {
CPP(ArrayConcat) \
/* ES6 section 22.1.2.2 Array.isArray */ \
TFJ(ArrayIsArray, 2) \
/* ES7 #sec-array.prototype.includes */ \
TFJ(ArrayIncludes, 3) \
CPP(ArrayPop) \
CPP(ArrayPush) \
CPP(ArrayShift) \
......
......@@ -471,6 +471,216 @@ Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
IntPtrConstant(0));
}
// Branches to {if_true} if SameValueZero(a, b) holds (ES2016 7.2.10),
// otherwise to {if_false}.  SameValueZero behaves like strict equality
// except that NaN is considered equal to NaN.  {context} is only needed
// for the StringEqual stub call.
void CodeStubAssembler::BranchIfSameValueZero(Node* a, Node* b, Node* context,
                                              Label* if_true, Label* if_false) {
  Node* number_map = HeapNumberMapConstant();
  Label a_isnumber(this), a_isnotnumber(this), b_isnumber(this), a_isnan(this),
      float_not_equal(this);
  // If register A and register B are identical, goto `if_true`
  GotoIf(WordEqual(a, b), if_true);
  // If either register A or B are Smis, goto `if_false`: a Smi can only be
  // SameValueZero-equal to an identical Smi, which was handled above.
  GotoIf(Word32Or(WordIsSmi(a), WordIsSmi(b)), if_false);
  Node* a_map = LoadMap(a);
  Node* b_map = LoadMap(b);
  Branch(WordEqual(a_map, number_map), &a_isnumber, &a_isnotnumber);

  // If both register A and B are HeapNumbers, return true if they are equal,
  // or if both are NaN
  Bind(&a_isnumber);
  {
    Branch(WordEqual(b_map, number_map), &b_isnumber, if_false);

    Bind(&b_isnumber);
    Node* a_value = LoadHeapNumberValue(a);
    Node* b_value = LoadHeapNumberValue(b);
    BranchIfFloat64Equal(a_value, b_value, if_true, &float_not_equal);

    Bind(&float_not_equal);
    BranchIfFloat64IsNaN(a_value, &a_isnan, if_false);

    Bind(&a_isnan);
    // {a} is NaN; SameValueZero(NaN, NaN) is true, so the result depends on
    // whether {b} is NaN as well.  (Re-testing {a_value} here would wrongly
    // make SameValueZero(NaN, x) true for every HeapNumber x.)
    BranchIfFloat64IsNaN(b_value, if_true, if_false);
  }

  Bind(&a_isnotnumber);
  {
    Label a_isstring(this), a_isnotstring(this);
    Node* a_instance_type = LoadMapInstanceType(a_map);
    // Strings have instance types below FIRST_NONSTRING_TYPE.
    Branch(Int32LessThan(a_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
           &a_isstring, &a_isnotstring);

    Bind(&a_isstring);
    {
      Label b_isstring(this);
      // Decode the instance type from {b_map} with LoadMapInstanceType;
      // LoadInstanceType expects an object and would load the instance type
      // of the map's map instead.
      Node* b_instance_type = LoadMapInstanceType(b_map);
      Branch(
          Int32LessThan(b_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
          &b_isstring, if_false);

      Bind(&b_isstring);
      {
        // Compare string contents via the StringEqual stub.
        Callable callable = CodeFactory::StringEqual(isolate());
        Node* result = CallStub(callable, context, a, b);
        Branch(WordEqual(BooleanConstant(true), result), if_true, if_false);
      }
    }

    Bind(&a_isnotstring);
    {
      // Check if {lhs} is a Simd128Value.
      Label a_issimd128value(this);
      Branch(Word32Equal(a_instance_type, Int32Constant(SIMD128_VALUE_TYPE)),
             &a_issimd128value, if_false);

      Bind(&a_issimd128value);
      {
        // Compare the two Simd128Values lane-wise / bitwise.
        BranchIfSimd128Equal(a, a_map, b, b_map, if_true, if_false);
      }
    }
  }
}
// Branches to {if_equal} when the Simd128Values {lhs} and {rhs} (with maps
// {lhs_map} and {rhs_map}) are equal, otherwise to {if_notequal}.
// Float32x4 values are compared lane-wise with floating point semantics
// (so a NaN lane compares unequal even to itself); all other Simd128 types
// are compared bitwise, word by word.
void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
                                             Node* rhs, Node* rhs_map,
                                             Label* if_equal,
                                             Label* if_notequal) {
  Label if_mapsame(this), if_mapnotsame(this);
  // Values of different Simd128 types (different maps) are never equal.
  Branch(WordEqual(lhs_map, rhs_map), &if_mapsame, &if_mapnotsame);

  Bind(&if_mapsame);
  {
    // Both {lhs} and {rhs} are Simd128Values with the same map, need special
    // handling for Float32x4 because of NaN comparisons.
    Label if_float32x4(this), if_notfloat32x4(this);
    Node* float32x4_map = HeapConstant(factory()->float32x4_map());
    Branch(WordEqual(lhs_map, float32x4_map), &if_float32x4, &if_notfloat32x4);

    Bind(&if_float32x4);
    {
      // Both {lhs} and {rhs} are Float32x4, compare the lanes individually
      // using a floating point comparison.
      for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
           offset < Float32x4::kSize - kHeapObjectTag;
           offset += sizeof(float)) {
        // Load the floating point values for {lhs} and {rhs}.
        Node* lhs_value =
            Load(MachineType::Float32(), lhs, IntPtrConstant(offset));
        Node* rhs_value =
            Load(MachineType::Float32(), rhs, IntPtrConstant(offset));

        // Perform a floating point comparison.
        Label if_valueequal(this), if_valuenotequal(this);
        Branch(Float32Equal(lhs_value, rhs_value), &if_valueequal,
               &if_valuenotequal);
        Bind(&if_valuenotequal);
        Goto(if_notequal);
        // Fall through to the next lane only when this lane matched.
        Bind(&if_valueequal);
      }

      // All 4 lanes match, {lhs} and {rhs} considered equal.
      Goto(if_equal);
    }

    Bind(&if_notfloat32x4);
    {
      // For other Simd128Values we just perform a bitwise comparison.
      for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
           offset < Simd128Value::kSize - kHeapObjectTag;
           offset += kPointerSize) {
        // Load the word values for {lhs} and {rhs}.
        Node* lhs_value =
            Load(MachineType::Pointer(), lhs, IntPtrConstant(offset));
        Node* rhs_value =
            Load(MachineType::Pointer(), rhs, IntPtrConstant(offset));

        // Perform a bitwise word-comparison.
        Label if_valueequal(this), if_valuenotequal(this);
        Branch(WordEqual(lhs_value, rhs_value), &if_valueequal,
               &if_valuenotequal);
        Bind(&if_valuenotequal);
        Goto(if_notequal);
        Bind(&if_valueequal);
      }

      // Bitwise comparison succeeded, {lhs} and {rhs} considered equal.
      Goto(if_equal);
    }
  }

  Bind(&if_mapnotsame);
  Goto(if_notequal);
}
// Branches to {if_true} when {object} is a JSArray whose elements can be
// iterated without observing the prototype chain: either the array has
// packed fast elements, or (for holey fast kinds) every object on its
// prototype chain has empty elements and a non-exotic receiver type.
// Otherwise branches to {if_false}.  {context} is currently unused.
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
                                            Label* if_true, Label* if_false) {
  Node* int32_zero = Int32Constant(0);
  Node* int32_one = Int32Constant(1);

  Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);

  Variable last_map(this, MachineRepresentation::kTagged);
  Label check_prototype(this);

  // Bailout if Smi
  GotoIf(WordIsSmi(object), if_false);

  Node* map = LoadMap(object);
  last_map.Bind(map);

  // Bailout if instance type is not JS_ARRAY_TYPE
  GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
         if_false);

  Node* bit_field2 = LoadMapBitField2(map);
  Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);

  // Bailout if slow receiver elements
  GotoIf(
      Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
      if_false);

  // Each holey fast ElementsKind is its packed counterpart with the low bit
  // set, so a single bit test below detects holeyness.
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
  STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));

  // Check prototype chain if receiver does not have packed elements
  Node* holey_elements = Word32And(elements_kind, int32_one);
  Branch(Word32Equal(holey_elements, int32_zero), if_true, &check_prototype);

  Bind(&check_prototype);
  {
    // Walk the prototype chain: holes in the receiver are only unobservable
    // when no prototype can contribute elements.
    Label loop_body(this, &last_map);
    Goto(&loop_body);
    Bind(&loop_body);
    Node* current_map = last_map.value();
    Node* proto = LoadObjectField(current_map, Map::kPrototypeOffset);

    // End loop
    GotoIf(WordEqual(proto, NullConstant()), if_true);

    // ASSERT: proto->IsHeapObject()
    Node* proto_map = LoadMap(proto);

    // Bailout if a Proxy, API Object, or JSValue wrapper found in prototype
    // Because of this bailout, it's not necessary to check for interceptors or
    // access checks on the prototype chain.
    GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(proto_map),
                                Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
           if_false);

    // Bailout if prototype contains non-empty elements
    GotoUnless(WordEqual(LoadElements(proto), empty_elements), if_false);

    last_map.Bind(proto_map);
    Goto(&loop_body);
  }
}
Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
......
......@@ -124,6 +124,22 @@ class CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfToBooleanIsTrue(compiler::Node* value, Label* if_true,
Label* if_false);
// Branches to {if_equal} when the two Simd128Values are equal (lane-wise
// floating point comparison for Float32x4, bitwise otherwise), else to
// {if_notequal}.
void BranchIfSimd128Equal(compiler::Node* lhs, compiler::Node* lhs_map,
                          compiler::Node* rhs, compiler::Node* rhs_map,
                          Label* if_equal, Label* if_notequal);
// Convenience overload that loads the maps of {lhs} and {rhs} itself.
void BranchIfSimd128Equal(compiler::Node* lhs, compiler::Node* rhs,
                          Label* if_equal, Label* if_notequal) {
  BranchIfSimd128Equal(lhs, LoadMap(lhs), rhs, LoadMap(rhs), if_equal,
                       if_notequal);
}
// Branches to {if_true} when SameValueZero(a, b) holds (ES2016 7.2.10),
// else to {if_false}.  {context} is needed for string comparison.
void BranchIfSameValueZero(compiler::Node* a, compiler::Node* b,
                           compiler::Node* context, Label* if_true,
                           Label* if_false);
// Branches to {if_true} when {object} is a JSArray whose elements can be
// iterated without consulting the prototype chain, else to {if_false}.
void BranchIfFastJSArray(compiler::Node* object, compiler::Node* context,
                         Label* if_true, Label* if_false);
// Load value from current frame by given offset in bytes.
compiler::Node* LoadFromFrame(int offset,
MachineType rep = MachineType::AnyTagged());
......
......@@ -2462,78 +2462,8 @@ void GenerateEqual_Simd128Value_HeapObject(
CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
compiler::Node* rhs, compiler::Node* rhs_map,
CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
// Check if {lhs} and {rhs} have the same map.
Label if_mapsame(assembler), if_mapnotsame(assembler);
assembler->Branch(assembler->WordEqual(lhs_map, rhs_map), &if_mapsame,
&if_mapnotsame);
assembler->Bind(&if_mapsame);
{
// Both {lhs} and {rhs} are Simd128Values with the same map, need special
// handling for Float32x4 because of NaN comparisons.
Label if_float32x4(assembler), if_notfloat32x4(assembler);
Node* float32x4_map =
assembler->HeapConstant(assembler->factory()->float32x4_map());
assembler->Branch(assembler->WordEqual(lhs_map, float32x4_map),
&if_float32x4, &if_notfloat32x4);
assembler->Bind(&if_float32x4);
{
// Both {lhs} and {rhs} are Float32x4, compare the lanes individually
// using a floating point comparison.
for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
offset < Float32x4::kSize - kHeapObjectTag;
offset += sizeof(float)) {
// Load the floating point values for {lhs} and {rhs}.
Node* lhs_value = assembler->Load(MachineType::Float32(), lhs,
assembler->IntPtrConstant(offset));
Node* rhs_value = assembler->Load(MachineType::Float32(), rhs,
assembler->IntPtrConstant(offset));
// Perform a floating point comparison.
Label if_valueequal(assembler), if_valuenotequal(assembler);
assembler->Branch(assembler->Float32Equal(lhs_value, rhs_value),
&if_valueequal, &if_valuenotequal);
assembler->Bind(&if_valuenotequal);
assembler->Goto(if_notequal);
assembler->Bind(&if_valueequal);
}
// All 4 lanes match, {lhs} and {rhs} considered equal.
assembler->Goto(if_equal);
}
assembler->Bind(&if_notfloat32x4);
{
// For other Simd128Values we just perform a bitwise comparison.
for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
offset < Simd128Value::kSize - kHeapObjectTag;
offset += kPointerSize) {
// Load the word values for {lhs} and {rhs}.
Node* lhs_value = assembler->Load(MachineType::Pointer(), lhs,
assembler->IntPtrConstant(offset));
Node* rhs_value = assembler->Load(MachineType::Pointer(), rhs,
assembler->IntPtrConstant(offset));
// Perform a bitwise word-comparison.
Label if_valueequal(assembler), if_valuenotequal(assembler);
assembler->Branch(assembler->WordEqual(lhs_value, rhs_value),
&if_valueequal, &if_valuenotequal);
assembler->Bind(&if_valuenotequal);
assembler->Goto(if_notequal);
assembler->Bind(&if_valueequal);
}
// Bitwise comparison succeeded, {lhs} and {rhs} considered equal.
assembler->Goto(if_equal);
}
}
assembler->Bind(&if_mapnotsame);
assembler->Goto(if_notequal);
assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
if_notequal);
}
// ES6 section 7.2.12 Abstract Equality Comparison
......
......@@ -869,7 +869,7 @@ void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
}
void CodeAssembler::Switch(Node* index, Label* default_label,
int32_t* case_values, Label** case_labels,
const int32_t* case_values, Label** case_labels,
size_t case_count) {
RawMachineLabel** labels =
new (zone()->New(sizeof(RawMachineLabel*) * case_count))
......
......@@ -246,7 +246,7 @@ class CodeAssembler {
void GotoUnless(Node* condition, Label* false_label);
void Branch(Node* condition, Label* true_label, Label* false_label);
void Switch(Node* index, Label* default_label, int32_t* case_values,
void Switch(Node* index, Label* default_label, const int32_t* case_values,
Label** case_labels, size_t case_count);
Node* Select(Node* condition, Node* true_value, Node* false_value,
......
......@@ -85,9 +85,8 @@ void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
current_block_ = nullptr;
}
void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
int32_t* case_values,
const int32_t* case_values,
RawMachineLabel** case_labels,
size_t case_count) {
DCHECK_NE(schedule()->end(), current_block_);
......
......@@ -754,8 +754,9 @@ class RawMachineAssembler {
void Goto(RawMachineLabel* label);
void Branch(Node* condition, RawMachineLabel* true_val,
RawMachineLabel* false_val);
void Switch(Node* index, RawMachineLabel* default_label, int32_t* case_values,
RawMachineLabel** case_labels, size_t case_count);
void Switch(Node* index, RawMachineLabel* default_label,
const int32_t* case_values, RawMachineLabel** case_labels,
size_t case_count);
void Return(Node* value);
void Return(Node* v1, Node* v2);
void Return(Node* v1, Node* v2, Node* v3);
......
This diff is collapsed.
......@@ -154,6 +154,12 @@ class ElementsAccessor {
virtual uint32_t GetCapacity(JSObject* holder,
FixedArrayBase* backing_store) = 0;
// Check an Object's own elements for an element (using SameValueZero
// semantics)
virtual Maybe<bool> IncludesValue(Isolate* isolate, Handle<JSObject> receiver,
Handle<Object> value, uint32_t start,
uint32_t length) = 0;
protected:
friend class LookupIterator;
......
......@@ -380,6 +380,16 @@ class Factory final {
}
return NewNumber(static_cast<double>(value), pretenure);
}
// Returns a Number for {value}: a Smi when the value is Smi-representable,
// otherwise a (possibly pretenured) HeapNumber obtained via NewNumber.
Handle<Object> NewNumberFromInt64(int64_t value,
                                  PretenureFlag pretenure = NOT_TENURED) {
  const bool fits_int32 = value >= std::numeric_limits<int32_t>::min() &&
                          value <= std::numeric_limits<int32_t>::max();
  if (fits_int32 && Smi::IsValid(static_cast<int32_t>(value))) {
    return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)),
                          isolate());
  }
  return NewNumber(static_cast<double>(value), pretenure);
}
Handle<HeapNumber> NewHeapNumber(double value,
MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
......
......@@ -1492,47 +1492,6 @@ function ArrayFill(value, start, end) {
}
function InnerArrayIncludes(searchElement, fromIndex, array, length) {
if (length === 0) {
return false;
}
var n = TO_INTEGER(fromIndex);
var k;
if (n >= 0) {
k = n;
} else {
k = length + n;
if (k < 0) {
k = 0;
}
}
while (k < length) {
var elementK = array[k];
if (%SameValueZero(searchElement, elementK)) {
return true;
}
++k;
}
return false;
}
// ES2016 draft, section 22.1.3.11
// Array.prototype.includes(searchElement [, fromIndex]): coerces the
// receiver to an object, clamps its length with ToLength, and delegates
// the SameValueZero search to InnerArrayIncludes.
function ArrayIncludes(searchElement, fromIndex) {
  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.includes");

  var array = TO_OBJECT(this);
  var length = TO_LENGTH(array.length);

  return InnerArrayIncludes(searchElement, fromIndex, array, length);
}
// ES6, draft 10-14-14, section 22.1.2.1
function ArrayFrom(arrayLike, mapfn, receiver) {
var items = TO_OBJECT(arrayLike);
......@@ -1677,7 +1636,7 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"find", getFunction("find", ArrayFind, 1),
"findIndex", getFunction("findIndex", ArrayFindIndex, 1),
"fill", getFunction("fill", ArrayFill, 1),
"includes", getFunction("includes", ArrayIncludes, 1),
"includes", getFunction("includes", null, 1)
]);
utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies);
......@@ -1731,7 +1690,6 @@ utils.Export(function(to) {
to.InnerArrayFind = InnerArrayFind;
to.InnerArrayFindIndex = InnerArrayFindIndex;
to.InnerArrayForEach = InnerArrayForEach;
to.InnerArrayIncludes = InnerArrayIncludes;
to.InnerArrayIndexOf = InnerArrayIndexOf;
to.InnerArrayJoin = InnerArrayJoin;
to.InnerArrayLastIndexOf = InnerArrayLastIndexOf;
......
......@@ -28,7 +28,6 @@ var InnerArrayFilter;
var InnerArrayFind;
var InnerArrayFindIndex;
var InnerArrayForEach;
var InnerArrayIncludes;
var InnerArrayIndexOf;
var InnerArrayJoin;
var InnerArrayLastIndexOf;
......@@ -82,7 +81,6 @@ utils.Import(function(from) {
InnerArrayFind = from.InnerArrayFind;
InnerArrayFindIndex = from.InnerArrayFindIndex;
InnerArrayForEach = from.InnerArrayForEach;
InnerArrayIncludes = from.InnerArrayIncludes;
InnerArrayIndexOf = from.InnerArrayIndexOf;
InnerArrayJoin = from.InnerArrayJoin;
InnerArrayLastIndexOf = from.InnerArrayLastIndexOf;
......@@ -713,7 +711,29 @@ function TypedArrayIncludes(searchElement, fromIndex) {
var length = %_TypedArrayGetLength(this);
return InnerArrayIncludes(searchElement, fromIndex, this, length);
if (length === 0) return false;
var n = TO_INTEGER(fromIndex);
var k;
if (n >= 0) {
k = n;
} else {
k = length + n;
if (k < 0) {
k = 0;
}
}
while (k < length) {
var elementK = this[k];
if (%SameValueZero(searchElement, elementK)) {
return true;
}
++k;
}
return false;
}
%FunctionSetLength(TypedArrayIncludes, 1);
......
......@@ -1144,6 +1144,20 @@ MUST_USE_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
GetKeysConversion::kConvertToString);
}
// Returns true when no object on {object}'s prototype chain can contribute
// elements: every prototype up to null must hold the empty fixed array as
// its elements and must not be an exotic (custom-elements) receiver.
bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
  DisallowHeapAllocation no_gc;
  HeapObject* null = isolate->heap()->null_value();
  HeapObject* empty = isolate->heap()->empty_fixed_array();
  for (HeapObject* current = HeapObject::cast(object->map()->prototype());
       current != null;
       current = HeapObject::cast(current->map()->prototype())) {
    Map* current_map = current->map();
    // Proxies, API objects etc. may intercept element access.
    if (current_map->instance_type() <= LAST_CUSTOM_ELEMENTS_RECEIVER) {
      return false;
    }
    if (JSObject::cast(current)->elements() != empty) return false;
  }
  return true;
}
#define FIELD_ADDR(p, offset) \
(reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
......
......@@ -2210,6 +2210,9 @@ class JSObject: public JSReceiver {
static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
static void InvalidatePrototypeChains(Map* map);
// Utility used by many Array builtins and runtime functions
static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
// Alternative implementation of WeakFixedArray::NullCallback.
class PrototypeRegistryCompactionCallback {
public:
......
......@@ -32,18 +32,24 @@ RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
}
static void InstallCode(Isolate* isolate, Handle<JSObject> holder,
const char* name, Handle<Code> code) {
const char* name, Handle<Code> code, int argc = -1) {
Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
Handle<JSFunction> optimized =
isolate->factory()->NewFunctionWithoutPrototype(key, code);
optimized->shared()->DontAdaptArguments();
if (argc < 0) {
optimized->shared()->DontAdaptArguments();
} else {
optimized->shared()->set_internal_formal_parameter_count(argc);
}
JSObject::AddProperty(holder, key, optimized, NONE);
}
static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
const char* name, Builtins::Name builtin_name) {
const char* name, Builtins::Name builtin_name,
int argc = -1) {
InstallCode(isolate, holder, name,
handle(isolate->builtins()->builtin(builtin_name), isolate));
handle(isolate->builtins()->builtin(builtin_name), isolate),
argc);
}
RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
......@@ -63,6 +69,7 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
InstallBuiltin(isolate, holder, "includes", Builtins::kArrayIncludes, 2);
return *holder;
}
......@@ -443,5 +450,98 @@ RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
isolate, Object::ArraySpeciesConstructor(isolate, original_array));
}
// ES7 22.1.3.11 Array.prototype.includes
// Runtime fallback for Array.prototype.includes, used when the TurboFan
// builtin cannot handle the receiver.  Args: 0 = receiver (this value),
// 1 = searchElement, 2 = fromIndex.  Returns true_value()/false_value(),
// or an exception sentinel if a spec-mandated coercion throws.
RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
  HandleScope shs(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);

  // Let O be ? ToObject(this value).
  Handle<JSReceiver> object;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, object, Object::ToObject(isolate, handle(args[0], isolate)));

  // Let len be ? ToLength(? Get(O, "length")).
  int64_t len;
  {
    if (object->map()->instance_type() == JS_ARRAY_TYPE) {
      // Fast path: a JSArray's length is always a valid array length, so it
      // can be read directly without a property lookup.
      uint32_t len32 = 0;
      bool success = JSArray::cast(*object)->length()->ToArrayLength(&len32);
      DCHECK(success);
      USE(success);
      len = len32;
    } else {
      Handle<Object> len_;
      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
          isolate, len_,
          Object::GetProperty(object, isolate->factory()->length_string()));

      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len_,
                                         Object::ToLength(isolate, len_));
      // ToLength yields an integer in [0, 2^53 - 1], which is exactly
      // representable as an int64_t; the DCHECK guards that assumption.
      len = static_cast<int64_t>(len_->Number());
      DCHECK_EQ(len, len_->Number());
    }
  }

  if (len == 0) return isolate->heap()->false_value();

  // Let n be ? ToInteger(fromIndex). (If fromIndex is undefined, this step
  // produces the value 0.)
  int64_t start_from;
  {
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, from_index,
                                       Object::ToInteger(isolate, from_index));
    double fp = from_index->Number();
    // A start index at or beyond the end can never match anything.
    if (fp > len) return isolate->heap()->false_value();
    start_from = static_cast<int64_t>(fp);
  }

  // Negative indices count back from the end, clamped to 0.
  int64_t index;
  if (start_from >= 0) {
    index = start_from;
  } else {
    index = len + start_from;
    if (index < 0) {
      index = 0;
    }
  }

  // If the receiver is not a special receiver type, and the length is a valid
  // element index, perform fast operation tailored to specific ElementsKinds.
  if (object->map()->instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
      len < kMaxUInt32 &&
      JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
    Handle<JSObject> obj = Handle<JSObject>::cast(object);
    ElementsAccessor* elements = obj->GetElementsAccessor();
    Maybe<bool> result = elements->IncludesValue(isolate, obj, search_element,
                                                 static_cast<uint32_t>(index),
                                                 static_cast<uint32_t>(len));
    MAYBE_RETURN(result, isolate->heap()->exception());
    return *isolate->factory()->ToBoolean(result.FromJust());
  }

  // Otherwise, perform slow lookups for special receiver types
  for (; index < len; ++index) {
    // Let elementK be the result of ? Get(O, ! ToString(k)).
    Handle<Object> element_k;
    {
      Handle<Object> index_obj = isolate->factory()->NewNumberFromInt64(index);
      bool success;
      LookupIterator it = LookupIterator::PropertyOrElement(
          isolate, object, index_obj, &success);
      DCHECK(success);
      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_k,
                                         Object::GetProperty(&it));
    }

    // If SameValueZero(searchElement, elementK) is true, return true.
    if (search_element->SameValueZero(*element_k)) {
      return isolate->heap()->true_value();
    }
  }
  return isolate->heap()->false_value();
}
} // namespace internal
} // namespace v8
......@@ -55,7 +55,8 @@ namespace internal {
F(GetCachedArrayIndex, 1, 1) \
F(FixedArrayGet, 2, 1) \
F(FixedArraySet, 3, 1) \
F(ArraySpeciesConstructor, 1, 1)
F(ArraySpeciesConstructor, 1, 1) \
F(ArrayIncludes_Slow, 3, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
......
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment