Commit 12ad2554 authored by cbruni, committed by Commit bot

Revert of Array.prototype.unshift builtin improvements (patchset #3 id:40001 of https://codereview.chromium.org/1311343002/ )

Reason for revert:
https://codereview.chromium.org/1315823004/

Original issue's description:
> Array.prototype.unshift builtin improvements
>
> Moving unshift to ElementAccessor and increasing the range of arguments
> handled directly in C++, namely directly supporting FastDoubleElementsKind.
> This should yield a factor 19 speedup for unshift on fast double arrays.
>
> BUG=
>
> Committed: https://crrev.com/bf6764e6c1197e50ae148755488307a423b1d9b4
> Cr-Commit-Position: refs/heads/master@{#30347}

TBR=yangguo@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1311363003

Cr-Commit-Position: refs/heads/master@{#30350}
parent 1507da8d
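For context, the reverted CL moved Array.prototype.unshift into ElementsAccessor so that arrays with a FastDoubleElementsKind backing store could stay on the C++ fast path instead of falling back to the JS builtin. A minimal sketch of the kind of call it targeted (hypothetical JavaScript, not part of this commit):

  // Hypothetical illustration only: an array holding unboxed doubles uses a
  // FAST_DOUBLE_ELEMENTS backing store in V8.
  var doubles = [0.5, 1.5, 2.5];
  doubles.unshift(-0.5);  // the case the reverted CL handled directly in C++

  // Smi/object arrays were already handled by the C++ builtin before the CL.
  var smis = [1, 2, 3];
  smis.unshift(0);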
src/builtins.cc
@@ -175,9 +175,9 @@ BUILTIN(EmptyFunction) {
return isolate->heap()->undefined_value();
}
namespace {
bool ObjectToClampedInteger(Object* object, int* out) {
// TODO(cbruni): check if this is a suitable method on Object
bool ClampedToInteger(Object* object, int* out) {
// This is an extended version of ECMA-262 9.4, but additionally
// clamps values to [kMinInt, kMaxInt]
if (object->IsSmi()) {
@@ -331,7 +331,6 @@ MUST_USE_RESULT static Object* CallJsBuiltin(
return *result;
}
} // namespace
BUILTIN(ArrayPush) {
HandleScope scope(isolate);
@@ -433,6 +432,7 @@ BUILTIN(ArrayShift) {
}
}
// Set the length.
array->set_length(Smi::FromInt(len - 1));
return *first;
@@ -450,15 +450,51 @@ BUILTIN(ArrayUnshift) {
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
DCHECK(!array->map()->is_observed());
if (!array->HasFastSmiOrObjectElements()) {
return CallJsBuiltin(isolate, "$arrayUnshift", args);
}
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
int new_length = len + to_add;
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
DCHECK(to_add <= (Smi::kMaxValue - len));
if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
return CallJsBuiltin(isolate, "$arrayUnshift", args);
}
Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Handle<FixedArray> new_elms =
isolate->factory()->NewUninitializedFixedArray(capacity);
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
int new_length = accessor->Unshift(array, elms_obj, args, to_add);
accessor->CopyElements(
elms, 0, kind, new_elms, to_add,
ElementsAccessor::kCopyToEndAndInitializeToHole);
elms = new_elms;
array->set_elements(*elms);
} else {
DisallowHeapAllocation no_gc;
Heap* heap = isolate->heap();
heap->MoveElements(*elms, to_add, 0, len);
}
// Add the provided values.
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int i = 0; i < to_add; i++) {
elms->set(i, args[i + 1], mode);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
}
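The growth policy restored above allocates capacity new_length + (new_length >> 1) + 16 when the backing store must grow, i.e. roughly 1.5x the required length plus a small fixed slack. With illustrative numbers, unshifting one element onto a full 100-element backing store gives new_length = 101 and capacity = 101 + 50 + 16 = 167.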
@@ -620,7 +656,7 @@ BUILTIN(ArraySplice) {
int relative_start = 0;
if (argument_count > 0) {
DisallowHeapAllocation no_gc;
if (!ObjectToClampedInteger(args[1], &relative_start)) {
if (!ClampedToInteger(args[1], &relative_start)) {
AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "$arraySplice", args);
}
@@ -642,7 +678,7 @@ BUILTIN(ArraySplice) {
int delete_count = 0;
DisallowHeapAllocation no_gc;
if (argument_count > 1) {
if (!ObjectToClampedInteger(args[2], &delete_count)) {
if (!ClampedToInteger(args[2], &delete_count)) {
AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "$arraySplice", args);
}
src/elements.cc
@@ -585,20 +585,6 @@ class ElementsAccessorBase : public ElementsAccessor {
return 0;
}
virtual uint32_t Unshift(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store, Arguments args,
uint32_t add_count) {
return ElementsAccessorSubclass::UnshiftImpl(receiver, backing_store, args,
add_count);
}
static uint32_t UnshiftImpl(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store,
Arguments args, uint32_t add_count) {
UNREACHABLE();
return 0;
}
virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store,
uint32_t start, uint32_t delete_count,
@@ -1195,50 +1181,6 @@ class FastElementsAccessor
#endif
}
static uint32_t UnshiftImpl(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store,
Arguments args, uint32_t add_count) {
int len = Smi::cast(receiver->length())->value();
if (add_count == 0) {
return len;
}
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
DCHECK(add_count <= static_cast<uint32_t>(Smi::kMaxValue - len));
int new_length = len + add_count;
Handle<FixedArrayBase> new_elements;
if (new_length > backing_store->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
new_elements = FastElementsAccessorSubclass::ConvertElementsWithCapacity(
receiver, backing_store, KindTraits::Kind, capacity, 0);
FastElementsAccessorSubclass::CopyElementsImpl(
*backing_store, 0, *new_elements, KindTraits::Kind, add_count,
kPackedSizeNotKnown, ElementsAccessor::kCopyToEndAndInitializeToHole);
} else {
DisallowHeapAllocation no_gc;
Heap* heap = receiver->GetIsolate()->heap();
FastElementsAccessorSubclass::MoveElements(heap, backing_store, add_count,
0, len, 0, 0);
new_elements = backing_store;
}
// Add the provided values.
DisallowHeapAllocation no_gc;
for (uint32_t i = 0; i < add_count; i++) {
FastElementsAccessorSubclass::SetImpl(*new_elements, i, args[i + 1]);
}
if (!new_elements.is_identical_to(backing_store)) {
receiver->set_elements(*new_elements);
}
// Set the length.
receiver->set_length(Smi::FromInt(new_length));
return new_length;
}
static uint32_t PushImpl(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store,
Object** objects, uint32_t push_size,
@@ -1247,13 +1189,14 @@ class FastElementsAccessor
if (push_size == 0) {
return len;
}
uint32_t elms_len = backing_store->length();
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
DCHECK(push_size <= static_cast<uint32_t>(Smi::kMaxValue - len));
uint32_t new_length = len + push_size;
Handle<FixedArrayBase> new_elms;
if (new_length > static_cast<uint32_t>(backing_store->length())) {
if (new_length > elms_len) {
// New backing storage is needed.
uint32_t capacity = new_length + (new_length >> 1) + 16;
new_elms = FastElementsAccessorSubclass::ConvertElementsWithCapacity(
@@ -1328,10 +1271,19 @@ class FastElementsAccessor
}
// Copy new Elements from args
if (IsFastDoubleElementsKind(KindTraits::Kind)) {
for (uint32_t index = start; index < start + add_count; index++) {
Object* arg = args[3 + index - start];
FastElementsAccessorSubclass::SetImpl(*backing_store, index, arg);
}
} else {
// FastSmiOrObjectElementsKind
Handle<FixedArray> elms = Handle<FixedArray>::cast(backing_store);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (uint32_t index = start; index < start + add_count; index++) {
Object* object = args[3 + index - start];
FastElementsAccessorSubclass::SetImpl(*backing_store, index, object);
elms->set(index, args[3 + index - start], mode);
}
}
if (elms_changed) {
src/elements.h
@@ -125,10 +125,6 @@ class ElementsAccessor {
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) = 0;
virtual uint32_t Unshift(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store, Arguments args,
uint32_t add_count) = 0;
// TODO(cbruni): Consider passing Arguments* instead of Object** depending on
// the requirements of future callers.
virtual uint32_t Push(Handle<JSArray> receiver,