Commit 696b2ced authored by Mike Stanton, committed by Commit Bot

[Builtins] Array.prototype.splice performance improvements

a) The current size of the backing store for the array under splice
    wasn't considered. Additionally, allocate the array with the
    normal growing strategy.

b) Use primitives memcpy and memmove when
   appropriate. These calls are wrapped in new CSA functions
   MoveElements and CopyElements, which use the C functions when a
   write barrier isn't needed (otherwise they just copy elements
   in a loop).

Bug: chromium:880780
Change-Id: I39a917c71036f52250c68f2cced77a1c24f97b67
Reviewed-on: https://chromium-review.googlesource.com/c/1243104
Commit-Queue: Michael Stanton <mvstanton@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56534}
parent d4f749ca
......@@ -9,8 +9,12 @@ module array {
// zero-length input FixedArray is handled here.
macro Extract<FixedArrayType: type>(
elements: FixedArrayBase, first: Smi, count: Smi,
capacity: Smi): FixedArrayType {
return UnsafeCast<FixedArrayType>(
capacity: Smi): FixedArrayType;
Extract<FixedArray>(
elements: FixedArrayBase, first: Smi, count: Smi,
capacity: Smi): FixedArray {
return UnsafeCast<FixedArray>(
ExtractFixedArray(elements, first, count, capacity));
}
......@@ -24,31 +28,73 @@ module array {
ExtractFixedArray(elements, first, count, capacity));
}
// Moves {count} elements within {elements} from {srcIndex} to {dstIndex}.
// The source and destination ranges may overlap; the underlying CSA
// MoveElements handles the overlap (memmove fast path when no write
// barrier is needed, element-wise copy otherwise).
macro DoMoveElements<FixedArrayType: type>(
elements: FixedArrayType, dstIndex: Smi, srcIndex: Smi,
count: Smi): void {
// TorqueMoveElements is overloaded on FixedArray/FixedDoubleArray and
// takes intptr indices, hence the Smi conversions here.
TorqueMoveElements(
elements, Convert<intptr>(dstIndex), Convert<intptr>(srcIndex),
Convert<intptr>(count));
}
// Writes the hole sentinel into {elements} at every index in the
// half-open range [holeStartIndex, holeEndIndex).
macro StoreHoles<FixedArrayType: type>(
    elements: FixedArrayType, holeStartIndex: Smi, holeEndIndex: Smi): void {
  for (let holeIndex: Smi = holeStartIndex; holeIndex < holeEndIndex;
       holeIndex++) {
    StoreArrayHole(elements, holeIndex);
  }
}
// Copies {count} elements from {srcElements} (starting at {srcIndex}) into
// {dstElements} (starting at {dstIndex}). The arrays must be distinct; the
// underlying CSA CopyElements asserts this and uses memcpy when no write
// barrier is needed.
macro DoCopyElements<FixedArrayType: type>(
dstElements: FixedArrayType, dstIndex: Smi, srcElements: FixedArrayType,
srcIndex: Smi, count: Smi): void {
// TorqueCopyElements is overloaded on FixedArray/FixedDoubleArray and
// takes intptr indices, hence the Smi conversions here.
TorqueCopyElements(
dstElements, Convert<intptr>(dstIndex), srcElements,
Convert<intptr>(srcIndex), Convert<intptr>(count));
}
macro FastSplice<FixedArrayType: type, ElementType: type>(
args: constexpr Arguments, a: JSArray, length: Smi, newLength: Smi,
lengthDelta: Smi, actualStart: Smi, insertCount: Smi,
actualDeleteCount: Smi): void
labels Bailout {
const elements: FixedArrayBase = a.elements;
const elementsMap: Map = elements.map;
// Make sure elements are writable.
EnsureWriteableFastElements(a);
// If the spliced array is larger then the
// source array, then allocate a new FixedArrayType to hold the result.
let newElements: FixedArrayBase = elements;
if (elementsMap == kCOWMap || lengthDelta > 0) {
newElements =
Extract<FixedArrayType>(elements, 0, actualStart, newLength);
if (elementsMap == kCOWMap) {
newElements.map = elementsMap;
}
if (insertCount != actualDeleteCount) {
const elements: FixedArrayBase = a.elements;
const dstIndex: Smi = actualStart + insertCount;
const srcIndex: Smi = actualStart + actualDeleteCount;
const count: Smi = length - actualDeleteCount - actualStart;
if (insertCount < actualDeleteCount) {
// Shrink.
DoMoveElements<FixedArrayType>(
UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
StoreHoles<FixedArrayType>(
UnsafeCast<FixedArrayType>(elements), newLength, length);
} else if (insertCount > actualDeleteCount) {
// If the backing store is big enough, then moving elements is enough.
if (newLength <= elements.length) {
DoMoveElements<FixedArrayType>(
UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
} else {
// Grow.
let capacity: Smi = CalculateNewElementsCapacity(newLength);
const newElements: FixedArrayType =
Extract<FixedArrayType>(elements, 0, actualStart, capacity);
a.elements = newElements;
if (elements.length > 0) {
DoCopyElements<FixedArrayType>(
newElements, dstIndex, UnsafeCast<FixedArrayType>(elements),
srcIndex, count);
}
}
}
}
// Copy over inserted elements.
// Copy arguments.
let k: Smi = actualStart;
if (insertCount > 0) {
const typedNewElements: FixedArrayType =
UnsafeCast<FixedArrayType>(newElements);
UnsafeCast<FixedArrayType>(a.elements);
for (let e: Object of args [2: ]) {
// The argument elements were already validated to be an appropriate
// {ElementType} to store in {FixedArrayType}.
......@@ -56,31 +102,6 @@ module array {
}
}
// Copy over elements after deleted elements.
let count: Smi = length - actualStart - actualDeleteCount;
while (count > 0) {
const typedElements: FixedArrayType =
UnsafeCast<FixedArrayType>(elements);
const typedNewElements: FixedArrayType =
UnsafeCast<FixedArrayType>(newElements);
CopyArrayElement(typedElements, typedNewElements, k - lengthDelta, k);
k++;
count--;
}
// Fill rest of spliced FixedArray with the hole, but only if the
// destination FixedArray is the original array's, since otherwise the array
// is pre-filled with holes.
if (elements == newElements) {
const typedNewElements: FixedArrayType =
UnsafeCast<FixedArrayType>(newElements);
const limit: Smi = elements.length;
while (k < limit) {
StoreArrayHole(typedNewElements, k);
k++;
}
}
// Update the array's length after all the FixedArray shuffling is done.
a.length = newLength;
}
......
......@@ -844,6 +844,8 @@ macro AllowNonNumberElements(kind: ElementsKind): ElementsKind {
extern macro AllocateZeroedFixedArray(intptr): FixedArray;
extern macro AllocateZeroedFixedDoubleArray(intptr): FixedDoubleArray;
extern macro CalculateNewElementsCapacity(Smi): Smi;
extern macro CopyFixedArrayElements(
constexpr ElementsKind, FixedArray, constexpr ElementsKind, FixedArray,
intptr, intptr, intptr): void;
......@@ -879,6 +881,36 @@ extern macro ExtractFixedArray(
extern builtin ExtractFastJSArray(Context, JSArray, Smi, Smi): JSArray;
extern macro MoveElements(
constexpr ElementsKind, FixedArrayBase, intptr, intptr, intptr): void;
// FixedArray overload: dispatches to CSA MoveElements with HOLEY_ELEMENTS,
// an object ElementsKind, so the write-barrier check is performed.
macro TorqueMoveElements(
elements: FixedArray, dstIndex: intptr, srcIndex: intptr,
count: intptr): void {
MoveElements(HOLEY_ELEMENTS, elements, dstIndex, srcIndex, count);
}
// FixedDoubleArray overload: dispatches to CSA MoveElements with
// HOLEY_DOUBLE_ELEMENTS, a non-object kind, so no write barrier is needed
// and the raw memmove path can always be taken.
macro TorqueMoveElements(
elements: FixedDoubleArray, dstIndex: intptr, srcIndex: intptr,
count: intptr): void {
MoveElements(HOLEY_DOUBLE_ELEMENTS, elements, dstIndex, srcIndex, count);
}
extern macro CopyElements(
constexpr ElementsKind, FixedArrayBase, intptr, FixedArrayBase, intptr,
intptr): void;
// FixedArray overload: dispatches to CSA CopyElements with HOLEY_ELEMENTS,
// an object ElementsKind, so the write-barrier check is performed.
macro TorqueCopyElements(
dstElements: FixedArray, dstIndex: intptr, srcElements: FixedArray,
srcIndex: intptr, count: intptr): void {
CopyElements(
HOLEY_ELEMENTS, dstElements, dstIndex, srcElements, srcIndex, count);
}
// FixedDoubleArray overload: dispatches to CSA CopyElements with
// HOLEY_DOUBLE_ELEMENTS, a non-object kind, so no write barrier is needed
// and the raw memcpy path can always be taken.
macro TorqueCopyElements(
dstElements: FixedDoubleArray, dstIndex: intptr,
srcElements: FixedDoubleArray, srcIndex: intptr, count: intptr): void {
CopyElements(
HOLEY_DOUBLE_ELEMENTS, dstElements, dstIndex, srcElements, srcIndex,
count);
}
macro LoadElementNoHole<T: type>(a: JSArray, index: Smi): Object
labels IfHole;
......
......@@ -4541,6 +4541,179 @@ void CodeStubAssembler::FillFixedDoubleArrayWithZero(
backing_store, IntPtrConstant(0), byte_length);
}
// Jumps to {interesting} when the page containing {object} has the
// kPointersFromHereAreInteresting flag set (i.e. stores into {object} may
// require a write barrier); otherwise falls through.
void CodeStubAssembler::JumpIfPointersFromHereAreInteresting(
    TNode<Object> object, Label* interesting) {
  Label not_interesting(this);
  TNode<IntPtrT> object_address = BitcastTaggedToWord(object);
  TNode<IntPtrT> page = PageFromAddress(object_address);
  // Read the page header's flag word.
  TNode<IntPtrT> flags = UncheckedCast<IntPtrT>(
      Load(MachineType::IntPtr(), page, IntPtrConstant(Page::kFlagsOffset)));
  TNode<IntPtrT> masked_flags = WordAnd(
      flags,
      IntPtrConstant(MemoryChunk::kPointersFromHereAreInterestingMask));
  Branch(WordEqual(masked_flags, IntPtrConstant(0)), &not_interesting,
         interesting);
  BIND(&not_interesting);
}
// Moves {length} elements within {elements} from {src_index} to
// {dst_index}. The two regions may overlap. When no write barrier is
// required (Smi/double kinds, or the write-barrier page check passes),
// the move is a single libc memmove of the payload bytes; otherwise the
// elements are copied one at a time, walking in the direction that is
// safe for the overlap.
void CodeStubAssembler::MoveElements(ElementsKind kind,
TNode<FixedArrayBase> elements,
TNode<IntPtrT> dst_index,
TNode<IntPtrT> src_index,
TNode<IntPtrT> length) {
Label finished(this);
Label needs_barrier(this);
// Only object element kinds can ever need a write barrier.
const bool needs_barrier_check = IsObjectElementsKind(kind);
DCHECK(IsFastElementsKind(kind));
CSA_ASSERT(this, IsFixedArrayWithKind(elements, kind));
// Both the destination and source regions must lie within the array.
CSA_ASSERT(this,
IntPtrLessThanOrEqual(IntPtrAdd(dst_index, length),
LoadAndUntagFixedArrayBaseLength(elements)));
CSA_ASSERT(this,
IntPtrLessThanOrEqual(IntPtrAdd(src_index, length),
LoadAndUntagFixedArrayBaseLength(elements)));
// The write barrier can be ignored if {elements} is in new space, or if
// we have a SMI or double ElementsKind.
if (needs_barrier_check) {
JumpIfPointersFromHereAreInteresting(elements, &needs_barrier);
}
// Fast path: raw memmove on the element payload.
const TNode<IntPtrT> source_byte_length =
IntPtrMul(length, IntPtrConstant(ElementsKindToByteSize(kind)));
static const int32_t fa_base_data_offset =
FixedArrayBase::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> elements_intptr = BitcastTaggedToWord(elements);
TNode<IntPtrT> target_data_ptr =
IntPtrAdd(elements_intptr,
ElementOffsetFromIndex(dst_index, kind, INTPTR_PARAMETERS,
fa_base_data_offset));
TNode<IntPtrT> source_data_ptr =
IntPtrAdd(elements_intptr,
ElementOffsetFromIndex(src_index, kind, INTPTR_PARAMETERS,
fa_base_data_offset));
TNode<ExternalReference> memmove =
ExternalConstant(ExternalReference::libc_memmove_function());
CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::UintPtr(), memmove,
target_data_ptr, source_data_ptr, source_byte_length);
if (needs_barrier_check) {
Goto(&finished);
// Slow path: element-by-element copy with write barriers.
BIND(&needs_barrier);
{
const TNode<IntPtrT> begin = src_index;
const TNode<IntPtrT> end = IntPtrAdd(begin, length);
// {delta} is the signed byte distance from each source slot to its
// destination slot.
// If dst_index is less than src_index, then walk forward.
const TNode<IntPtrT> delta =
IntPtrMul(IntPtrSub(dst_index, begin),
IntPtrConstant(ElementsKindToByteSize(kind)));
// Loads the element at {offset} in {array} and stores it {delta}
// bytes away (Store emits the write barrier).
auto loop_body = [&](Node* array, Node* offset) {
Node* const element = Load(MachineType::AnyTagged(), array, offset);
Node* const delta_offset = IntPtrAdd(offset, delta);
Store(array, delta_offset, element);
};
Label iterate_forward(this);
Label iterate_backward(this);
// Walk forward when moving toward lower addresses (delta < 0) and
// backward otherwise, so that each source slot is read before it can
// be overwritten by the overlapping destination region.
Branch(IntPtrLessThan(delta, IntPtrConstant(0)), &iterate_forward,
&iterate_backward);
BIND(&iterate_forward);
{
// Make a loop for the stores.
BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
INTPTR_PARAMETERS,
ForEachDirection::kForward);
Goto(&finished);
}
BIND(&iterate_backward);
{
BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
INTPTR_PARAMETERS,
ForEachDirection::kReverse);
Goto(&finished);
}
}
BIND(&finished);
}
}
// Copies {length} elements from {src_elements} at {src_index} into
// {dst_elements} at {dst_index}. The arrays must be distinct (asserted
// below) and of the same ElementsKind. When no write barrier is required,
// the copy is a single libc memcpy; otherwise elements are copied one at
// a time with write barriers.
void CodeStubAssembler::CopyElements(ElementsKind kind,
TNode<FixedArrayBase> dst_elements,
TNode<IntPtrT> dst_index,
TNode<FixedArrayBase> src_elements,
TNode<IntPtrT> src_index,
TNode<IntPtrT> length) {
Label finished(this);
Label needs_barrier(this);
// Only object element kinds can ever need a write barrier.
const bool needs_barrier_check = IsObjectElementsKind(kind);
DCHECK(IsFastElementsKind(kind));
CSA_ASSERT(this, IsFixedArrayWithKind(dst_elements, kind));
CSA_ASSERT(this, IsFixedArrayWithKind(src_elements, kind));
// Both regions must lie within their respective arrays.
CSA_ASSERT(this, IntPtrLessThanOrEqual(
IntPtrAdd(dst_index, length),
LoadAndUntagFixedArrayBaseLength(dst_elements)));
CSA_ASSERT(this, IntPtrLessThanOrEqual(
IntPtrAdd(src_index, length),
LoadAndUntagFixedArrayBaseLength(src_elements)));
// Overlap is not supported here; use MoveElements for same-array moves.
CSA_ASSERT(this, WordNotEqual(dst_elements, src_elements));
// The write barrier can be ignored if {dst_elements} is in new space, or if
// we have a SMI or double ElementsKind.
if (needs_barrier_check) {
JumpIfPointersFromHereAreInteresting(dst_elements, &needs_barrier);
}
// Fast path: raw memcpy on the element payload.
TNode<IntPtrT> source_byte_length =
IntPtrMul(length, IntPtrConstant(ElementsKindToByteSize(kind)));
static const int32_t fa_base_data_offset =
FixedArrayBase::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> src_offset_start = ElementOffsetFromIndex(
src_index, kind, INTPTR_PARAMETERS, fa_base_data_offset);
TNode<IntPtrT> dst_offset_start = ElementOffsetFromIndex(
dst_index, kind, INTPTR_PARAMETERS, fa_base_data_offset);
TNode<IntPtrT> src_elements_intptr = BitcastTaggedToWord(src_elements);
TNode<IntPtrT> source_data_ptr =
IntPtrAdd(src_elements_intptr, src_offset_start);
TNode<IntPtrT> dst_elements_intptr = BitcastTaggedToWord(dst_elements);
TNode<IntPtrT> dst_data_ptr =
IntPtrAdd(dst_elements_intptr, dst_offset_start);
TNode<ExternalReference> memcpy =
ExternalConstant(ExternalReference::libc_memcpy_function());
CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::UintPtr(), memcpy,
dst_data_ptr, source_data_ptr, source_byte_length);
if (needs_barrier_check) {
Goto(&finished);
// Slow path: element-by-element copy with write barriers. The loop
// iterates over {src_elements}; {delta} translates each source byte
// offset to the matching offset in {dst_elements}.
BIND(&needs_barrier);
{
const TNode<IntPtrT> begin = src_index;
const TNode<IntPtrT> end = IntPtrAdd(begin, length);
const TNode<IntPtrT> delta =
IntPtrMul(IntPtrSub(dst_index, src_index),
IntPtrConstant(ElementsKindToByteSize(kind)));
BuildFastFixedArrayForEach(
src_elements, kind, begin, end,
[&](Node* array, Node* offset) {
Node* const element = Load(MachineType::AnyTagged(), array, offset);
Node* const delta_offset = IntPtrAdd(offset, delta);
Store(dst_elements, delta_offset, element);
},
INTPTR_PARAMETERS, ForEachDirection::kForward);
Goto(&finished);
}
BIND(&finished);
}
}
void CodeStubAssembler::CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
Node* to_array, Node* first_element, Node* element_count, Node* capacity,
......
......@@ -1568,6 +1568,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SMI_PARAMETERS);
}
void JumpIfPointersFromHereAreInteresting(TNode<Object> object,
Label* interesting);
// Efficiently copy elements within a single array. The regions
// [src_index, src_index + length) and [dst_index, dst_index + length)
// can be overlapping.
void MoveElements(ElementsKind kind, TNode<FixedArrayBase> elements,
TNode<IntPtrT> dst_index, TNode<IntPtrT> src_index,
TNode<IntPtrT> length);
// Efficiently copy elements from one array to another. The ElementsKind
// needs to be the same. Copy from src_elements at
// [src_index, src_index + length) to dst_elements at
// [dst_index, dst_index + length).
void CopyElements(ElementsKind kind, TNode<FixedArrayBase> dst_elements,
TNode<IntPtrT> dst_index,
TNode<FixedArrayBase> src_elements,
TNode<IntPtrT> src_index, TNode<IntPtrT> length);
TNode<FixedArray> HeapObjectToFixedArray(TNode<HeapObject> base,
Label* cast_fail);
......@@ -1741,6 +1760,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* CalculateNewElementsCapacity(Node* old_capacity,
ParameterMode mode = INTPTR_PARAMETERS);
// Smi-typed convenience overload: delegates to the ParameterMode-based
// CalculateNewElementsCapacity using SMI_PARAMETERS and casts the result.
TNode<Smi> CalculateNewElementsCapacity(TNode<Smi> old_capacity) {
return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS));
}
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment