Commit 5e2e047a authored by Clemens Backes, committed by V8 LUCI CQ

[elements] Fix slowdown from switching to atomic accesses

I found no way to speed up the (relaxed) atomic accesses, so the only
way to get back the original performance is to have a separate path for
the non-shared case.
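
In other words, the element accessors now dispatch on whether the backing
store belongs to a SharedArrayBuffer: plain (possibly unaligned) accesses
for regular buffers, and atomic accesses only for shared ones. Below is a
minimal, self-contained sketch of that shape, with hypothetical Store/Load
helpers rather than the actual SetImpl/GetImpl, and without V8's word-wise
fallback for unaligned 8-byte elements:

  #include <atomic>
  #include <cstdint>
  #include <cstring>

  enum IsSharedBuffer : bool { kShared = true, kUnshared = false };

  template <typename T>
  void Store(T* data_ptr, T value, IsSharedBuffer is_shared) {
    if (!is_shared) {
      // Non-shared path: no other thread may legally touch this memory,
      // so a plain memcpy-based (possibly unaligned) store is enough.
      std::memcpy(data_ptr, &value, sizeof(T));
      return;
    }
    // Shared path: relaxed atomic store, so racy JS accesses do not turn
    // into C++ data races (assumes data_ptr is aligned for std::atomic<T>).
    reinterpret_cast<std::atomic<T>*>(data_ptr)->store(
        value, std::memory_order_relaxed);
  }

  template <typename T>
  T Load(const T* data_ptr, IsSharedBuffer is_shared) {
    if (!is_shared) {
      T result;
      std::memcpy(&result, data_ptr, sizeof(T));
      return result;
    }
    return reinterpret_cast<const std::atomic<T>*>(data_ptr)->load(
        std::memory_order_relaxed);
  }

  int main() {
    int32_t backing[4] = {};
    Store(&backing[1], int32_t{42}, kUnshared);
    return Load(&backing[1], kUnshared) == 42 ? 0 : 1;
  }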

R=ulan@chromium.org

Bug: v8:11704, chromium:1206552, chromium:1207351
Change-Id: I2ea0ecf07583dfe24f4085533491a1d5709c9ffb
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2878750
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74533}
parent c9971ce1
@@ -4,6 +4,7 @@
 #include "src/objects/elements.h"
 
+#include "src/base/atomicops.h"
 #include "src/common/message-template.h"
 #include "src/execution/arguments.h"
 #include "src/execution/frames.h"
@@ -3029,6 +3030,8 @@ class FastHoleyDoubleElementsAccessor
           FastHoleyDoubleElementsAccessor,
           ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>> {};
 
+enum IsSharedBuffer : bool { kShared = true, kUnshared = false };
+
 // Super class for all external element arrays.
 template <ElementsKind Kind, typename ElementType>
 class TypedElementsAccessor
@@ -3076,50 +3079,56 @@ class TypedElementsAccessor
                               Object value) {
     Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
     DCHECK_LE(entry.raw_value(), typed_array->GetLength());
-    SetImpl(static_cast<ElementType*>(typed_array->DataPtr()),
-            entry.raw_value(), FromObject(value));
+    auto* entry_ptr =
+        static_cast<ElementType*>(typed_array->DataPtr()) + entry.raw_value();
+    auto is_shared = typed_array->buffer().is_shared() ? kShared : kUnshared;
+    SetImpl(entry_ptr, FromObject(value), is_shared);
   }
 
-  static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) {
+  static void SetImpl(ElementType* data_ptr, ElementType value,
+                      IsSharedBuffer is_shared) {
+    // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
+    // fields (external pointers, doubles and BigInt data) are not always 8-byte
+    // aligned. This is relying on undefined behaviour in C++, since {data_ptr}
+    // is not aligned to {alignof(ElementType)}.
+    if (!is_shared) {
+      base::WriteUnalignedValue(reinterpret_cast<Address>(data_ptr), value);
+      return;
+    }
+
     // The JavaScript memory model allows for racy reads and writes to a
     // SharedArrayBuffer's backing store. Using relaxed atomics is not strictly
     // required for JavaScript, but will avoid undefined behaviour in C++ and is
     // unlikely to introduce noticable overhead.
-    // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
-    // fields (external pointers, doubles and BigInt data) are not always 8-byte
-    // aligned. Thus we have to store them wordwise. This is relying on
-    // undefined behaviour in C++, since {data_ptr} is not aligned to
-    // {alignof(ElementType)}.
-    if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr + entry),
+    if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr),
                   alignof(std::atomic<ElementType>))) {
       // Use a single relaxed atomic store.
       STATIC_ASSERT(sizeof(std::atomic<ElementType>) == sizeof(ElementType));
-      DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr),
-                       alignof(std::atomic<ElementType>)));
-      reinterpret_cast<std::atomic<ElementType>*>(data_ptr + entry)
-          ->store(value, std::memory_order_relaxed);
-    } else {
+      reinterpret_cast<std::atomic<ElementType>*>(data_ptr)->store(
+          value, std::memory_order_relaxed);
+      return;
+    }
+
     // Some static CHECKs (are optimized out if succeeding) to ensure that
     // {data_ptr} is at least four byte aligned, and {std::atomic<uint32_t>}
     // has size and alignment of four bytes, such that we can cast the
     // {data_ptr} to it.
     CHECK_LE(kInt32Size, alignof(ElementType));
     CHECK_EQ(kInt32Size, alignof(std::atomic<uint32_t>));
     CHECK_EQ(kInt32Size, sizeof(std::atomic<uint32_t>));
     // And dynamically check that we indeed have at least four byte alignment.
     DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr), kInt32Size));
     // Store as multiple 32-bit words. Make {kNumWords} >= 1 to avoid compiler
     // warnings for the empty array or memcpy to an empty object.
     constexpr size_t kNumWords =
         std::max(size_t{1}, sizeof(ElementType) / kInt32Size);
     uint32_t words[kNumWords];
     CHECK_EQ(sizeof(words), sizeof(value));
     memcpy(words, &value, sizeof(value));
     for (size_t word = 0; word < kNumWords; ++word) {
       STATIC_ASSERT(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t));
-        reinterpret_cast<std::atomic<uint32_t>*>(data_ptr + entry)[word].store(
-            words[word], std::memory_order_relaxed);
-      }
+      reinterpret_cast<std::atomic<uint32_t>*>(data_ptr)[word].store(
+          words[word], std::memory_order_relaxed);
     }
   }
@@ -3129,8 +3138,10 @@ class TypedElementsAccessor
     Isolate* isolate = typed_array->GetIsolate();
     DCHECK_LT(entry.raw_value(), typed_array->GetLength());
     DCHECK(!typed_array->WasDetached());
-    ElementType elem = GetImpl(
-        static_cast<ElementType*>(typed_array->DataPtr()), entry.raw_value());
+    auto* element_ptr =
+        static_cast<ElementType*>(typed_array->DataPtr()) + entry.raw_value();
+    auto is_shared = typed_array->buffer().is_shared() ? kShared : kUnshared;
+    ElementType elem = GetImpl(element_ptr, is_shared);
     return ToHandle(isolate, elem);
   }
@@ -3139,47 +3150,53 @@
       UNREACHABLE();
   }
 
-  static ElementType GetImpl(ElementType* data_ptr, size_t entry) {
+  static ElementType GetImpl(ElementType* data_ptr, IsSharedBuffer is_shared) {
+    // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
+    // fields (external pointers, doubles and BigInt data) are not always
+    // 8-byte aligned.
+    if (!is_shared) {
+      return base::ReadUnalignedValue<ElementType>(
+          reinterpret_cast<Address>(data_ptr));
+    }
+
     // The JavaScript memory model allows for racy reads and writes to a
     // SharedArrayBuffer's backing store. Using relaxed atomics is not strictly
     // required for JavaScript, but will avoid undefined behaviour in C++ and is
     // unlikely to introduce noticable overhead.
-    ElementType result;
-    // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
-    // fields (external pointers, doubles and BigInt data) are not always 8-byte
-    // aligned. Thus we have to load them wordwise. This is relying on undefined
-    // behaviour in C++, since {data_ptr} is not aligned to
-    // {alignof(ElementType)}.
-    if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr + entry),
+    if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr),
                   alignof(std::atomic<ElementType>))) {
       // Use a single relaxed atomic load.
       STATIC_ASSERT(sizeof(std::atomic<ElementType>) == sizeof(ElementType));
-      result = reinterpret_cast<std::atomic<ElementType>*>(data_ptr + entry)
-                   ->load(std::memory_order_relaxed);
-    } else {
+      // Note: acquire semantics are not needed here, but clang seems to merge
+      // this atomic load with the non-atomic load above if we use relaxed
+      // semantics. This will result in TSan failures.
+      return reinterpret_cast<std::atomic<ElementType>*>(data_ptr)->load(
+          std::memory_order_acquire);
+    }
+
     // Some static CHECKs (are optimized out if succeeding) to ensure that
     // {data_ptr} is at least four byte aligned, and {std::atomic<uint32_t>}
     // has size and alignment of four bytes, such that we can cast the
     // {data_ptr} to it.
     CHECK_LE(kInt32Size, alignof(ElementType));
     CHECK_EQ(kInt32Size, alignof(std::atomic<uint32_t>));
     CHECK_EQ(kInt32Size, sizeof(std::atomic<uint32_t>));
     // And dynamically check that we indeed have at least four byte alignment.
     DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr), kInt32Size));
     // Load in multiple 32-bit words. Make {kNumWords} >= 1 to avoid compiler
     // warnings for the empty array or memcpy to an empty object.
     constexpr size_t kNumWords =
         std::max(size_t{1}, sizeof(ElementType) / kInt32Size);
     uint32_t words[kNumWords];
     for (size_t word = 0; word < kNumWords; ++word) {
       STATIC_ASSERT(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t));
-      words[word] =
-          reinterpret_cast<std::atomic<uint32_t>*>(data_ptr + entry)[word]
-              .load(std::memory_order_relaxed);
+      words[word] =
+          reinterpret_cast<std::atomic<uint32_t>*>(data_ptr)[word].load(
+              std::memory_order_relaxed);
     }
-      CHECK_EQ(sizeof(words), sizeof(result));
-      memcpy(&result, words, sizeof(result));
-    }
+    ElementType result;
+    CHECK_EQ(sizeof(words), sizeof(result));
+    memcpy(&result, words, sizeof(result));
     return result;
   }
@@ -3316,6 +3333,7 @@ class TypedElementsAccessor
     ElementType typed_search_value;
     ElementType* data_ptr =
         reinterpret_cast<ElementType*>(typed_array.DataPtr());
+    auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
     if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
       if (!value->IsBigInt()) return Just(false);
       bool lossless;
@@ -3331,8 +3349,8 @@ class TypedElementsAccessor
     }
     if (std::isnan(search_value)) {
       for (size_t k = start_from; k < length; ++k) {
-        double elem_k =
-            static_cast<double>(AccessorClass::GetImpl(data_ptr, k));
+        double elem_k = static_cast<double>(
+            AccessorClass::GetImpl(data_ptr + k, is_shared));
         if (std::isnan(elem_k)) return Just(true);
       }
       return Just(false);
@@ -3349,7 +3367,7 @@ class TypedElementsAccessor
     }
     for (size_t k = start_from; k < length; ++k) {
-      ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+      ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
       if (elem_k == typed_search_value) return Just(true);
     }
     return Just(false);
@@ -3401,8 +3419,9 @@ class TypedElementsAccessor
       length = typed_array.length();
     }
+    auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
     for (size_t k = start_from; k < length; ++k) {
-      ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+      ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
       if (elem_k == typed_search_value) return Just<int64_t>(k);
     }
     return Just<int64_t>(-1);
@@ -3449,8 +3468,9 @@ class TypedElementsAccessor
     DCHECK_LT(start_from, typed_array.length());
     size_t k = start_from;
+    auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
     do {
-      ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+      ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
       if (elem_k == typed_search_value) return Just<int64_t>(k);
     } while (k-- != 0);
     return Just<int64_t>(-1);
@@ -3500,12 +3520,16 @@ class TypedElementsAccessor
     size_t count = end - start;
     DCHECK_LE(count, destination.length());
     ElementType* dest_data = static_cast<ElementType*>(destination.DataPtr());
+    auto is_shared =
+        source.buffer().is_shared() || destination.buffer().is_shared()
+            ? kShared
+            : kUnshared;
     switch (source.GetElementsKind()) {
 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype)                            \
   case TYPE##_ELEMENTS: {                                                    \
     ctype* source_data = reinterpret_cast<ctype*>(source.DataPtr()) + start; \
     CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>(source_data, dest_data, \
-                                                     count);                 \
+                                                     count, is_shared);      \
     break;                                                                   \
   }
   TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -3524,16 +3548,16 @@ class TypedElementsAccessor
   template <ElementsKind SourceKind, typename SourceElementType>
   static void CopyBetweenBackingStores(SourceElementType* source_data_ptr,
                                        ElementType* dest_data_ptr,
-                                       size_t length) {
-    DisallowGarbageCollection no_gc;
-    for (size_t i = 0; i < length; i++) {
+                                       size_t length,
+                                       IsSharedBuffer is_shared) {
+    for (; length > 0; --length, ++source_data_ptr, ++dest_data_ptr) {
       // We use scalar accessors to avoid boxing/unboxing, so there are no
       // allocations.
       SourceElementType source_elem =
           TypedElementsAccessor<SourceKind, SourceElementType>::GetImpl(
-              source_data_ptr, i);
+              source_data_ptr, is_shared);
       ElementType dest_elem = FromScalar(source_elem);
-      SetImpl(dest_data_ptr, i, dest_elem);
+      SetImpl(dest_data_ptr, dest_elem, is_shared);
     }
   }
@@ -3564,14 +3588,24 @@ class TypedElementsAccessor
     size_t source_byte_length = source.byte_length();
     size_t dest_byte_length = destination.byte_length();
 
+    bool source_shared = source.buffer().is_shared();
+    bool destination_shared = destination.buffer().is_shared();
+
     // We can simply copy the backing store if the types are the same, or if
     // we are converting e.g. Uint8 <-> Int8, as the binary representation
     // will be the same. This is not the case for floats or clamped Uint8,
     // which have special conversion operations.
     if (same_type || (same_size && both_are_simple)) {
       size_t element_size = source.element_size();
-      std::memmove(dest_data + offset * element_size, source_data,
-                   length * element_size);
+      if (source_shared || destination_shared) {
+        base::Relaxed_Memcpy(
+            reinterpret_cast<base::Atomic8*>(dest_data + offset * element_size),
+            reinterpret_cast<base::Atomic8*>(source_data),
+            length * element_size);
+      } else {
+        std::memmove(dest_data + offset * element_size, source_data,
+                     length * element_size);
+      }
     } else {
       std::unique_ptr<uint8_t[]> cloned_source_elements;
@@ -3579,17 +3613,25 @@ class TypedElementsAccessor
       if (dest_data + dest_byte_length > source_data &&
           source_data + source_byte_length > dest_data) {
         cloned_source_elements.reset(new uint8_t[source_byte_length]);
-        std::memcpy(cloned_source_elements.get(), source_data,
-                    source_byte_length);
+        if (source_shared) {
+          base::Relaxed_Memcpy(
+              reinterpret_cast<base::Atomic8*>(cloned_source_elements.get()),
+              reinterpret_cast<base::Atomic8*>(source_data),
+              source_byte_length);
+        } else {
+          std::memcpy(cloned_source_elements.get(), source_data,
+                      source_byte_length);
+        }
         source_data = cloned_source_elements.get();
       }
       switch (source.GetElementsKind()) {
 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype)                        \
   case TYPE##_ELEMENTS:                                                  \
     CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>(                    \
         reinterpret_cast<ctype*>(source_data),                           \
-        reinterpret_cast<ElementType*>(dest_data) + offset, length);     \
+        reinterpret_cast<ElementType*>(dest_data) + offset, length,      \
+        source_shared || destination_shared ? kShared : kUnshared);      \
     break;
     TYPED_ARRAYS(TYPED_ARRAY_CASE)
     default:
@@ -3645,6 +3687,9 @@ class TypedElementsAccessor
     ElementsKind kind = source.GetElementsKind();
 
+    auto destination_shared =
+        destination.buffer().is_shared() ? kShared : kUnshared;
+
     // When we find the hole, we normally have to look up the element on the
     // prototype chain, which is not handled here and we return false instead.
     // When the array has the original array prototype, and that prototype has
@@ -3662,17 +3707,19 @@ class TypedElementsAccessor
       for (size_t i = 0; i < length; i++) {
         Object elem = source_store.get(static_cast<int>(i));
-        SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
+        SetImpl(dest_data + i, FromScalar(Smi::ToInt(elem)),
+                destination_shared);
       }
       return true;
     } else if (kind == HOLEY_SMI_ELEMENTS) {
       FixedArray source_store = FixedArray::cast(source.elements());
       for (size_t i = 0; i < length; i++) {
         if (source_store.is_the_hole(isolate, static_cast<int>(i))) {
-          SetImpl(dest_data, i, FromObject(undefined));
+          SetImpl(dest_data + i, FromObject(undefined), destination_shared);
         } else {
           Object elem = source_store.get(static_cast<int>(i));
-          SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
+          SetImpl(dest_data + i, FromScalar(Smi::ToInt(elem)),
+                  destination_shared);
         }
       }
       return true;
@@ -3685,17 +3732,17 @@ class TypedElementsAccessor
         // Use the from_double conversion for this specific TypedArray type,
        // rather than relying on C++ to convert elem.
         double elem = source_store.get_scalar(static_cast<int>(i));
-        SetImpl(dest_data, i, FromScalar(elem));
+        SetImpl(dest_data + i, FromScalar(elem), destination_shared);
       }
       return true;
     } else if (kind == HOLEY_DOUBLE_ELEMENTS) {
       FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
       for (size_t i = 0; i < length; i++) {
         if (source_store.is_the_hole(static_cast<int>(i))) {
-          SetImpl(dest_data, i, FromObject(undefined));
+          SetImpl(dest_data + i, FromObject(undefined), destination_shared);
         } else {
           double elem = source_store.get_scalar(static_cast<int>(i));
-          SetImpl(dest_data, i, FromScalar(elem));
+          SetImpl(dest_data + i, FromScalar(elem), destination_shared);
         }
       }
       return true;