Commit 84d5b027 authored by Maya Lekova, committed by V8 LUCI CQ

[fastcall] Implement support for TypedArray arguments

This CL adds TypedArrays as supported arguments for fast API calls.
It implements "exact type" matching, i.e. if a Float32Array is expected
and e.g. an Int32Array is passed instead, the generated code bails out
to the slow callback.
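
For illustration, a fast callback taking a typed array can now be
declared roughly like this (a minimal sketch, not code from this CL;
SumFloats is a hypothetical embedder function):

  #include "include/v8-fast-api-calls.h"

  // Receives the raw {data, length} pair that the generated code packs
  // into a stack-allocated FastApiTypedArray<float>.
  float SumFloats(v8::Local<v8::Object> receiver,
                  const v8::FastApiTypedArray<float>& floats) {
    float sum = 0;
    for (size_t i = 0; i < floats.length; ++i) sum += floats.data[i];
    return sum;
  }

  // Registered via v8::CFunction::Make(SumFloats). A Float32Array
  // argument dispatches to SumFloats; any other elements kind
  // (e.g. Int32Array) fails the map check and takes the slow path.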

Bug: chromium:1052746, chromium:1018624
Change-Id: I01d4e681d2b367cbb57b06effcb591c090a23295
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2999094
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75756}
parent cdb88251
@@ -578,7 +578,7 @@ PRIMITIVE_C_TYPES(DEFINE_TYPE_INFO_TRAITS)
#define SPECIALIZE_GET_TYPE_INFO_HELPER_FOR_TA(T, Enum) \
template <> \
-  struct TypeInfoHelper<FastApiTypedArray<T>> { \
+  struct TypeInfoHelper<const FastApiTypedArray<T>&> { \
static constexpr CTypeInfo::Flags Flags() { \
return CTypeInfo::Flags::kNone; \
} \
......
@@ -197,6 +197,9 @@ class EffectControlLinearizer {
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadFieldByIndex(Node* node);
Node* LowerLoadMessage(Node* node);
Node* AdaptFastCallTypedArrayArgument(Node* node,
ElementsKind expected_elements_kind,
GraphAssemblerLabel<0>* bailout);
Node* AdaptFastCallArgument(Node* node, CTypeInfo arg_type,
GraphAssemblerLabel<0>* if_error);
@@ -5004,16 +5007,102 @@ MachineType MachineTypeFor(CTypeInfo::Type type) {
}
} // namespace
Node* EffectControlLinearizer::AdaptFastCallTypedArrayArgument(
Node* node, ElementsKind expected_elements_kind,
GraphAssemblerLabel<0>* bailout) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
Node* value_is_typed_array = __ Word32Equal(
value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE));
__ GotoIfNot(value_is_typed_array, bailout);
Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), value_map);
Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
Node* andit = __ Word32And(bit_field2, mask);
Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
Node* kind = __ Word32Shr(andit, shift);
Node* value_is_expected_elements_kind =
__ Word32Equal(kind, __ Int32Constant(expected_elements_kind));
__ GotoIfNot(value_is_expected_elements_kind, bailout);
Node* buffer =
__ LoadField(AccessBuilder::ForJSArrayBufferViewBuffer(), node);
Node* buffer_bit_field =
__ LoadField(AccessBuilder::ForJSArrayBufferBitField(), buffer);
// Go to the slow path if the {buffer} was detached.
Node* buffer_is_not_detached = __ Word32Equal(
__ Word32And(buffer_bit_field,
__ Int32Constant(JSArrayBuffer::WasDetachedBit::kMask)),
__ ZeroConstant());
__ GotoIfNot(buffer_is_not_detached, bailout);
// Go to the slow path if the {buffer} is shared.
Node* buffer_is_not_shared = __ Word32Equal(
__ Word32And(buffer_bit_field,
__ Int32Constant(JSArrayBuffer::IsSharedBit::kMask)),
__ ZeroConstant());
__ GotoIfNot(buffer_is_not_shared, bailout);
// Unpack the data pointer and length, and store them to a struct
// FastApiTypedArray.
Node* external_pointer =
__ LoadField(AccessBuilder::ForJSTypedArrayExternalPointer(), node);
// Load the base pointer for the buffer. This will always be Smi
// zero unless we allow on-heap TypedArrays, which is only the case
// for Chrome. Node and Electron both set this limit to 0. Setting
// the base to Smi zero here allows the BuildTypedArrayDataPointer
// to optimize away the tricky part of the access later.
Node* base_pointer =
__ LoadField(AccessBuilder::ForJSTypedArrayBasePointer(), node);
if (JSTypedArray::kMaxSizeInHeap == 0) {
base_pointer = jsgraph()->ZeroConstant();
}
Node* data_ptr = BuildTypedArrayDataPointer(base_pointer, external_pointer);
Node* length_in_bytes =
__ LoadField(AccessBuilder::ForJSTypedArrayLength(), node);
// We hard-code int32_t here, because all specializations of
// FastApiTypedArray have the same size.
constexpr int kAlign = alignof(FastApiTypedArray<int32_t>);
constexpr int kSize = sizeof(FastApiTypedArray<int32_t>);
static_assert(kAlign == alignof(FastApiTypedArray<double>),
"Alignment mismatch between different specializations of "
"FastApiTypedArray");
static_assert(kSize == sizeof(FastApiTypedArray<double>),
"Size mismatch between different specializations of "
"FastApiTypedArray");
static_assert(
kSize == sizeof(uintptr_t) + sizeof(size_t),
"The size of "
"FastApiTypedArray isn't equal to the sum of its expected members.");
Node* stack_slot = __ StackSlot(kSize, kAlign);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, 0, data_ptr);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, sizeof(uintptr_t), length_in_bytes);
static_assert(sizeof(uintptr_t) == sizeof(size_t),
"The buffer length can't "
"fit the PointerRepresentation used to store it.");
return stack_slot;
}
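// A sketch of the layout the two stores above assume for
// FastApiTypedArray<T> (field order inferred from the offsets used:
// data pointer at 0, length at sizeof(uintptr_t)); the authoritative
// definition lives in include/v8-fast-api-calls.h:
//
//   template <typename T>
//   struct FastApiTypedArray {
//     T* data;        // base address of the backing memory
//     size_t length;  // number of elements
//   };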
Node* EffectControlLinearizer::AdaptFastCallArgument(
Node* node, CTypeInfo arg_type, GraphAssemblerLabel<0>* if_error) {
+  int kAlign = alignof(uintptr_t);
+  int kSize = sizeof(uintptr_t);
switch (arg_type.GetSequenceType()) {
case CTypeInfo::SequenceType::kScalar: {
switch (arg_type.GetType()) {
case CTypeInfo::Type::kV8Value: {
-          int kAlign = alignof(uintptr_t);
-          int kSize = sizeof(uintptr_t);
Node* stack_slot = __ StackSlot(kSize, kAlign);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, 0, node);
@@ -5035,10 +5124,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
Node* value_is_smi = ObjectIsSmi(node);
__ GotoIf(value_is_smi, if_error);
-      int kAlign = alignof(uintptr_t);
-      int kSize = sizeof(uintptr_t);
Node* stack_slot = __ StackSlot(kSize, kAlign);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot, 0, node);
@@ -5053,9 +5139,15 @@ Node* EffectControlLinearizer::AdaptFastCallArgument(
return stack_slot;
}
-    case CTypeInfo::SequenceType::kIsTypedArray:
-      // TODO(mslekova): Implement typed arrays.
-      return node;
+    case CTypeInfo::SequenceType::kIsTypedArray: {
+      // Check that the value is a HeapObject.
+      Node* value_is_smi = ObjectIsSmi(node);
+      __ GotoIf(value_is_smi, if_error);
+      return AdaptFastCallTypedArrayArgument(
+          node, fast_api_call::GetTypedArrayElementsKind(arg_type.GetType()),
+          if_error);
+    }
default: {
UNREACHABLE();
}
@@ -5069,14 +5161,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
GraphAssemblerLabel<0>* if_error) {
static constexpr int kReceiver = 1;
-  auto merge = __ MakeLabel(MachineRepresentation::kTagged);
-  int kAlign = alignof(uintptr_t);
-  int kSize = sizeof(uintptr_t);
-  Node* stack_slot = __ StackSlot(kSize, kAlign);
-  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
-                               kNoWriteBarrier),
-           stack_slot, 0, node);
+  auto merge = __ MakeLabel(MachineRepresentation::kTagged,
+                            MachineRepresentation::kTagged);
for (size_t func_index = 0; func_index < c_functions.size(); func_index++) {
const CFunctionInfo* c_signature = c_functions[func_index].signature;
@@ -5101,34 +5187,31 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
value_instance_type, __ Int32Constant(JS_ARRAY_TYPE));
__ GotoIfNot(value_is_js_array, &next);
+          int kAlign = alignof(uintptr_t);
+          int kSize = sizeof(uintptr_t);
+          Node* stack_slot = __ StackSlot(kSize, kAlign);
+          __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+                                       kNoWriteBarrier),
+                   stack_slot, 0, node);
Node* target_address = __ ExternalConstant(
    ExternalReference::Create(c_functions[func_index].address));
-          __ Goto(&merge, target_address);
+          __ Goto(&merge, target_address, stack_slot);
break;
}
case CTypeInfo::SequenceType::kIsTypedArray: {
  // Check that the value is a TypedArray with a type that matches the
  // type declared in the c-function.
-          ElementsKind typed_array_elements_kind =
-              fast_api_call::GetTypedArrayElementsKind(
-                  overloads_resolution_result.element_type);
-          Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
-          Node* value_bit_field2 =
-              __ LoadField(AccessBuilder::ForMapBitField2(), value_map);
-          Node* value_elements_kind = __ WordShr(
-              __ WordAnd(value_bit_field2,
-                         __ Int32Constant(Map::Bits2::ElementsKindBits::kMask)),
-              __ Int32Constant(Map::Bits2::ElementsKindBits::kShift));
-          Node* is_same_kind = __ Word32Equal(
-              value_elements_kind,
-              __ Int32Constant(GetPackedElementsKind(typed_array_elements_kind)));
-          __ GotoIfNot(is_same_kind, &next);
+          Node* stack_slot = AdaptFastCallTypedArrayArgument(
+              node,
+              fast_api_call::GetTypedArrayElementsKind(
+                  overloads_resolution_result.element_type),
+              &next);
  Node* target_address = __ ExternalConstant(
      ExternalReference::Create(c_functions[func_index].address));
-          __ Goto(&merge, target_address);
+          __ Goto(&merge, target_address, stack_slot);
  break;
}
@@ -5142,7 +5225,7 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
__ Goto(if_error);
__ Bind(&merge);
-  return {merge.PhiAt(0), stack_slot};
+  return {merge.PhiAt(0), merge.PhiAt(1)};
}
Node* EffectControlLinearizer::WrapFastCall(
......
@@ -140,16 +140,20 @@ class FastCApiObject {
FastCApiObject* self = UnwrapObject(args.This());
CHECK_SELF_OR_THROW();
-    self->slow_call_count_++;
    HandleScope handle_scope(isolate);
    if (args.Length() < 2) {
+      self->slow_call_count_++;
      isolate->ThrowError("This method expects at least 2 arguments.");
      return;
    }
    if (args[1]->IsTypedArray()) {
-      // Not supported yet.
+      AddAllTypedArraySlowCallback(args);
      return;
    }
+    self->slow_call_count_++;
if (args[1]->IsUndefined()) {
Type dummy_result = 0;
args.GetReturnValue().Set(Number::New(isolate, dummy_result));
return;
@@ -181,13 +185,11 @@ class FastCApiObject {
}
args.GetReturnValue().Set(Number::New(isolate, sum));
}
-    // TODO(mslekova) - The typed array param should be a
-    // {size_t length, uint32_t* data}
-    static Type AddAllTypedArrayFastCallback(Local<Object> receiver,
-                                             bool should_fallback,
-                                             Local<Uint32Array> typed_array_arg,
-                                             FastApiCallbackOptions& options) {
+    template <typename T>
+    static Type AddAllTypedArrayFastCallback(
+        Local<Object> receiver, bool should_fallback,
+        const FastApiTypedArray<T>& typed_array_arg,
+        FastApiCallbackOptions& options) {
FastCApiObject* self = UnwrapObject(receiver);
CHECK_SELF_OR_FALLBACK(0);
self->fast_call_count_++;
@@ -197,12 +199,72 @@ class FastCApiObject {
return 0;
}
-    // Not implemented.
-    return 0;
+    if (!typed_array_arg.data) {
+      options.fallback = 1;
+      return 0;
+    }
+    T sum = 0;
+    for (unsigned i = 0; i < typed_array_arg.length; ++i) {
+      sum += typed_array_arg.data[i];
+    }
+    return static_cast<Type>(sum);
}
static void AddAllTypedArraySlowCallback(
const FunctionCallbackInfo<Value>& args) {
-      // Not implemented.
Isolate* isolate = args.GetIsolate();
FastCApiObject* self = UnwrapObject(args.This());
CHECK_SELF_OR_THROW();
self->slow_call_count_++;
HandleScope handle_scope(isolate);
if (args.Length() < 2) {
isolate->ThrowError("This method expects at least 2 arguments.");
return;
}
if (!args[1]->IsTypedArray()) {
isolate->ThrowError(
"This method expects a TypedArray as a second argument.");
return;
}
Local<TypedArray> typed_array_arg = args[1].As<TypedArray>();
size_t length = typed_array_arg->Length();
void* data = typed_array_arg->Buffer()->GetBackingStore()->Data();
if (typed_array_arg->IsInt32Array() || typed_array_arg->IsUint32Array() ||
typed_array_arg->IsBigInt64Array() ||
typed_array_arg->IsBigUint64Array()) {
int64_t sum = 0;
for (unsigned i = 0; i < length; ++i) {
if (typed_array_arg->IsInt32Array()) {
sum += static_cast<int32_t*>(data)[i];
} else if (typed_array_arg->IsUint32Array()) {
sum += static_cast<uint32_t*>(data)[i];
} else if (typed_array_arg->IsBigInt64Array()) {
sum += static_cast<int64_t*>(data)[i];
} else if (typed_array_arg->IsBigUint64Array()) {
sum += static_cast<uint64_t*>(data)[i];
}
}
args.GetReturnValue().Set(Number::New(isolate, sum));
} else if (typed_array_arg->IsFloat32Array() ||
typed_array_arg->IsFloat64Array()) {
double sum = 0;
for (unsigned i = 0; i < length; ++i) {
if (typed_array_arg->IsFloat32Array()) {
  sum += static_cast<float*>(data)[i];
} else if (typed_array_arg->IsFloat64Array()) {
  sum += static_cast<double*>(data)[i];
}
}
args.GetReturnValue().Set(Number::New(isolate, sum));
} else {
isolate->ThrowError("TypedArray type is not supported.");
return;
}
}
static int32_t AddAllIntInvalidCallback(Local<Object> receiver,
@@ -472,17 +534,46 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_seq_c_func));
-    CFunction add_all_typed_array_c_func =
-        CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback);
+    CFunction add_all_int32_typed_array_c_func =
+        CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<int32_t>);
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_int32_typed_array",
FunctionTemplate::New(
isolate, FastCApiObject::AddAllTypedArraySlowCallback,
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_int32_typed_array_c_func));
CFunction add_all_uint32_typed_array_c_func =
CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<uint32_t>);
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_uint32_typed_array",
FunctionTemplate::New(
isolate, FastCApiObject::AddAllTypedArraySlowCallback,
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect,
&add_all_uint32_typed_array_c_func));
CFunction add_all_int64_typed_array_c_func =
CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<int64_t>);
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_int64_typed_array",
FunctionTemplate::New(
isolate, FastCApiObject::AddAllTypedArraySlowCallback,
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_int64_typed_array_c_func));
CFunction add_all_uint64_typed_array_c_func =
CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<uint64_t>);
api_obj_ctor->PrototypeTemplate()->Set(
-        isolate, "add_all_typed_array",
+        isolate, "add_all_uint64_typed_array",
    FunctionTemplate::New(
        isolate, FastCApiObject::AddAllTypedArraySlowCallback,
        Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
-            SideEffectType::kHasSideEffect, &add_all_typed_array_c_func));
+            SideEffectType::kHasSideEffect,
+            &add_all_uint64_typed_array_c_func));
const CFunction add_all_overloads[] = {
-        add_all_typed_array_c_func,
+        add_all_uint32_typed_array_c_func,
add_all_seq_c_func,
};
api_obj_ctor->PrototypeTemplate()->Set(
......
@@ -28737,7 +28737,7 @@ TEST(FastApiCalls) {
#ifndef V8_LITE_MODE
namespace {
void FastCallback1TypedArray(v8::Local<v8::Object> receiver, int arg0,
-                             v8::FastApiTypedArray<double> arg1) {
+                             const v8::FastApiTypedArray<double>& arg1) {
// TODO(mslekova): Use the TypedArray parameter
}
@@ -163,7 +163,7 @@ assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45));
assertOptimized(add_32bit_int_mismatch);
// Test that passing too few arguments falls down the slow path,
-// because it's an argument type mismatch (undefined vs. int).
+// because one of the arguments is undefined.
fast_c_api.reset_counts();
assertEquals(-42, add_32bit_int_mismatch(false, -42));
assertUnoptimized(add_32bit_int_mismatch);
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
// Helper for sequence tests.
function optimize_and_check(func, fast_count, slow_count, expected) {
%PrepareFunctionForOptimization(func);
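  // The first call runs unoptimized and records type feedback for func.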
let result = func();
assertEquals(expected, result);
fast_c_api.reset_counts();
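  // Optimize and call again; this call should now take the path under test.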
%OptimizeFunctionOnNextCall(func);
result = func();
assertEquals(expected, result);
assertOptimized(func);
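  // Check how the optimized call was dispatched between fast and slow path.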
assertEquals(fast_count, fast_c_api.fast_call_count());
assertEquals(slow_count, fast_c_api.slow_call_count());
}
function ExpectFastCall(func, expected) {
optimize_and_check(func, 1, 0, expected);
}
function ExpectSlowCall(func, expected) {
optimize_and_check(func, 0, 1, expected);
}
function assert_throws_and_optimized(func, arg) {
fast_c_api.reset_counts();
assertThrows(() => func(arg));
assertOptimized(func);
assertEquals(0, fast_c_api.fast_call_count());
assertEquals(1, fast_c_api.slow_call_count());
}
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file adds x64 specific tests to the ones in fast-api-sequence.js.
// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
// --always-opt is disabled because we rely on particular feedback for
// optimizing to the fastest path.
// Flags: --no-always-opt
// The test relies on optimizing/deoptimizing at predictable moments, so
// it's not suitable for deoptimization fuzzing.
// Flags: --deopt-every-n-times=0
d8.file.execute('test/mjsunit/compiler/fast-api-helpers.js');
const fast_c_api = new d8.test.FastCAPI();
assertTrue(fast_c_api.supports_fp_params);
(function () {
const max_safe_float = 2 ** 24 - 1;
const add_all_result = -42 + 45 +
Number.MIN_SAFE_INTEGER + Number.MAX_SAFE_INTEGER +
max_safe_float * 0.5 + Math.PI;
function add_all_sequence() {
const arr = [-42, 45,
Number.MIN_SAFE_INTEGER, Number.MAX_SAFE_INTEGER,
max_safe_float * 0.5, Math.PI];
return fast_c_api.add_all_sequence(false /* should_fallback */, arr);
}
ExpectFastCall(add_all_sequence, add_all_result);
})();
const max_safe_as_bigint = BigInt(Number.MAX_SAFE_INTEGER);
(function () {
function int64_test(should_fallback = false) {
let typed_array = new BigInt64Array([-42n, 1n, max_safe_as_bigint]);
return fast_c_api.add_all_int64_typed_array(false /* should_fallback */,
typed_array);
}
const expected = Number(BigInt.asIntN(64, -42n + 1n + max_safe_as_bigint));
ExpectFastCall(int64_test, expected);
})();
(function () {
function uint64_test(should_fallback = false) {
let typed_array = new BigUint64Array([max_safe_as_bigint, 1n, 2n]);
return fast_c_api.add_all_uint64_typed_array(false /* should_fallback */,
typed_array);
}
const expected = Number(BigInt.asUintN(64, max_safe_as_bigint + 1n + 2n));
ExpectFastCall(uint64_test, expected);
})();
...
@@ -1660,4 +1660,10 @@
'wasm/shared-memory-gc-stress': [SKIP],
}], # third_party_heap
##############################################################################
['arch != x64', {
# Tests that include types only supported on x64.
'compiler/fast-api-sequences-x64': [SKIP],
}], # arch != x64
]