Commit b4e2a1e3 authored by Maya Lekova, committed by Commit Bot

[fastcall] Add fallback for 64-bit params on non-x64

This is a tentative fix for the linked issue: on non-64-bit target
architectures, fast API calls whose C signature takes int64/uint64
parameters now fall back to the slow path. The CL also enables all
int64/uint64 tests for fast API calls on all platforms.

Bug: chromium:1144751
Change-Id: Ie892ad625257d3b0e0bdd9ac24261b3cbeaaba62
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2520902
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71043}
parent 2a69a92c
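
Before the diff, a minimal standalone sketch of the guard this change introduces may help reading it. The types below are simplified stand-ins, not the actual V8 classes, and FLAG_turbo_fast_api_calls plus the FP-params check are omitted: the idea is to scan the C signature for 64-bit integer parameters and refuse the fast path on non-64-bit targets.

#include <vector>

// Stand-in for V8's CTypeInfo::Type (hypothetical, trimmed to what the
// sketch needs).
enum class CType { kInt32, kUint32, kInt64, kUint64, kFloat32, kFloat64 };

// Stand-in for CFunctionInfo: just the argument types of the C signature.
struct CSignature {
  std::vector<CType> args;
};

// Mirrors Has64BitIntegerParamsInSignature from the diff below: true if
// any parameter of the C signature is a 64-bit integer.
bool Has64BitIntegerParams(const CSignature& sig) {
  for (CType t : sig.args) {
    if (t == CType::kInt64 || t == CType::kUint64) return true;
  }
  return false;
}

// Mirrors the shape of the new CanOptimizeFastCall: the fast path needs a
// C function target, and on non-64-bit targets additionally a signature
// free of int64/uint64 parameters. V8_TARGET_ARCH_64_BIT comes from V8's
// build config; it is undefined outside that build, so the fallback branch
// is active when compiling this sketch standalone.
bool CanTakeFastPath(const CSignature& sig, bool has_c_function) {
  bool ok = has_c_function;
#ifndef V8_TARGET_ARCH_64_BIT
  ok = ok && !Has64BitIntegerParams(sig);  // fall back to the slow path
#endif
  return ok;
}

For example, CanTakeFastPath({{CType::kInt64}}, true) is false on a 32-bit build, while CanTakeFastPath({{CType::kInt32}}, true) is true.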
@@ -883,14 +883,13 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
 class FastApiCallReducerAssembler : public JSCallReducerAssembler {
  public:
   FastApiCallReducerAssembler(
-      JSCallReducer* reducer, Node* node, Address c_function,
-      const CFunctionInfo* c_signature,
+      JSCallReducer* reducer, Node* node,
       const FunctionTemplateInfoRef function_template_info, Node* receiver,
       Node* holder, const SharedFunctionInfoRef shared, Node* target,
       const int arity, Node* effect)
       : JSCallReducerAssembler(reducer, node),
-        c_function_(c_function),
-        c_signature_(c_signature),
+        c_function_(function_template_info.c_function()),
+        c_signature_(function_template_info.c_signature()),
         function_template_info_(function_template_info),
         receiver_(receiver),
         holder_(holder),
@@ -3468,6 +3467,40 @@ bool HasFPParamsInSignature(const CFunctionInfo* c_signature) {
 }  // namespace
 #endif
 
+#ifndef V8_TARGET_ARCH_64_BIT
+namespace {
+bool Has64BitIntegerParamsInSignature(const CFunctionInfo* c_signature) {
+  for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
+    if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kInt64 ||
+        c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kUint64) {
+      return true;
+    }
+  }
+  return false;
+}
+}  // namespace
+#endif
+
+bool CanOptimizeFastCall(
+    const FunctionTemplateInfoRef& function_template_info) {
+  const CFunctionInfo* c_signature = function_template_info.c_signature();
+
+  bool optimize_to_fast_call =
+      FLAG_turbo_fast_api_calls &&
+      function_template_info.c_function() != kNullAddress;
+#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+  optimize_to_fast_call =
+      optimize_to_fast_call && !HasFPParamsInSignature(c_signature);
+#else
+  USE(c_signature);
+#endif
+#ifndef V8_TARGET_ARCH_64_BIT
+  optimize_to_fast_call =
+      optimize_to_fast_call && !Has64BitIntegerParamsInSignature(c_signature);
+#endif
+
+  return optimize_to_fast_call;
+}
+
 Reduction JSCallReducer::ReduceCallApiFunction(
     Node* node, const SharedFunctionInfoRef& shared) {
   DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
@@ -3639,19 +3672,9 @@ Reduction JSCallReducer::ReduceCallApiFunction(
     return NoChange();
   }
 
-  Address c_function = function_template_info.c_function();
-  bool optimize_to_fast_call =
-      FLAG_turbo_fast_api_calls && c_function != kNullAddress;
-  const CFunctionInfo* c_signature = function_template_info.c_signature();
-#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
-  optimize_to_fast_call =
-      optimize_to_fast_call && !HasFPParamsInSignature(c_signature);
-#endif
-  if (optimize_to_fast_call) {
-    FastApiCallReducerAssembler a(this, node, c_function, c_signature,
-                                  function_template_info, receiver, holder,
-                                  shared, target, argc, effect);
+  if (CanOptimizeFastCall(function_template_info)) {
+    FastApiCallReducerAssembler a(this, node, function_template_info, receiver,
+                                  holder, shared, target, argc, effect);
     Node* fast_call_subgraph = a.ReduceFastApiCall();
     ReplaceWithSubgraph(&a, fast_call_subgraph);
@@ -28149,33 +28149,9 @@ TEST(FastApiCalls) {
   CallAndCheck<uint32_t>(3, Behavior::kNoException,
                          ApiCheckerResult::kFastCalled, v8_num(3.14));
 
+  // Both 32- and 64-bit platforms should execute the following tests
+  // through the slow path.
   // Corner cases - int64
-#ifdef V8_TARGET_ARCH_X64
-  CallAndCheck<int64_t>(static_cast<int64_t>(i::Smi::kMaxValue) + 1,
-                        Behavior::kNoException, ApiCheckerResult::kFastCalled,
-                        v8_num(static_cast<int64_t>(i::Smi::kMaxValue) + 1));
-  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
-                        Behavior::kNoException, ApiCheckerResult::kFastCalled,
-                        v8_num(std::numeric_limits<int64_t>::min()));
-  CallAndCheck<int64_t>(1ll << 62, Behavior::kNoException,
-                        ApiCheckerResult::kFastCalled, v8_num(1ll << 62));
-  CallAndCheck<int64_t>(i::kMaxSafeInteger, Behavior::kNoException,
-                        ApiCheckerResult::kFastCalled,
-                        v8_num(i::kMaxSafeInteger));
-  CallAndCheck<int64_t>(-i::kMaxSafeInteger, Behavior::kNoException,
-                        ApiCheckerResult::kFastCalled,
-                        v8_num(-i::kMaxSafeInteger));
-  CallAndCheck<int64_t>((1ull << 63) - 1024, Behavior::kNoException,
-                        ApiCheckerResult::kFastCalled,
-                        v8_num((1ull << 63) - 1024));
-  // TODO(mslekova): We deopt for unsafe integers, but ultimately we want to
-  // stay on the fast path.
-  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
-                        Behavior::kNoException, ApiCheckerResult::kSlowCalled,
-                        v8_num(static_cast<double>(1ull << 63)));
-  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
-                        Behavior::kNoException, ApiCheckerResult::kSlowCalled,
-                        v8_num(1ull << 63));
   CallAndCheck<int64_t>(0, Behavior::kNoException,
                         ApiCheckerResult::kSlowCalled, v8_num(std::pow(2, 65)));
   CallAndCheck<int64_t>(8192, Behavior::kNoException,
@@ -28184,8 +28160,6 @@ TEST(FastApiCalls) {
   CallAndCheck<int64_t>(0, Behavior::kNoException,
                         ApiCheckerResult::kSlowCalled,
                         v8_num(std::pow(2, 1023)));
-  CallAndCheck<int64_t>(0, Behavior::kNoException,
-                        ApiCheckerResult::kFastCalled, v8_num(-0.0));
   CallAndCheck<int64_t>(0, Behavior::kNoException,
                         ApiCheckerResult::kSlowCalled,
                         v8_num(std::numeric_limits<double>::quiet_NaN()));
@@ -28206,20 +28180,10 @@ TEST(FastApiCalls) {
                         v8::BigInt::New(isolate, 42));
   CallAndCheck<int64_t>(3, Behavior::kNoException,
                         ApiCheckerResult::kSlowCalled, v8_num(3.14));
-  CallAndCheck<int64_t>(
-      std::numeric_limits<int64_t>::min(), Behavior::kNoException,
-      ApiCheckerResult::kSlowCalled,
-      v8_num(static_cast<double>(std::numeric_limits<int64_t>::max()) + 3.14));
   CallAndCheck<int64_t>(
       0, Behavior::kNoException, ApiCheckerResult::kSlowCalled,
       v8_num(static_cast<double>(std::numeric_limits<int64_t>::max()) * 2 +
              3.14));
-  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
-                        Behavior::kNoException, ApiCheckerResult::kSlowCalled,
-                        v8_num(static_cast<double>(1ull << 63)));
-  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
-                        Behavior::kNoException, ApiCheckerResult::kSlowCalled,
-                        v8_num(-static_cast<double>(1ll << 63)));
   CallAndCheck<int64_t>(0, Behavior::kNoException,
                         ApiCheckerResult::kSlowCalled,
                         v8_num(static_cast<double>(1ull << 63) * 2));
@@ -28231,28 +28195,6 @@ TEST(FastApiCalls) {
                          v8_num(static_cast<double>(1ull << 63) * 3 + 4096));
 
   // Corner cases - uint64_t
-  CallAndCheck<uint64_t>(static_cast<uint64_t>(i::Smi::kMaxValue) + 1,
-                         Behavior::kNoException, ApiCheckerResult::kFastCalled,
-                         v8_num(static_cast<uint64_t>(i::Smi::kMaxValue) + 1));
-  CallAndCheck<uint64_t>(std::numeric_limits<uint64_t>::min(),
-                         Behavior::kNoException, ApiCheckerResult::kFastCalled,
-                         v8_num(std::numeric_limits<uint64_t>::min()));
-  CallAndCheck<uint64_t>(1ll << 62, Behavior::kNoException,
-                         ApiCheckerResult::kFastCalled, v8_num(1ll << 62));
-  CallAndCheck<uint64_t>(
-      std::numeric_limits<uint64_t>::max() - ((1ll << 62) - 1),
-      Behavior::kNoException, ApiCheckerResult::kFastCalled,
-      v8_num(-(1ll << 62)));
-  CallAndCheck<uint64_t>(i::kMaxSafeIntegerUint64, Behavior::kNoException,
-                         ApiCheckerResult::kFastCalled,
-                         v8_num(i::kMaxSafeInteger));
-  CallAndCheck<uint64_t>(
-      std::numeric_limits<uint64_t>::max() - (i::kMaxSafeIntegerUint64 - 1),
-      Behavior::kNoException, ApiCheckerResult::kFastCalled,
-      v8_num(-i::kMaxSafeInteger));
-  CallAndCheck<uint64_t>(1ull << 63, Behavior::kNoException,
-                         ApiCheckerResult::kSlowCalled,
-                         v8_num(static_cast<double>(1ull << 63)));
   CallAndCheck<uint64_t>(static_cast<double>(1ull << 63) * 2 - 2048,
                          Behavior::kNoException, ApiCheckerResult::kSlowCalled,
                          v8_num(static_cast<double>(1ull << 63) * 2 - 2048));
@@ -28261,8 +28203,6 @@ TEST(FastApiCalls) {
   CallAndCheck<uint64_t>(0, Behavior::kNoException,
                          ApiCheckerResult::kSlowCalled,
                          v8_num(static_cast<double>(1ull << 63) * 2));
-  CallAndCheck<uint64_t>(0, Behavior::kNoException,
-                         ApiCheckerResult::kFastCalled, v8_num(-0.0));
   CallAndCheck<uint64_t>(0, Behavior::kNoException,
                          ApiCheckerResult::kSlowCalled,
                          v8_num(std::numeric_limits<double>::quiet_NaN()));
@@ -28290,7 +28230,87 @@ TEST(FastApiCalls) {
   CallAndCheck<uint64_t>(static_cast<double>(1ull << 63) + 4096,
                          Behavior::kNoException, ApiCheckerResult::kSlowCalled,
                          v8_num(static_cast<double>(1ull << 63) * 3 + 4096));
-#endif  // V8_TARGET_ARCH_X64
+
+  // The following int64/uint64 tests are platform-dependent, because TurboFan
+  // currently doesn't support 64-bit integers on 32-bit architectures. If we
+  // attempted to follow the fast path on them, we would end up in unsupported
+  // situations, e.g. calling IA32OperandConverter::ToImmediate for a 64-bit
+  // operand.
+#ifdef V8_TARGET_ARCH_64_BIT
+  ApiCheckerResult expected_path_for_64bit_test = ApiCheckerResult::kFastCalled;
+#else
+  ApiCheckerResult expected_path_for_64bit_test = ApiCheckerResult::kSlowCalled;
+#endif
+
+  // Corner cases - int64
+  CallAndCheck<int64_t>(static_cast<int64_t>(i::Smi::kMaxValue) + 1,
+                        Behavior::kNoException, expected_path_for_64bit_test,
+                        v8_num(static_cast<int64_t>(i::Smi::kMaxValue) + 1));
+  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
+                        Behavior::kNoException, expected_path_for_64bit_test,
+                        v8_num(std::numeric_limits<int64_t>::min()));
+  CallAndCheck<int64_t>(1ll << 62, Behavior::kNoException,
+                        expected_path_for_64bit_test, v8_num(1ll << 62));
+  CallAndCheck<int64_t>(i::kMaxSafeInteger, Behavior::kNoException,
+                        expected_path_for_64bit_test,
+                        v8_num(i::kMaxSafeInteger));
+  CallAndCheck<int64_t>(-i::kMaxSafeInteger, Behavior::kNoException,
+                        expected_path_for_64bit_test,
+                        v8_num(-i::kMaxSafeInteger));
+  CallAndCheck<int64_t>((1ull << 63) - 1024, Behavior::kNoException,
+                        expected_path_for_64bit_test,
+                        v8_num((1ull << 63) - 1024));
+  CallAndCheck<int64_t>(0, Behavior::kNoException, expected_path_for_64bit_test,
+                        v8_num(-0.0));
+
+  // Corner cases - uint64_t
+  CallAndCheck<uint64_t>(static_cast<uint64_t>(i::Smi::kMaxValue) + 1,
+                         Behavior::kNoException, expected_path_for_64bit_test,
+                         v8_num(static_cast<uint64_t>(i::Smi::kMaxValue) + 1));
+  CallAndCheck<uint64_t>(std::numeric_limits<uint64_t>::min(),
+                         Behavior::kNoException, expected_path_for_64bit_test,
+                         v8_num(std::numeric_limits<uint64_t>::min()));
+  CallAndCheck<uint64_t>(1ll << 62, Behavior::kNoException,
+                         expected_path_for_64bit_test, v8_num(1ll << 62));
+  CallAndCheck<uint64_t>(
+      std::numeric_limits<uint64_t>::max() - ((1ll << 62) - 1),
+      Behavior::kNoException, expected_path_for_64bit_test,
+      v8_num(-(1ll << 62)));
+  CallAndCheck<uint64_t>(i::kMaxSafeIntegerUint64, Behavior::kNoException,
+                         expected_path_for_64bit_test,
+                         v8_num(i::kMaxSafeInteger));
+  CallAndCheck<uint64_t>(
+      std::numeric_limits<uint64_t>::max() - (i::kMaxSafeIntegerUint64 - 1),
+      Behavior::kNoException, expected_path_for_64bit_test,
+      v8_num(-i::kMaxSafeInteger));
+  CallAndCheck<uint64_t>(0, Behavior::kNoException,
+                         expected_path_for_64bit_test, v8_num(-0.0));
+
+#ifndef V8_TARGET_ARCH_ARM64
+  // TODO(mslekova): We deopt for unsafe integers, but ultimately we want to
+  // stay on the fast path.
+  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
+                        Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+                        v8_num(1ull << 63));
+  CallAndCheck<int64_t>(
+      std::numeric_limits<int64_t>::min(), Behavior::kNoException,
+      ApiCheckerResult::kSlowCalled,
+      v8_num(static_cast<double>(std::numeric_limits<int64_t>::max()) + 3.14));
+  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
+                        Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+                        v8_num(static_cast<double>(1ull << 63)));
+  CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
+                        Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+                        v8_num(-static_cast<double>(1ll << 63)));
+  CallAndCheck<uint64_t>(1ull << 63, Behavior::kNoException,
+                         ApiCheckerResult::kSlowCalled,
+                         v8_num(static_cast<double>(1ull << 63)));
+#else
+  // TODO(v8:11121): The tests above are currently executed only on non-arm64
+  // platforms, because on arm64 they would take the fast path due to the
+  // incorrect behaviour of CheckedFloat64ToInt64 (see the linked issue for
+  // details). Eventually we want to remove the conditional compilation and
+  // ensure consistent behaviour on all platforms.
+#endif  // V8_TARGET_ARCH_ARM64
+
   // Corner cases - float and double
 #ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
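
The comment added in the test above explains the constraint behind the platform split: TurboFan cannot represent a 64-bit integer as a single operand on 32-bit targets, so such signatures must take the slow path. A minimal standalone sketch of that constraint (not V8 code; it uses pointer size as a proxy for the machine word width):

#include <cstdint>
#include <cstdio>

int main() {
  // On a 32-bit target a general-purpose register holds 4 bytes, so a
  // 64-bit argument cannot be passed as one register/immediate operand;
  // that is the situation IA32OperandConverter::ToImmediate cannot handle.
  if (sizeof(void*) < sizeof(std::uint64_t)) {
    std::printf("64-bit integer params need the slow path on this target\n");
  } else {
    std::printf("64-bit integer params fit in a machine word here\n");
  }
  return 0;
}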