Commit d4b29d75 authored by Maya Lekova, committed by Commit Bot

[turbofan] Fix CheckedFloat64ToInt64 on arm64

This CL extends the TruncateFloat64ToInt64 machine operator with a
TruncateKind, allowing EffectControlLinearizer to request truncating
to INT64_MIN in case of overflow. The CL adds the necessary low-level
support when generating code for kArm64Float64ToInt64. It also enables
relevant tests as part of the fast API call suite.

Bug: v8:11121
Change-Id: I0cb9964cc3c2ff49e6b0bbfe4a20f280e4aab337
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2560718
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71449}
parent 03758904
......@@ -1645,15 +1645,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cset(i.OutputRegister(1), vc);
}
break;
case kArm64Float64ToInt64:
case kArm64Float64ToInt64: {
__ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
if (i.OutputCount() > 1) {
bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
DCHECK_IMPLIES(set_overflow_to_min_i64, i.OutputCount() == 1);
if (set_overflow_to_min_i64) {
// Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
// because INT64_MIN allows easier out-of-bounds detection.
__ Cmn(i.OutputRegister64(), 1);
__ Csinc(i.OutputRegister64(), i.OutputRegister64(),
i.OutputRegister64(), vc);
} else if (i.OutputCount() > 1) {
// See kArm64Float32ToInt64 for a detailed description.
__ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
__ Ccmp(i.OutputRegister(0), -1, VFlag, ge);
__ Cset(i.OutputRegister(1), vc);
}
break;
}
case kArm64Float32ToUint64:
__ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
if (i.OutputCount() > 1) {
......
......@@ -1389,7 +1389,6 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(ChangeFloat64ToInt64, kArm64Float64ToInt64) \
V(ChangeFloat64ToUint32, kArm64Float64ToUint32) \
V(ChangeFloat64ToUint64, kArm64Float64ToUint64) \
V(TruncateFloat64ToInt64, kArm64Float64ToInt64) \
V(TruncateFloat64ToUint32, kArm64Float64ToUint32) \
V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32) \
V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
......@@ -1807,6 +1806,18 @@ void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
}
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
  // Selects the arm64 float64->int64 truncation instruction. The requested
  // overflow behaviour is carried in the operator's TruncateKind parameter:
  // for kSetOverflowToMin we set the MiscField bit so code generation emits
  // the variant that saturates overflow to INT64_MIN instead of the
  // architecture default.
  Arm64OperandGenerator g(this);
  TruncateKind const kind = OpParameter<TruncateKind>(node->op());
  // MiscField::encode(false) contributes no bits, so this is equivalent to
  // only or-ing the field in the kSetOverflowToMin case.
  InstructionCode const opcode =
      kArm64Float64ToInt64 |
      MiscField::encode(kind == TruncateKind::kSetOverflowToMin);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
Arm64OperandGenerator g(this);
......
......@@ -2607,7 +2607,12 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
Node* EffectControlLinearizer::BuildCheckedFloat64ToIndex(
const FeedbackSource& feedback, Node* value, Node* frame_state) {
if (machine()->Is64()) {
Node* value64 = __ TruncateFloat64ToInt64(value);
Node* value64 =
__ TruncateFloat64ToInt64(value, TruncateKind::kArchitectureDefault);
// The TruncateKind above means there will be a precision loss in case
// INT64_MAX input is passed, but that precision loss would not be
// detected and would not lead to a deoptimization from the first check.
// But in this case, we'll deopt anyway because of the following checks.
Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64));
__ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
check_same, frame_state);
......@@ -2641,7 +2646,8 @@ Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt64(
CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value,
Node* frame_state) {
Node* value64 = __ TruncateFloat64ToInt64(value);
Node* value64 =
__ TruncateFloat64ToInt64(value, TruncateKind::kSetOverflowToMin);
Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64));
__ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
check_same, frame_state);
......
......@@ -495,6 +495,11 @@ Node* GraphAssembler::Float64RoundTruncate(Node* value) {
graph()->NewNode(machine()->Float64RoundTruncate().op(), value));
}
Node* GraphAssembler::TruncateFloat64ToInt64(Node* value, TruncateKind kind) {
  // Emits a TruncateFloat64ToInt64 machine node for |value|; |kind| selects
  // the overflow behaviour (architecture default vs. saturate to INT64_MIN).
  const Operator* op = machine()->TruncateFloat64ToInt64(kind);
  return AddNode(graph()->NewNode(op, value));
}
Node* GraphAssembler::Projection(int index, Node* value) {
return AddNode(
graph()->NewNode(common()->Projection(index), value, control()));
......
......@@ -48,7 +48,6 @@ class BasicBlock;
V(Float64SilenceNaN) \
V(RoundFloat64ToInt32) \
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt64) \
V(TruncateFloat64ToWord32) \
V(TruncateInt64ToInt32) \
V(Word32ReverseBytes) \
......@@ -284,6 +283,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Float64RoundDown(Node* value);
Node* Float64RoundTruncate(Node* value);
Node* TruncateFloat64ToInt64(Node* value, TruncateKind kind);
Node* BitcastWordToTagged(Node* value);
Node* BitcastWordToTaggedSigned(Node* value);
......
......@@ -271,7 +271,6 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(ChangeFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
......@@ -1138,6 +1137,25 @@ const Operator* MachineOperatorBuilder::TruncateFloat32ToInt32(
}
}
// Operator1<TruncateKind> instantiation for TruncateFloat64ToInt64. One
// distinct type (and therefore one cached singleton, via GetCachedOperator)
// exists per TruncateKind template argument, so operator pointer equality
// implies equal kinds. The integer arguments are the Operator1 input/output
// counts (1 value input, 1 value output, no effects or control).
template <TruncateKind kind>
struct TruncateFloat64ToInt64Operator : Operator1<TruncateKind> {
  TruncateFloat64ToInt64Operator()
      : Operator1(IrOpcode::kTruncateFloat64ToInt64, Operator::kPure,
                  "TruncateFloat64ToInt64", 1, 0, 0, 1, 0, 0, kind) {}
};
// Returns the cached TruncateFloat64ToInt64 operator for the requested
// overflow behaviour. The switch is exhaustive over TruncateKind; falling
// off the end is unreachable for valid enum values.
const Operator* MachineOperatorBuilder::TruncateFloat64ToInt64(
    TruncateKind kind) {
  switch (kind) {
    case TruncateKind::kArchitectureDefault:
      return GetCachedOperator<
          TruncateFloat64ToInt64Operator<TruncateKind::kArchitectureDefault>>();
    case TruncateKind::kSetOverflowToMin:
      return GetCachedOperator<
          TruncateFloat64ToInt64Operator<TruncateKind::kSetOverflowToMin>>();
  }
}
// Hash support for TruncateKind: an enum value hashes to its underlying
// integral value.
size_t hash_value(TruncateKind kind) {
  return static_cast<size_t>(kind);
}
std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
......
......@@ -465,7 +465,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* ChangeFloat64ToInt64();
const Operator* ChangeFloat64ToUint32(); // narrowing
const Operator* ChangeFloat64ToUint64();
const Operator* TruncateFloat64ToInt64();
const Operator* TruncateFloat64ToInt64(TruncateKind kind);
const Operator* TruncateFloat64ToUint32();
const Operator* TruncateFloat32ToInt32(TruncateKind kind);
const Operator* TruncateFloat32ToUint32(TruncateKind kind);
......
......@@ -28250,13 +28250,10 @@ TEST(FastApiCalls) {
CallAndCheck<uint64_t>(0, Behavior::kNoException,
expected_path_for_64bit_test, v8_num(-0.0));
#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64)
// TODO(v8:11121): Currently the tests below are executed for non-arm64
// and non-mips64 because they fall down the fast path due to incorrect
// behaviour of CheckedFloat64ToInt64 on arm64 and mips64 (see the
// linked issue for details). Eventually we want to remove the conditional
// compilation and ensure consistent behaviour on all platforms.
#else
// TODO(v8:11121): Currently the tests below are successful only for
// non-mips64 because they fall down the fast path due to incorrect
// behaviour of CheckedFloat64ToInt64 on mips64 (see the
// linked issue for details). Please port the arm64 fix from v8:11121.
// TODO(mslekova): We deopt for unsafe integers, but ultimately we want to
// stay on the fast path.
CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
......@@ -28275,7 +28272,6 @@ TEST(FastApiCalls) {
CallAndCheck<uint64_t>(1ull << 63, Behavior::kNoException,
ApiCheckerResult::kSlowCalled,
v8_num(static_cast<double>(1ull << 63)));
#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS64
// Corner cases - float and double
#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment