Commit ca54b833 authored by Nico Hartmann, committed by Commit Bot

[turbofan] Fix lost exception on BigInt ops

Speculative BigInt addition fails to throw the expected exception
when called with non-BigInt inputs if the result of the computation
is unused. In particular, this CL does the following:
 - Remove kNoThrow on speculative BigInt operators
 - Fix AddWithFeedback to not lose type feedback when a builtin throws,
   eliminating existing deopt loops
 - Add handling of TypeCheckKind in RepresentationChanger where this
   was previously ignored
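
A minimal sketch of the failure mode (illustrative only; it mirrors the
regression test added at the end of this CL): with BigInt feedback, TurboFan
emits a SpeculativeBigIntAdd whose result is discarded, yet the TypeError for
mixing BigInt and Number operands must still be observable.

  function foo(x) {
    try {
      3n + x;  // result unused; must still throw when x is not a BigInt
    } catch (e) {
      return 1;
    }
    return 0;
  }
  // Expected: foo(1n) === 0 and foo(1) === 1, before and after optimization.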

Bug: chromium:1073440
Change-Id: I953a5b790fc3b37a6824f0b6546a0488c51fbb3b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2228493
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Auto-Submit: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68181}
parent a69f31c1
@@ -211,6 +211,7 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetFloat32RepresentationFor(node, output_rep, output_type,
use_info.truncation());
case MachineRepresentation::kFloat64:
DCHECK_NE(TypeCheckKind::kBigInt, use_info.type_check());
return GetFloat64RepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kBit:
@@ -403,7 +404,22 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
node);
} else if (output_rep == MachineRepresentation::kBit) {
}
if (use_info.type_check() == TypeCheckKind::kBigInt &&
!output_type.Is(Type::BigInt())) {
// BigInt checks can only be performed on tagged representations. Note that
// a corresponding check is inserted down below.
if (!CanBeTaggedPointer(output_rep)) {
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
unreachable);
}
}
if (output_rep == MachineRepresentation::kBit) {
if (output_type.Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
} else {
@@ -428,7 +444,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
op = simplified()->ChangeFloat64ToTaggedPointer();
} else if (output_type.Is(Type::BigInt())) {
} else if (output_type.Is(Type::BigInt()) &&
use_info.type_check() == TypeCheckKind::kBigInt) {
op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
@@ -1058,12 +1075,14 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
case IrOpcode::kFloat64Constant:
UNREACHABLE();
case IrOpcode::kNumberConstant: {
double const fv = OpParameter<double>(node->op());
using limits = std::numeric_limits<int64_t>;
if (fv <= limits::max() && fv >= limits::min()) {
int64_t const iv = static_cast<int64_t>(fv);
if (static_cast<double>(iv) == fv) {
return jsgraph()->Int64Constant(iv);
if (use_info.type_check() != TypeCheckKind::kBigInt) {
double const fv = OpParameter<double>(node->op());
using limits = std::numeric_limits<int64_t>;
if (fv <= limits::max() && fv >= limits::min()) {
int64_t const iv = static_cast<int64_t>(fv);
if (static_cast<double>(iv) == fv) {
return jsgraph()->Int64Constant(iv);
}
}
}
break;
@@ -1082,6 +1101,19 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
break;
}
if (use_info.type_check() == TypeCheckKind::kBigInt) {
// BigInts are only represented as tagged pointer and word64.
if (!CanBeTaggedPointer(output_rep) &&
output_rep != MachineRepresentation::kWord64) {
DCHECK(!output_type.Is(Type::BigInt()));
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
unreachable);
}
}
// Select the correct X -> Word64 operator.
const Operator* op;
if (output_type.Is(Type::None())) {
@@ -1092,6 +1124,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
CHECK(output_type.Is(Type::Boolean()));
CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
CHECK_NE(use_info.type_check(), TypeCheckKind::kNumberOrOddball);
CHECK_NE(use_info.type_check(), TypeCheckKind::kBigInt);
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
return jsgraph()->graph()->NewNode(
@@ -2839,6 +2839,11 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeBigIntAdd: {
// TODO(nicohartmann@, chromium:1073440): There should be special
// handling for truncation.IsUnused() that correctly propagates deadness,
// but preserves type checking which may throw exceptions. Until this
// is fully supported, we lower to int64 operations but keep pushing
// type constraints.
if (truncation.IsUsedAsWord64()) {
VisitBinop<T>(
node, UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
@@ -1507,16 +1507,15 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd(
BigIntOperationHint hint) {
return new (zone()) Operator1<BigIntOperationHint>(
IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable | Operator::kNoThrow,
IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable,
"SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint);
}
const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntSubtract(
BigIntOperationHint hint) {
return new (zone()) Operator1<BigIntOperationHint>(
IrOpcode::kSpeculativeBigIntSubtract,
Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntSubtract", 2,
1, 1, 1, 1, 0, hint);
IrOpcode::kSpeculativeBigIntSubtract, Operator::kFoldable,
"SpeculativeBigIntSubtract", 2, 1, 1, 1, 1, 0, hint);
}
const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
@@ -38,6 +38,7 @@ namespace internal {
V(MinusZero, "minus zero") \
V(NaN, "NaN") \
V(NoCache, "no cache") \
V(NotABigInt, "not a BigInt") \
V(NotAHeapNumber, "not a heap number") \
V(NotAJavaScriptObject, "not a JavaScript object") \
V(NotAJavaScriptObjectOrNullOrUndefined, \
@@ -69,6 +69,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
// Not overflowed.
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_id);
var_result = smi_result;
Goto(&end);
}
@@ -116,6 +118,7 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
BIND(&do_fadd);
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
TNode<Float64T> value =
Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
@@ -166,6 +169,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
&call_with_any_feedback);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kString);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_id);
var_result =
CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs);
@@ -194,6 +199,7 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
Goto(&end);
BIND(&bigint_too_big);
@@ -219,12 +225,12 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
BIND(&call_add_stub);
{
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs);
Goto(&end);
}
BIND(&end);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
return var_result.value();
}
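
The hunk above moves UpdateFeedback into the individual result branches so
that the feedback slot is updated before builtin calls that may throw, rather
than only once at the final &end label. A hedged JavaScript illustration of
the deopt loop this avoids (the function name is illustrative; the regression
test below exercises the same pattern):

  function add(a, b) {
    try { return a + b; } catch (e) { return null; }
  }
  add(1n, 2n);  // records BigInt feedback; TurboFan may speculate BigInt adds
  add(1n, 2);   // the Add builtin throws; the mixed-type feedback must still
                // be recorded, otherwise re-optimized code keeps speculating
                // BigInt and deopting on the same inputs
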
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Flags: --opt --allow-natives-syntax --no-always-opt

function foo(n) {
  try {
    let v = 0n;
    for (let i = 0; i < n; ++i) {
      v = 3n + v;
      v = i;
    }
  } catch (e) {
    return 1;
  }
  return 0;
}

%PrepareFunctionForOptimization(foo);
assertEquals(foo(1), 0);
assertEquals(foo(1), 0);
%OptimizeFunctionOnNextCall(foo);
assertEquals(foo(1), 0);
assertOptimized(foo);

%PrepareFunctionForOptimization(foo);
assertEquals(foo(2), 1);
assertUnoptimized(foo);

// Check that we learned something and do not loop deoptimizations.
%OptimizeFunctionOnNextCall(foo);
assertEquals(foo(1), 0);
assertOptimized(foo);
assertEquals(foo(2), 1);
assertOptimized(foo);