Commit ca54b833 authored by Nico Hartmann's avatar Nico Hartmann Committed by Commit Bot

[turbofan] Fix lost exception on BigInt ops

Speculative BigInt addition fails to throw the expected exception
when called with non-BigInt inputs when the result of the computation
is unused. In particular, this CL does:
 - Remove kNoThrow on speculative BigInt operators
 - Fix AddWithFeedback to not lose type feedback if builtin throws
   to elide existing deopt loops
 - Add handling of TypeCheckKind in RepresentationChanger where this
   was previously ignored

Bug: chromium:1073440
Change-Id: I953a5b790fc3b37a6824f0b6546a0488c51fbb3b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2228493
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Auto-Submit: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68181}
parent a69f31c1
...@@ -211,6 +211,7 @@ Node* RepresentationChanger::GetRepresentationFor( ...@@ -211,6 +211,7 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetFloat32RepresentationFor(node, output_rep, output_type, return GetFloat32RepresentationFor(node, output_rep, output_type,
use_info.truncation()); use_info.truncation());
case MachineRepresentation::kFloat64: case MachineRepresentation::kFloat64:
DCHECK_NE(TypeCheckKind::kBigInt, use_info.type_check());
return GetFloat64RepresentationFor(node, output_rep, output_type, return GetFloat64RepresentationFor(node, output_rep, output_type,
use_node, use_info); use_node, use_info);
case MachineRepresentation::kBit: case MachineRepresentation::kBit:
...@@ -403,7 +404,22 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor( ...@@ -403,7 +404,22 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
return jsgraph()->graph()->NewNode( return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer), jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
node); node);
} else if (output_rep == MachineRepresentation::kBit) { }
if (use_info.type_check() == TypeCheckKind::kBigInt &&
!output_type.Is(Type::BigInt())) {
// BigInt checks can only be performed on tagged representations. Note that
// a corresponding check is inserted down below.
if (!CanBeTaggedPointer(output_rep)) {
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
unreachable);
}
}
if (output_rep == MachineRepresentation::kBit) {
if (output_type.Is(Type::Boolean())) { if (output_type.Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged(); op = simplified()->ChangeBitToTagged();
} else { } else {
...@@ -428,7 +444,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor( ...@@ -428,7 +444,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
op = machine()->ChangeInt64ToFloat64(); op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node); node = jsgraph()->graph()->NewNode(op, node);
op = simplified()->ChangeFloat64ToTaggedPointer(); op = simplified()->ChangeFloat64ToTaggedPointer();
} else if (output_type.Is(Type::BigInt())) { } else if (output_type.Is(Type::BigInt()) &&
use_info.type_check() == TypeCheckKind::kBigInt) {
op = simplified()->ChangeUint64ToBigInt(); op = simplified()->ChangeUint64ToBigInt();
} else { } else {
return TypeError(node, output_rep, output_type, return TypeError(node, output_rep, output_type,
...@@ -1058,12 +1075,14 @@ Node* RepresentationChanger::GetWord64RepresentationFor( ...@@ -1058,12 +1075,14 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
case IrOpcode::kFloat64Constant: case IrOpcode::kFloat64Constant:
UNREACHABLE(); UNREACHABLE();
case IrOpcode::kNumberConstant: { case IrOpcode::kNumberConstant: {
double const fv = OpParameter<double>(node->op()); if (use_info.type_check() != TypeCheckKind::kBigInt) {
using limits = std::numeric_limits<int64_t>; double const fv = OpParameter<double>(node->op());
if (fv <= limits::max() && fv >= limits::min()) { using limits = std::numeric_limits<int64_t>;
int64_t const iv = static_cast<int64_t>(fv); if (fv <= limits::max() && fv >= limits::min()) {
if (static_cast<double>(iv) == fv) { int64_t const iv = static_cast<int64_t>(fv);
return jsgraph()->Int64Constant(iv); if (static_cast<double>(iv) == fv) {
return jsgraph()->Int64Constant(iv);
}
} }
} }
break; break;
...@@ -1082,6 +1101,19 @@ Node* RepresentationChanger::GetWord64RepresentationFor( ...@@ -1082,6 +1101,19 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
break; break;
} }
if (use_info.type_check() == TypeCheckKind::kBigInt) {
// BigInts are only represented as tagged pointer and word64.
if (!CanBeTaggedPointer(output_rep) &&
output_rep != MachineRepresentation::kWord64) {
DCHECK(!output_type.Is(Type::BigInt()));
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
unreachable);
}
}
// Select the correct X -> Word64 operator. // Select the correct X -> Word64 operator.
const Operator* op; const Operator* op;
if (output_type.Is(Type::None())) { if (output_type.Is(Type::None())) {
...@@ -1092,6 +1124,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor( ...@@ -1092,6 +1124,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
CHECK(output_type.Is(Type::Boolean())); CHECK(output_type.Is(Type::Boolean()));
CHECK_NE(use_info.type_check(), TypeCheckKind::kNone); CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
CHECK_NE(use_info.type_check(), TypeCheckKind::kNumberOrOddball); CHECK_NE(use_info.type_check(), TypeCheckKind::kNumberOrOddball);
CHECK_NE(use_info.type_check(), TypeCheckKind::kBigInt);
Node* unreachable = Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi); InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
return jsgraph()->graph()->NewNode( return jsgraph()->graph()->NewNode(
......
...@@ -2839,6 +2839,11 @@ class RepresentationSelector { ...@@ -2839,6 +2839,11 @@ class RepresentationSelector {
return; return;
} }
case IrOpcode::kSpeculativeBigIntAdd: { case IrOpcode::kSpeculativeBigIntAdd: {
// TODO(nicohartmann@, chromium:1073440): There should be special
// handling for truncation.IsUnused() that correctly propagates deadness,
// but preserves type checking which may throw exceptions. Until this
// is fully supported, we lower to int64 operations but keep pushing
// type constraints.
if (truncation.IsUsedAsWord64()) { if (truncation.IsUsedAsWord64()) {
VisitBinop<T>( VisitBinop<T>(
node, UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}), node, UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
......
...@@ -1507,16 +1507,15 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole( ...@@ -1507,16 +1507,15 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd( const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd(
BigIntOperationHint hint) { BigIntOperationHint hint) {
return new (zone()) Operator1<BigIntOperationHint>( return new (zone()) Operator1<BigIntOperationHint>(
IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable | Operator::kNoThrow, IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable,
"SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint); "SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint);
} }
const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntSubtract( const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntSubtract(
BigIntOperationHint hint) { BigIntOperationHint hint) {
return new (zone()) Operator1<BigIntOperationHint>( return new (zone()) Operator1<BigIntOperationHint>(
IrOpcode::kSpeculativeBigIntSubtract, IrOpcode::kSpeculativeBigIntSubtract, Operator::kFoldable,
Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntSubtract", 2, "SpeculativeBigIntSubtract", 2, 1, 1, 1, 1, 0, hint);
1, 1, 1, 1, 0, hint);
} }
const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate( const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
......
...@@ -38,6 +38,7 @@ namespace internal { ...@@ -38,6 +38,7 @@ namespace internal {
V(MinusZero, "minus zero") \ V(MinusZero, "minus zero") \
V(NaN, "NaN") \ V(NaN, "NaN") \
V(NoCache, "no cache") \ V(NoCache, "no cache") \
V(NotABigInt, "not a BigInt") \
V(NotAHeapNumber, "not a heap number") \ V(NotAHeapNumber, "not a heap number") \
V(NotAJavaScriptObject, "not a JavaScript object") \ V(NotAJavaScriptObject, "not a JavaScript object") \
V(NotAJavaScriptObjectOrNullOrUndefined, \ V(NotAJavaScriptObjectOrNullOrUndefined, \
......
...@@ -69,6 +69,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback( ...@@ -69,6 +69,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
// Not overflowed. // Not overflowed.
{ {
var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall); var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_id);
var_result = smi_result; var_result = smi_result;
Goto(&end); Goto(&end);
} }
...@@ -116,6 +118,7 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback( ...@@ -116,6 +118,7 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
BIND(&do_fadd); BIND(&do_fadd);
{ {
var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber); var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
TNode<Float64T> value = TNode<Float64T> value =
Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value()); Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
TNode<HeapNumber> result = AllocateHeapNumberWithValue(value); TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
...@@ -166,6 +169,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback( ...@@ -166,6 +169,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
&call_with_any_feedback); &call_with_any_feedback);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kString); var_type_feedback = SmiConstant(BinaryOperationFeedback::kString);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
slot_id);
var_result = var_result =
CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs); CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs);
...@@ -194,6 +199,7 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback( ...@@ -194,6 +199,7 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big); GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt); var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
Goto(&end); Goto(&end);
BIND(&bigint_too_big); BIND(&bigint_too_big);
...@@ -219,12 +225,12 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback( ...@@ -219,12 +225,12 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
BIND(&call_add_stub); BIND(&call_add_stub);
{ {
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs); var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs);
Goto(&end); Goto(&end);
} }
BIND(&end); BIND(&end);
UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
return var_result.value(); return var_result.value();
} }
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --opt --allow-natives-syntax --no-always-opt
// Regression test for chromium:1073440: speculative BigInt addition must
// still throw when given a non-BigInt input, even though the result of the
// addition is dead (v is overwritten by `v = i` before it is ever read).
function foo(n) {
  try {
    let v = 0n;
    for (let i = 0; i < n; ++i) {
      // First iteration: BigInt + BigInt (collects BigInt feedback).
      // Second iteration (n >= 2): v is a Smi from `v = i`, so 3n + v must
      // throw a TypeError — this is the exception the buggy compiler lost.
      v = 3n + v;
      v = i;
    }
  } catch (e) {
    return 1; // expected TypeError on mixed BigInt/Number addition
  }
  return 0; // no exception thrown
}

%PrepareFunctionForOptimization(foo);
assertEquals(foo(1), 0);
assertEquals(foo(1), 0);
%OptimizeFunctionOnNextCall(foo);
assertEquals(foo(1), 0);
assertOptimized(foo);
%PrepareFunctionForOptimization(foo);
// n = 2 hits the BigInt + Smi case: optimized code must throw (return 1)
// and deopt rather than silently returning 0.
assertEquals(foo(2), 1);
assertUnoptimized(foo);
// Check that we learned something and do not loop deoptimizations.
%OptimizeFunctionOnNextCall(foo);
assertEquals(foo(1), 0);
assertOptimized(foo);
// Re-optimized code keeps throwing correctly without deopting again
// (the feedback fix prevents a deopt loop).
assertEquals(foo(2), 1);
assertOptimized(foo);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment