Commit 24e60017 authored by Jakob Linke, committed by V8 LUCI CQ

[maglev] Deopt on overflow in >>>

Re-enable the int32 fast path for ShiftRightLogical, but account for
Maglev's missing signed/unsigned representation tracking by a)
removing rhs==0 as the identity value (a shift by 0 is still a
signed-to-unsigned conversion) and b) deoptimizing if the result
cannot be converted to a non-negative smi.

Note this is not a deopt loop, since a non-smi result will change the
feedback to kSignedSmallInputs (from kSignedSmall).

To fix this properly, we should track signed/unsigned representations
and convert the result to a heap number if it doesn't fit within smi
range.
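
For illustration only, here is a minimal standalone sketch (plain C++, not V8
code) of why rhs==0 is not an identity for >>>: the operation reinterprets the
left operand as an unsigned 32-bit value, so a negative int32 input yields a
result outside the non-negative smi range even when the shift count is 0.

#include <cstdint>
#include <iostream>

int main() {
  int32_t lhs = -8;
  // >>> reinterprets the left operand as unsigned before shifting, so even a
  // shift count of 0 changes the observable value: -8 becomes 4294967288.
  uint32_t result = static_cast<uint32_t>(lhs) >> 0;
  // 4294967288 cannot be represented as a non-negative smi, which is why the
  // int32 fast path now deopts instead of returning the input unchanged.
  std::cout << result << "\n";  // prints 4294967288
  return 0;
}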

Bug: v8:7700
Change-Id: Ifd538d227a6f1290eb7f008d9bfad586ff91ea0f
Fixed: v8:13251
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3876366
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83025}
parent 7b49edbc
@@ -176,8 +176,7 @@ bool BinaryOperationHasInt32FastPath() {
case Operation::kBitwiseXor:
case Operation::kShiftLeft:
case Operation::kShiftRight:
// TODO(v8:13251): Implement with overflow protection.
// case Operation::kShiftRightLogical:
case Operation::kShiftRightLogical:
case Operation::kEqual:
case Operation::kStrictEqual:
case Operation::kLessThan:
@@ -205,21 +204,21 @@ bool BinaryOperationHasFloat64FastPath() {
} // namespace
// MAP_OPERATION_TO_NODES are tuples with the following format:
// (Operation name,
// Int32 operation node,
// Unit of int32 operation (e.g, 0 for add/sub and 1 for mul/div))
#define MAP_OPERATION_TO_INT32_NODE(V) \
V(Add, Int32AddWithOverflow, 0) \
V(Subtract, Int32SubtractWithOverflow, 0) \
V(Multiply, Int32MultiplyWithOverflow, 1) \
V(Divide, Int32DivideWithOverflow, 1) \
V(BitwiseAnd, Int32BitwiseAnd, ~0) \
V(BitwiseOr, Int32BitwiseOr, 0) \
V(BitwiseXor, Int32BitwiseXor, 0) \
V(ShiftLeft, Int32ShiftLeft, 0) \
V(ShiftRight, Int32ShiftRight, 0) \
/* TODO(v8:13251): Implement with overflow protection. */ \
V(ShiftRightLogical, Int32ShiftRightLogical, 0)
// - Operation name,
// - Int32 operation node,
// - Identity of int32 operation (e.g., 0 for add/sub and 1 for mul/div), if it
// exists, or otherwise {}.
#define MAP_OPERATION_TO_INT32_NODE(V) \
V(Add, Int32AddWithOverflow, 0) \
V(Subtract, Int32SubtractWithOverflow, 0) \
V(Multiply, Int32MultiplyWithOverflow, 1) \
V(Divide, Int32DivideWithOverflow, 1) \
V(BitwiseAnd, Int32BitwiseAnd, ~0) \
V(BitwiseOr, Int32BitwiseOr, 0) \
V(BitwiseXor, Int32BitwiseXor, 0) \
V(ShiftLeft, Int32ShiftLeft, 0) \
V(ShiftRight, Int32ShiftRight, 0) \
V(ShiftRightLogical, Int32ShiftRightLogical, {})
#define MAP_COMPARE_OPERATION_TO_INT32_NODE(V) \
V(Equal, Int32Equal) \
@@ -238,11 +237,11 @@ bool BinaryOperationHasFloat64FastPath() {
V(Divide, Float64Divide)
template <Operation kOperation>
static int Int32Unit() {
static constexpr base::Optional<int> Int32Identity() {
switch (kOperation) {
#define CASE(op, OpNode, unit) \
case Operation::k##op: \
return unit;
#define CASE(op, _, identity) \
case Operation::k##op: \
return identity;
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
@@ -254,8 +253,8 @@ template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewInt32BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) {
switch (kOperation) {
#define CASE(op, OpNode, unit) \
case Operation::k##op: \
#define CASE(op, OpNode, _) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
@@ -328,7 +327,7 @@ void MaglevGraphBuilder::BuildInt32BinarySmiOperationNode() {
// TODO(v8:7700): Do constant folding.
ValueNode* left = GetAccumulatorInt32();
int32_t constant = iterator_.GetImmediateOperand(0);
if (constant == Int32Unit<kOperation>()) {
if (base::Optional<int>(constant) == Int32Identity<kOperation>()) {
// If the constant is the identity of the operation, the accumulator already
// has the right value, so we can just return.
return;
......
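The identity check above now goes through base::Optional<int>. As a rough
sketch of the comparison semantics it relies on (using std::optional, which is
assumed here to behave like base::Optional): an operation without an identity,
such as ShiftRightLogical, maps to an empty optional, and a wrapped constant
never compares equal to an empty optional, so the early return is skipped and
the shift node is always emitted.

#include <cassert>
#include <optional>

int main() {
  // ShiftRightLogical maps to an empty identity ({} in the macro table),
  // while ShiftLeft keeps 0 as its identity value.
  std::optional<int> shrl_identity;
  std::optional<int> shl_identity = 0;

  // A wrapped constant never equals an empty optional, so the
  // "constant == identity" early return can never fire for >>>.
  assert(std::optional<int>(0) != shrl_identity);
  assert(std::optional<int>(0) == shl_identity);
  return 0;
}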
@@ -2297,6 +2297,13 @@ void Int32ShiftRightLogical::GenerateCode(MaglevAssembler* masm,
Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input()));
__ shrl_cl(left);
// The result of >>> is unsigned, but Maglev doesn't yet track
// signed/unsigned representations. Instead, deopt if the resulting smi would
// be negative.
// TODO(jgruber): Properly track signed/unsigned representations and
// allocate a heap number if the result is outside smi range.
__ testl(left, Immediate((1 << 31) | (1 << 30)));
EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kOverflow, this);
}
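A rough standalone sketch (plain C++, not V8 code) of the condition the
testl/EmitEagerDeoptIf pair encodes: if bit 30 or bit 31 of the 32-bit result
is set, the value is either negative when read as an int32 or too large to be
tagged as a non-negative smi here, so the generated code bails out with
DeoptimizeReason::kOverflow.

#include <cassert>
#include <cstdint>

// Mirrors the mask used above: any result with bit 30 or bit 31 set triggers
// an eager deopt.
bool WouldDeopt(uint32_t shift_result) {
  constexpr uint32_t kMask = (1u << 31) | (1u << 30);
  return (shift_result & kMask) != 0;
}

int main() {
  assert(!WouldDeopt(2u));          // 8 >>> 2 stays in smi range
  assert(WouldDeopt(2147483647u));  // -1 >>> 1 sets bit 30
  assert(WouldDeopt(4294967288u));  // -8 >>> 0 sets bit 31
  return 0;
}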
namespace {
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function gen_shrl_smi(y) {
return new Function('x', `return x >>> ${y};`);
}
function shrl_test(lhs, rhs, expected_result) {
const shrl = gen_shrl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(shrl);
shrl(1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs));
assertTrue(isMaglevved(shrl));
%DeoptimizeFunction(shrl);
assertEquals(expected_result, shrl(lhs));
}
function shrl_test_expect_deopt(lhs, rhs, expected_result) {
const shrl = gen_shrl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(shrl);
shrl(1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs));
assertFalse(isMaglevved(shrl));
}
shrl_test(8, 2, 2);
shrl_test_expect_deopt(-1, 1, 2147483647);
shrl_test(-8, 2, 1073741822);
shrl_test_expect_deopt(-8, 0, 4294967288);
shrl_test_expect_deopt(-892396978, 0, 3402570318);
shrl_test(8, 10, 0);
shrl_test(8, 33, 4);
shrl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, 1);
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function gen_shrl() {
return new Function('x', 'y', `return x >>> y;`);
}
function shrl_test(lhs, rhs, expected_result) {
const shrl = gen_shrl();
// Warmup.
%PrepareFunctionForOptimization(shrl);
shrl(1, 1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
assertTrue(isMaglevved(shrl));
%DeoptimizeFunction(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
}
function shrl_test_expect_deopt(lhs, rhs, expected_result) {
const shrl = gen_shrl();
// Warmup.
%PrepareFunctionForOptimization(shrl);
shrl(1, 1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
assertFalse(isMaglevved(shrl));
}
shrl_test(8, 2, 2);
shrl_test_expect_deopt(-1, 1, 2147483647);
shrl_test(-8, 2, 1073741822);
shrl_test(-8, 0, 4294967288);
shrl_test(-892396978, 0, 3402570318);
shrl_test(8, 10, 0);
shrl_test(8, 33, 4);
shrl_test(0xFFFFFFFF, 0x3FFFFFFF, 1);
@@ -4,55 +4,40 @@
// Flags: --allow-natives-syntax --maglev
// Checks Smi shift_right operation and deopt while untagging.
(function() {
function shift_right(x, y) {
return x >> y;
}
%PrepareFunctionForOptimization(shift_right);
assertEquals(2, shift_right(8, 2));
assertEquals(-2, shift_right(-8, 2));
assertEquals(-8, shift_right(-8, 0));
assertEquals(0, shift_right(8, 10));
assertEquals(4, shift_right(8, 33));
%OptimizeMaglevOnNextCall(shift_right);
assertEquals(2, shift_right(8, 2));
assertTrue(isMaglevved(shift_right));
assertEquals(-2, shift_right(-8, 2));
assertTrue(isMaglevved(shift_right));
assertEquals(-8, shift_right(-8, 0));
assertTrue(isMaglevved(shift_right));
assertEquals(0, shift_right(8, 10));
assertTrue(isMaglevved(shift_right));
// Shifts are mod 32
assertEquals(4, shift_right(8, 33));
assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiUntag.
// assertEquals(0x40000000, shift_right(1, 0x3FFFFFFF));
// assertFalse(isMaglevved(shift_right));
})();
// // Checks when we deopt due to tagging.
// (function() {
// function shift_right(x, y) {
// return x + y;
// }
// %PrepareFunctionForOptimization(shift_right);
// assertEquals(3, shift_right(1, 2));
// %OptimizeMaglevOnNextCall(shift_right);
// assertEquals(3, shift_right(1, 2));
// assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiTag.
// assertEquals(3.2, shift_right(1.2, 2));
// assertFalse(isMaglevved(shift_right));
// })();
function gen_sarl_smi(y) {
return new Function('x', `return x >> ${y};`);
}
function sarl_test(lhs, rhs, expected_result) {
const sarl = gen_sarl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(sarl);
sarl(1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs));
assertTrue(isMaglevved(sarl));
%DeoptimizeFunction(sarl);
assertEquals(expected_result, sarl(lhs));
}
function sarl_test_expect_deopt(lhs, rhs, expected_result) {
const sarl = gen_sarl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(sarl);
sarl(1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs));
assertFalse(isMaglevved(sarl));
}
sarl_test(8, 2, 2);
sarl_test(-8, 2, -2);
sarl_test(-8, 0, -8);
sarl_test(8, 10, 0);
sarl_test(8, 33, 4);
sarl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, -1);
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function gen_sarl() {
return new Function('x', 'y', 'return x >> y;');
}
function sarl_test(lhs, rhs, expected_result) {
const sarl = gen_sarl();
// Warmup.
%PrepareFunctionForOptimization(sarl);
sarl(1, 1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
assertTrue(isMaglevved(sarl));
%DeoptimizeFunction(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
}
function sarl_test_expect_deopt(lhs, rhs, expected_result) {
const sarl = gen_sarl();
// Warmup.
%PrepareFunctionForOptimization(sarl);
sarl(1, 1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
assertFalse(isMaglevved(sarl));
}
sarl_test(8, 2, 2);
sarl_test(-8, 2, -2);
sarl_test(-8, 0, -8);
sarl_test(8, 10, 0);
sarl_test(8, 33, 4);
sarl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, -1);