Commit 178f2eeb authored by Jakob Linke, committed by V8 LUCI CQ

Reland "[maglev] Deopt on overflow in >>>"

This is a reland of commit 24e60017

The reland changes %ClearFunctionFeedback to clear *all* feedback
slot kinds, including binary/compare/for-in slots. In the tests we
thus no longer have to resort to tricks to restore the function to
its initial state; instead we simply call %ClearFunctionFeedback.

Original change's description:
> [maglev] Deopt on overflow in >>>
>
> Re-enable the int32 fast path for ShiftRightLogical, but account for
> Maglev's missing signed/unsigned representation tracking by a)
> removing rhs==0 as the identity value (a shift by 0 is still a
> signed-to-unsigned conversion) and b) deoptimizing if the result cannot
> be converted to a non-negative smi.
>
> Note this is not a deopt loop, since a non-smi result will change the
> feedback to kSignedSmallInputs (from kSignedSmall).
>
> To fix this properly, we should track signed/unsigned representations
> and convert the result to a heap number if it doesn't fit within smi
> range.
>
> Bug: v8:7700
> Change-Id: Ifd538d227a6f1290eb7f008d9bfad586ff91ea0f
> Fixed: v8:13251
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3876366
> Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> Commit-Queue: Jakob Linke <jgruber@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#83025}

Bug: v8:7700
Change-Id: I2f607a0fb863b80e8589c9c1e86ee31fbac48c25
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879491
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83057}
parent 3c4654da
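
For intuition, the JavaScript-level behavior that motivates the deopt can be reproduced in a minimal standalone C++ sketch (illustrative only, not V8 code; ShiftRightLogical here emulates JS >>> on int32 inputs per the spec: reinterpret as uint32, shift count mod 32):

#include <cstdint>
#include <cstdio>

// Emulates JavaScript's >>> for int32 inputs: reinterpret the left operand
// as unsigned, then shift by the count mod 32.
uint32_t ShiftRightLogical(int32_t lhs, int32_t rhs) {
  return static_cast<uint32_t>(lhs) >> (rhs & 31);
}

int main() {
  // A shift count of 0 is not an identity: it still performs the
  // signed-to-unsigned conversion.
  printf("%u\n", ShiftRightLogical(-8, 0));  // 4294967288, outside smi range
  // -1 >>> 1 is 2147483647, which also exceeds the non-negative smi range.
  printf("%u\n", ShiftRightLogical(-1, 1));  // 2147483647
  // Small non-negative results stay on the int32 fast path.
  printf("%u\n", ShiftRightLogical(8, 2));   // 2
  return 0;
}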
......@@ -176,8 +176,7 @@ bool BinaryOperationHasInt32FastPath() {
case Operation::kBitwiseXor:
case Operation::kShiftLeft:
case Operation::kShiftRight:
// TODO(v8:13251): Implement with overflow protection.
// case Operation::kShiftRightLogical:
case Operation::kShiftRightLogical:
case Operation::kEqual:
case Operation::kStrictEqual:
case Operation::kLessThan:
......@@ -205,21 +204,21 @@ bool BinaryOperationHasFloat64FastPath() {
} // namespace
// MAP_OPERATION_TO_NODES are tuples with the following format:
// (Operation name,
// Int32 operation node,
// Unit of int32 operation (e.g, 0 for add/sub and 1 for mul/div))
#define MAP_OPERATION_TO_INT32_NODE(V) \
V(Add, Int32AddWithOverflow, 0) \
V(Subtract, Int32SubtractWithOverflow, 0) \
V(Multiply, Int32MultiplyWithOverflow, 1) \
V(Divide, Int32DivideWithOverflow, 1) \
V(BitwiseAnd, Int32BitwiseAnd, ~0) \
V(BitwiseOr, Int32BitwiseOr, 0) \
V(BitwiseXor, Int32BitwiseXor, 0) \
V(ShiftLeft, Int32ShiftLeft, 0) \
V(ShiftRight, Int32ShiftRight, 0) \
/* TODO(v8:13251): Implement with overflow protection. */ \
V(ShiftRightLogical, Int32ShiftRightLogical, 0)
// - Operation name,
// - Int32 operation node,
// - Identity of int32 operation (e.g., 0 for add/sub and 1 for mul/div), if it
// exists, or otherwise {}.
#define MAP_OPERATION_TO_INT32_NODE(V) \
V(Add, Int32AddWithOverflow, 0) \
V(Subtract, Int32SubtractWithOverflow, 0) \
V(Multiply, Int32MultiplyWithOverflow, 1) \
V(Divide, Int32DivideWithOverflow, 1) \
V(BitwiseAnd, Int32BitwiseAnd, ~0) \
V(BitwiseOr, Int32BitwiseOr, 0) \
V(BitwiseXor, Int32BitwiseXor, 0) \
V(ShiftLeft, Int32ShiftLeft, 0) \
V(ShiftRight, Int32ShiftRight, 0) \
V(ShiftRightLogical, Int32ShiftRightLogical, {})
#define MAP_COMPARE_OPERATION_TO_INT32_NODE(V) \
V(Equal, Int32Equal) \
......@@ -238,11 +237,11 @@ bool BinaryOperationHasFloat64FastPath() {
V(Divide, Float64Divide)
template <Operation kOperation>
static int Int32Unit() {
static constexpr base::Optional<int> Int32Identity() {
switch (kOperation) {
#define CASE(op, OpNode, unit) \
case Operation::k##op: \
return unit;
#define CASE(op, _, identity) \
case Operation::k##op: \
return identity;
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
......@@ -254,8 +253,8 @@ template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewInt32BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) {
switch (kOperation) {
#define CASE(op, OpNode, unit) \
case Operation::k##op: \
#define CASE(op, OpNode, _) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
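
The dispatch above uses the X-macro pattern: the list macro invokes V once per (name, node, identity) tuple, and each use site plugs in its own CASE definition. A standalone sketch of the pattern (hypothetical names, not the V8 macros):

#include <cassert>

// The list macro: one V(...) invocation per operation tuple.
#define MAP_OPS(V) \
  V(Add, 0)        \
  V(Multiply, 1)

enum class Operation { kAdd, kMultiply };

// Each use site defines CASE, expands the list, then undefines CASE.
int Identity(Operation op) {
  switch (op) {
#define CASE(op_name, identity) \
  case Operation::k##op_name:   \
    return identity;
    MAP_OPS(CASE)
#undef CASE
  }
  return -1;  // unreachable for valid inputs
}

int main() {
  assert(Identity(Operation::kAdd) == 0);
  assert(Identity(Operation::kMultiply) == 1);
  return 0;
}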
......@@ -328,7 +327,7 @@ void MaglevGraphBuilder::BuildInt32BinarySmiOperationNode() {
// TODO(v8:7700): Do constant folding.
ValueNode* left = GetAccumulatorInt32();
int32_t constant = iterator_.GetImmediateOperand(0);
if (constant == Int32Unit<kOperation>()) {
if (base::Optional<int>(constant) == Int32Identity<kOperation>()) {
// If the constant is the identity of the operation, it already has the right
// value, so we can just return.
return;
......
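
The `{}` identity for ShiftRightLogical works because comparing an engaged optional against an empty one is always false. A minimal standalone sketch using std::optional (base::Optional behaves the same for this comparison):

#include <cassert>
#include <optional>

// Operations without an identity constant return an empty optional, so the
// "constant is the identity, skip the node" shortcut is never taken for them.
bool TakesIdentityShortcut(int constant, std::optional<int> identity) {
  return std::optional<int>(constant) == identity;
}

int main() {
  assert(TakesIdentityShortcut(0, 0));              // x << 0: node can be elided
  assert(!TakesIdentityShortcut(0, std::nullopt));  // x >>> 0: must emit the node
  return 0;
}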
......@@ -2056,6 +2056,13 @@ void Int32ShiftRightLogical::GenerateCode(MaglevAssembler* masm,
Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input()));
__ shrl_cl(left);
// The result of >>> is unsigned, but Maglev doesn't yet track
// signed/unsigned representations. Instead, deopt if the resulting smi would
// be negative.
// TODO(jgruber): Properly track signed/unsigned representations and
// allocate a heap number if the result is outside smi range.
__ testl(left, Immediate((1 << 31) | (1 << 30)));
EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kOverflow, this);
}
namespace {
......
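
The mask (1 << 31) | (1 << 30) encodes "fits in a non-negative smi": assuming 31-bit smis (the pointer-compression configuration), non-negative smis lie in [0, 2^30), so bits 30 and 31 of the 32-bit result must both be clear. A standalone sketch of the predicate (illustrative, not V8 code), with the deopt cases matching the tests below:

#include <cassert>
#include <cstdint>

// True iff a 32-bit >>> result fits a non-negative smi, assuming 31-bit smis.
// testl sets ZF when (result & mask) == 0; EmitEagerDeoptIf(not_equal, ...)
// therefore deopts exactly when either bit is set.
bool FitsNonNegativeSmi(uint32_t result) {
  constexpr uint32_t kMask = (1u << 31) | (1u << 30);
  return (result & kMask) == 0;
}

int main() {
  assert(FitsNonNegativeSmi(2u));            // 8 >>> 2: stays optimized
  assert(FitsNonNegativeSmi(1073741822u));   // -8 >>> 2: still below 2^30
  assert(!FitsNonNegativeSmi(2147483647u));  // -1 >>> 1: bit 30 set, deopt
  assert(!FitsNonNegativeSmi(4294967288u));  // -8 >>> 0: bit 31 set, deopt
  return 0;
}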
......@@ -479,7 +479,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
}
}
bool FeedbackVector::ClearSlots(Isolate* isolate) {
bool FeedbackVector::ClearSlots(Isolate* isolate, ClearBehavior behavior) {
if (!shared_function_info().HasFeedbackMetadata()) return false;
MaybeObject uninitialized_sentinel = MaybeObject::FromObject(
FeedbackVector::RawUninitializedSentinel(isolate));
......@@ -492,7 +492,7 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
MaybeObject obj = Get(slot);
if (obj != uninitialized_sentinel) {
FeedbackNexus nexus(*this, slot);
feedback_updated |= nexus.Clear();
feedback_updated |= nexus.Clear(behavior);
}
}
return feedback_updated;
......@@ -609,23 +609,37 @@ void FeedbackNexus::ConfigureUninitialized() {
}
}
bool FeedbackNexus::Clear() {
bool FeedbackNexus::Clear(ClearBehavior behavior) {
bool feedback_updated = false;
switch (kind()) {
case FeedbackSlotKind::kTypeProfile:
// We don't clear these kinds ever.
if (V8_LIKELY(behavior == ClearBehavior::kDefault)) {
// We don't clear these kinds ever.
} else if (!IsCleared()) {
DCHECK_EQ(behavior, ClearBehavior::kClearAll);
SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER);
feedback_updated = true;
}
break;
case FeedbackSlotKind::kCompareOp:
case FeedbackSlotKind::kForIn:
case FeedbackSlotKind::kBinaryOp:
// We don't clear these, either.
if (V8_LIKELY(behavior == ClearBehavior::kDefault)) {
// We don't clear these, either.
} else if (!IsCleared()) {
DCHECK_EQ(behavior, ClearBehavior::kClearAll);
SetFeedback(Smi::zero(), SKIP_WRITE_BARRIER);
feedback_updated = true;
}
break;
case FeedbackSlotKind::kLiteral:
SetFeedback(Smi::zero(), SKIP_WRITE_BARRIER);
feedback_updated = true;
if (!IsCleared()) {
SetFeedback(Smi::zero(), SKIP_WRITE_BARRIER);
feedback_updated = true;
}
break;
case FeedbackSlotKind::kSetNamedSloppy:
......
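
A distilled sketch of the new dispatch (hypothetical standalone types, not the V8 classes): by default the BinaryOp/CompareOp/ForIn and TypeProfile kinds survive a clear, and only kClearAll, which %ClearFunctionFeedback now uses, wipes them too:

#include <cassert>

enum class ClearBehavior { kDefault, kClearAll };
enum class SlotKind { kBinaryOp, kCompareOp, kForIn, kTypeProfile, kLiteral };

// Mirrors the switch in FeedbackNexus::Clear: some kinds are only cleared
// when explicitly requested via kClearAll.
bool ShouldClear(SlotKind kind, ClearBehavior behavior) {
  switch (kind) {
    case SlotKind::kBinaryOp:
    case SlotKind::kCompareOp:
    case SlotKind::kForIn:
    case SlotKind::kTypeProfile:
      return behavior == ClearBehavior::kClearAll;
    default:
      return true;  // e.g. kLiteral: always cleared
  }
}

int main() {
  assert(!ShouldClear(SlotKind::kBinaryOp, ClearBehavior::kDefault));
  assert(ShouldClear(SlotKind::kBinaryOp, ClearBehavior::kClearAll));
  assert(ShouldClear(SlotKind::kLiteral, ClearBehavior::kDefault));
  return 0;
}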
......@@ -28,6 +28,12 @@ class IsCompiledScope;
enum class UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback };
// Which feedback slots to clear in Clear().
enum class ClearBehavior {
kDefault,
kClearAll, // ... also ForIn, CompareOp, BinaryOp.
};
enum class FeedbackSlotKind : uint8_t {
// This kind means that the slot points to the middle of other slot
// which occupies more than one feedback vector element.
......@@ -357,7 +363,14 @@ class FeedbackVector
void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot);
// Clears the vector slots. Returns true if feedback has changed.
bool ClearSlots(Isolate* isolate);
bool ClearSlots(Isolate* isolate) {
return ClearSlots(isolate, ClearBehavior::kDefault);
}
// As above, but clears *all* slots - even those that we usually keep (e.g.:
// BinaryOp feedback).
bool ClearAllSlotsForTesting(Isolate* isolate) {
return ClearSlots(isolate, ClearBehavior::kClearAll);
}
// The object that indicates an uninitialized cache.
static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
......@@ -384,6 +397,8 @@ class FeedbackVector
TQ_OBJECT_CONSTRUCTORS(FeedbackVector)
private:
bool ClearSlots(Isolate* isolate, ClearBehavior behavior);
static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector);
......@@ -811,7 +826,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
}
// Clear() returns true if the state of the underlying vector was changed.
bool Clear();
bool Clear(ClearBehavior behavior);
void ConfigureUninitialized();
// ConfigureMegamorphic() returns true if the state of the underlying vector
// was changed. Extra feedback is cleared if the 0 parameter version is used.
......
......@@ -1364,14 +1364,14 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
static_cast<unsigned>(JSObject::kMaxInstanceSize));
}
void JSFunction::ClearTypeFeedbackInfo() {
void JSFunction::ClearAllTypeFeedbackInfoForTesting() {
ResetIfCodeFlushed();
if (has_feedback_vector()) {
FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate();
if (vector.ClearSlots(isolate)) {
if (vector.ClearAllSlotsForTesting(isolate)) {
IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(),
"ClearTypeFeedbackInfo");
"ClearAllTypeFeedbackInfoForTesting");
}
}
}
......
......@@ -243,8 +243,9 @@ class JSFunction : public TorqueGeneratedJSFunction<
IsCompiledScope* compiled_scope,
bool reset_budget_for_feedback_allocation);
// Unconditionally clear the type feedback vector.
void ClearTypeFeedbackInfo();
// Unconditionally clear the type feedback vector, including slots that we
// usually keep (e.g.: BinaryOp feedback).
void ClearAllTypeFeedbackInfoForTesting();
// Resets function to clear compiled data after bytecode has been flushed.
inline bool NeedsResetDueToFlushedBytecode();
......
......@@ -952,7 +952,7 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0);
function->ClearTypeFeedbackInfo();
function->ClearAllTypeFeedbackInfoForTesting();
return ReadOnlyRoots(isolate).undefined_value();
}
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function gen_shrl_smi(y) {
return function shrl_smi(x) { return x >>> y; };
}
function shrl_test(lhs, rhs, expected_result) {
const shrl = gen_shrl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(shrl);
%ClearFunctionFeedback(shrl);
shrl(1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs));
assertTrue(isMaglevved(shrl));
%DeoptimizeFunction(shrl);
assertEquals(expected_result, shrl(lhs));
}
function shrl_test_expect_deopt(lhs, rhs, expected_result) {
const shrl = gen_shrl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(shrl);
%ClearFunctionFeedback(shrl);
shrl(1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs));
assertFalse(isMaglevved(shrl));
}
shrl_test(8, 2, 2);
shrl_test_expect_deopt(-1, 1, 2147483647);
shrl_test(-8, 2, 1073741822);
shrl_test_expect_deopt(-8, 0, 4294967288);
shrl_test_expect_deopt(-892396978, 0, 3402570318);
shrl_test(8, 10, 0);
shrl_test(8, 33, 4);
shrl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, 1);
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function shrl(x, y) {
return x >>> y;
}
function shrl_test(lhs, rhs, expected_result) {
// Warmup.
%PrepareFunctionForOptimization(shrl);
%ClearFunctionFeedback(shrl);
shrl(1, 1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
assertTrue(isMaglevved(shrl));
%DeoptimizeFunction(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
}
function shrl_test_expect_deopt(lhs, rhs, expected_result) {
// Warmup.
%PrepareFunctionForOptimization(shrl);
%ClearFunctionFeedback(shrl);
shrl(1, 1);
%OptimizeMaglevOnNextCall(shrl);
assertEquals(expected_result, shrl(lhs, rhs));
assertFalse(isMaglevved(shrl));
}
shrl_test(8, 2, 2);
shrl_test_expect_deopt(-1, 1, 2147483647);
shrl_test(-8, 2, 1073741822);
shrl_test_expect_deopt(-8, 0, 4294967288);
shrl_test_expect_deopt(-892396978, 0, 3402570318);
shrl_test(8, 10, 0);
shrl_test(8, 33, 4);
shrl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, 1);
......@@ -4,55 +4,42 @@
// Flags: --allow-natives-syntax --maglev
// Checks Smi shift_right operation and deopt while untagging.
(function() {
function shift_right(x, y) {
return x >> y;
}
%PrepareFunctionForOptimization(shift_right);
assertEquals(2, shift_right(8, 2));
assertEquals(-2, shift_right(-8, 2));
assertEquals(-8, shift_right(-8, 0));
assertEquals(0, shift_right(8, 10));
assertEquals(4, shift_right(8, 33));
%OptimizeMaglevOnNextCall(shift_right);
assertEquals(2, shift_right(8, 2));
assertTrue(isMaglevved(shift_right));
assertEquals(-2, shift_right(-8, 2));
assertTrue(isMaglevved(shift_right));
assertEquals(-8, shift_right(-8, 0));
assertTrue(isMaglevved(shift_right));
assertEquals(0, shift_right(8, 10));
assertTrue(isMaglevved(shift_right));
// Shifts are mod 32
assertEquals(4, shift_right(8, 33));
assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiUntag.
// assertEquals(0x40000000, shift_right(1, 0x3FFFFFFF));
// assertFalse(isMaglevved(shift_right));
})();
// // Checks when we deopt due to tagging.
// (function() {
// function shift_right(x, y) {
// return x + y;
// }
// %PrepareFunctionForOptimization(shift_right);
// assertEquals(3, shift_right(1, 2));
// %OptimizeMaglevOnNextCall(shift_right);
// assertEquals(3, shift_right(1, 2));
// assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiTag.
// assertEquals(3.2, shift_right(1.2, 2));
// assertFalse(isMaglevved(shift_right));
// })();
function gen_sarl_smi(y) {
return function sarl_smi(x) { return x >> y; };
}
function sarl_test(lhs, rhs, expected_result) {
const sarl = gen_sarl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(sarl);
%ClearFunctionFeedback(sarl);
sarl(1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs));
assertTrue(isMaglevved(sarl));
%DeoptimizeFunction(sarl);
assertEquals(expected_result, sarl(lhs));
}
function sarl_test_expect_deopt(lhs, rhs, expected_result) {
const sarl = gen_sarl_smi(rhs);
// Warmup.
%PrepareFunctionForOptimization(sarl);
%ClearFunctionFeedback(sarl);
sarl(1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs));
assertFalse(isMaglevved(sarl));
}
sarl_test(8, 2, 2);
sarl_test(-8, 2, -2);
sarl_test(-8, 0, -8);
sarl_test(8, 10, 0);
sarl_test(8, 33, 4);
sarl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, -1);
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev
function sarl(x, y) {
return x >> y;
}
function sarl_test(lhs, rhs, expected_result) {
// Warmup.
%PrepareFunctionForOptimization(sarl);
%ClearFunctionFeedback(sarl);
sarl(1, 1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
assertTrue(isMaglevved(sarl));
%DeoptimizeFunction(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
}
function sarl_test_expect_deopt(lhs, rhs, expected_result) {
// Warmup.
%PrepareFunctionForOptimization(sarl);
%ClearFunctionFeedback(sarl);
sarl(1, 1);
%OptimizeMaglevOnNextCall(sarl);
assertEquals(expected_result, sarl(lhs, rhs));
assertFalse(isMaglevved(sarl));
}
sarl_test(8, 2, 2);
sarl_test(-8, 2, -2);
sarl_test(-8, 0, -8);
sarl_test(8, 10, 0);
sarl_test(8, 33, 4);
sarl_test_expect_deopt(0xFFFFFFFF, 0x3FFFFFFF, -1);