Commit 47090774 authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Add more Int32/Float64 arithmetic nodes

Add Int32/Float64 nodes for:

  * Subtract
  * Multiply
  * Divide

and additionally Int32 nodes for

  * BitwiseOr/And/Xor
  * ShiftLeft/Right/RightLogical

The latter ones don't have Float64 equivalents since they're implicitly
Int32 operations. In the future we'll add support for Number feedback by
adding Float64-to-Int32 conversions and using the Int32 nodes.

The divide node does an Int32 division and deopts if there's a remainder
to the division -- we may want to make it output a Float64 instead if we
think that's more likely in real-world code. There are also no peephole
optimisations for constant operations, which would generate much better
code, especially for shifts.

Bug: v8:7700
Change-Id: Ief1d24b46557cf4d2b7929ed50956df7b0d25992
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3652301
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80670}
parent c2430b4f
......@@ -156,9 +156,30 @@ using GenericNodeForOperation =
// TODO(victorgomes): Remove this once all operations have fast paths.
template <Operation kOperation>
bool BinaryOperationHasFastPath() {
bool BinaryOperationHasInt32FastPath() {
switch (kOperation) {
case Operation::kAdd:
case Operation::kSubtract:
case Operation::kMultiply:
case Operation::kDivide:
case Operation::kBitwiseAnd:
case Operation::kBitwiseOr:
case Operation::kBitwiseXor:
case Operation::kShiftLeft:
case Operation::kShiftRight:
case Operation::kShiftRightLogical:
return true;
default:
return false;
}
}
template <Operation kOperation>
bool BinaryOperationHasFloat64FastPath() {
switch (kOperation) {
case Operation::kAdd:
case Operation::kSubtract:
case Operation::kMultiply:
case Operation::kDivide:
return true;
default:
return false;
......@@ -170,17 +191,34 @@ bool BinaryOperationHasFastPath() {
// MAP_OPERATION_TO_NODES are tuples with the following format:
// (Operation name,
// Int32 operation node,
// Unit of int32 operation (e.g, 0 for add/sub and 1 for mul/div),
// Float64 operation node).
#define MAP_OPERATION_TO_NODES(V) V(Add, Int32AddWithOverflow, 0, Float64Add)
// Unit of int32 operation (e.g., 0 for add/sub and 1 for mul/div))
#define MAP_OPERATION_TO_INT32_NODE(V) \
V(Add, Int32AddWithOverflow, 0) \
V(Subtract, Int32SubtractWithOverflow, 0) \
V(Multiply, Int32MultiplyWithOverflow, 1) \
V(Divide, Int32DivideWithOverflow, 1) \
V(BitwiseAnd, Int32BitwiseAnd, ~0) \
V(BitwiseOr, Int32BitwiseOr, 0) \
V(BitwiseXor, Int32BitwiseXor, 0) \
V(ShiftLeft, Int32ShiftLeft, 0) \
V(ShiftRight, Int32ShiftRight, 0) \
V(ShiftRightLogical, Int32ShiftRightLogical, 0)
// MAP_OPERATION_TO_FLOAT64_NODE are tuples with the following format:
// (Operation name, Float64 operation node).
#define MAP_OPERATION_TO_FLOAT64_NODE(V) \
V(Add, Float64Add) \
V(Subtract, Float64Subtract) \
V(Multiply, Float64Multiply) \
V(Divide, Float64Divide)
template <Operation kOperation>
static int Int32Unit() {
switch (kOperation) {
#define CASE(op, _, unit, ...) \
#define CASE(op, OpNode, unit) \
case Operation::k##op: \
return unit;
MAP_OPERATION_TO_NODES(CASE)
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
......@@ -191,10 +229,10 @@ template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewInt32BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) {
switch (kOperation) {
#define CASE(op, OpNode, ...) \
case Operation::k##op: \
#define CASE(op, OpNode, unit) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_NODES(CASE)
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
......@@ -205,10 +243,10 @@ template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewFloat64BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) {
switch (kOperation) {
#define CASE(op, _, u, OpNode) \
case Operation::k##op: \
#define CASE(op, OpNode) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_NODES(CASE)
MAP_OPERATION_TO_FLOAT64_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
......@@ -300,18 +338,29 @@ void MaglevGraphBuilder::VisitUnaryOperation() {
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinaryOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
if (BinaryOperationHasFastPath<kOperation>()) {
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kSignedSmall:
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
BuildInt32BinaryOperationNode<kOperation>();
return;
case BinaryOperationHint::kNumber:
}
break;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
BuildFloat64BinaryOperationNode<kOperation>();
return;
default:
// Fallback to generic node.
break;
}
// } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// // Fall back to int32 fast path if there is one (this will be the
// case
// // for operations that deal with bits rather than numbers).
// BuildInt32BinaryOperationNode<kOperation>();
// return;
}
break;
default:
// Fallback to generic node.
break;
}
BuildGenericBinaryOperationNode<kOperation>();
}
......@@ -319,22 +368,61 @@ void MaglevGraphBuilder::VisitBinaryOperation() {
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinarySmiOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
if (BinaryOperationHasFastPath<kOperation>()) {
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kSignedSmall:
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
BuildInt32BinarySmiOperationNode<kOperation>();
return;
case BinaryOperationHint::kNumber:
}
break;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
BuildFloat64BinarySmiOperationNode<kOperation>();
return;
default:
// Fallback to generic node.
break;
}
// } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// // Fall back to int32 fast path if there is one (this will be the
// case
// // for operations that deal with bits rather than numbers).
// BuildInt32BinarySmiOperationNode<kOperation>();
// return;
}
break;
default:
// Fallback to generic node.
break;
}
BuildGenericBinarySmiOperationNode<kOperation>();
}
template <Operation kOperation>
void MaglevGraphBuilder::VisitCompareOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
switch (nexus.GetCompareOperationFeedback()) {
case CompareOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
BuildInt32BinaryOperationNode<kOperation>();
return;
}
break;
case CompareOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
BuildFloat64BinaryOperationNode<kOperation>();
return;
} else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// Fall back to int32 fast path if there is one (this will be the case
// for operations that deal with bits rather than numbers).
BuildInt32BinaryOperationNode<kOperation>();
return;
}
break;
default:
// Fallback to generic node.
break;
}
BuildGenericBinaryOperationNode<kOperation>();
}
void MaglevGraphBuilder::VisitLdar() {
MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
interpreter::Register::virtual_accumulator());
......@@ -984,22 +1072,22 @@ void MaglevGraphBuilder::VisitConstruct() {
MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread)
void MaglevGraphBuilder::VisitTestEqual() {
VisitBinaryOperation<Operation::kEqual>();
VisitCompareOperation<Operation::kEqual>();
}
void MaglevGraphBuilder::VisitTestEqualStrict() {
VisitBinaryOperation<Operation::kStrictEqual>();
VisitCompareOperation<Operation::kStrictEqual>();
}
void MaglevGraphBuilder::VisitTestLessThan() {
VisitBinaryOperation<Operation::kLessThan>();
VisitCompareOperation<Operation::kLessThan>();
}
void MaglevGraphBuilder::VisitTestLessThanOrEqual() {
VisitBinaryOperation<Operation::kLessThanOrEqual>();
VisitCompareOperation<Operation::kLessThanOrEqual>();
}
void MaglevGraphBuilder::VisitTestGreaterThan() {
VisitBinaryOperation<Operation::kGreaterThan>();
VisitCompareOperation<Operation::kGreaterThan>();
}
void MaglevGraphBuilder::VisitTestGreaterThanOrEqual() {
VisitBinaryOperation<Operation::kGreaterThanOrEqual>();
VisitCompareOperation<Operation::kGreaterThanOrEqual>();
}
MAGLEV_UNIMPLEMENTED_BYTECODE(TestInstanceOf)
......
......@@ -585,6 +585,8 @@ class MaglevGraphBuilder {
void VisitBinaryOperation();
template <Operation kOperation>
void VisitBinarySmiOperation();
template <Operation kOperation>
void VisitCompareOperation();
void MergeIntoFrameState(BasicBlock* block, int target);
void MergeDeadIntoFrameState(int target);
......
......@@ -141,11 +141,25 @@ class MaglevGraphVerifier {
CheckValueInputIs(node, 2, ValueRepresentation::kTagged);
break;
case Opcode::kInt32AddWithOverflow:
case Opcode::kInt32SubtractWithOverflow:
case Opcode::kInt32MultiplyWithOverflow:
case Opcode::kInt32DivideWithOverflow:
// case Opcode::kInt32ExponentiateWithOverflow:
// case Opcode::kInt32ModulusWithOverflow:
case Opcode::kInt32BitwiseAnd:
case Opcode::kInt32BitwiseOr:
case Opcode::kInt32BitwiseXor:
case Opcode::kInt32ShiftLeft:
case Opcode::kInt32ShiftRight:
case Opcode::kInt32ShiftRightLogical:
DCHECK_EQ(node->input_count(), 2);
CheckValueInputIs(node, 0, ValueRepresentation::kInt32);
CheckValueInputIs(node, 1, ValueRepresentation::kInt32);
break;
case Opcode::kFloat64Add:
case Opcode::kFloat64Subtract:
case Opcode::kFloat64Multiply:
case Opcode::kFloat64Divide:
DCHECK_EQ(node->input_count(), 2);
CheckValueInputIs(node, 0, ValueRepresentation::kFloat64);
CheckValueInputIs(node, 1, ValueRepresentation::kFloat64);
......
......@@ -382,7 +382,10 @@ class MergePointInterpreterFrameState {
if (value->Is<CheckedSmiUntag>()) {
return value->input(0).node();
}
DCHECK(value->Is<Int32AddWithOverflow>() || value->Is<Int32Constant>());
#define IS_INT32_OP_NODE(Name) || value->Is<Name>()
DCHECK(value->Is<Int32Constant>()
INT32_OPERATIONS_NODE_LIST(IS_INT32_OP_NODE));
#undef IS_INT32_OP_NODE
// Check if the next Node in the block after value is its CheckedSmiTag
// version and reuse it.
if (value->NextNode()) {
......@@ -514,7 +517,10 @@ class MergePointInterpreterFrameState {
ValueNode* unmerged, int merge_offset) {
Phi* result = merged->TryCast<Phi>();
if (result == nullptr || result->merge_offset() != merge_offset) {
DCHECK_EQ(merged, unmerged);
DCHECK_EQ(merged, (unmerged->Is<CheckedSmiUntag>() ||
unmerged->Is<CheckedFloat64Unbox>())
? unmerged->input(0).node()
: unmerged);
return;
}
DCHECK_EQ(result->owner(), owner);
......
This diff is collapsed.
This diff is collapsed.
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev --no-stress-opt
// Checks Smi shift_right operation and deopt while untagging.
(function() {
// Helper whose `>>` will collect SignedSmall feedback from the calls below,
// letting Maglev select the Int32ShiftRight fast path on optimization.
function shift_right(x, y) {
return x >> y;
}
// Warm-up phase: run with Smi inputs so type feedback is recorded before
// optimization is requested.
%PrepareFunctionForOptimization(shift_right);
assertEquals(2, shift_right(8, 2));
assertEquals(-2, shift_right(-8, 2));
assertEquals(-8, shift_right(-8, 0));
assertEquals(0, shift_right(8, 10));
// Shift counts are taken mod 32, so a count of 33 behaves like 1 (8 >> 1).
assertEquals(4, shift_right(8, 33));
%OptimizeMaglevOnNextCall(shift_right);
// Re-run the same inputs under Maglev; after each call, verify the function
// has not deoptimized (isMaglevved stays true).
assertEquals(2, shift_right(8, 2));
assertTrue(isMaglevved(shift_right));
assertEquals(-2, shift_right(-8, 2));
assertTrue(isMaglevved(shift_right));
assertEquals(-8, shift_right(-8, 0));
assertTrue(isMaglevved(shift_right));
assertEquals(0, shift_right(8, 10));
assertTrue(isMaglevved(shift_right));
// Shifts are mod 32
assertEquals(4, shift_right(8, 33));
assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiUntag.
// assertEquals(0x40000000, shift_right(1, 0x3FFFFFFF));
// assertFalse(isMaglevved(shift_right));
})();
// // Checks when we deopt due to tagging.
// (function() {
// function shift_right(x, y) {
// return x + y;
// }
// %PrepareFunctionForOptimization(shift_right);
// assertEquals(3, shift_right(1, 2));
// %OptimizeMaglevOnNextCall(shift_right);
// assertEquals(3, shift_right(1, 2));
// assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiTag.
// assertEquals(3.2, shift_right(1.2, 2));
// assertFalse(isMaglevved(shift_right));
// })();
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment