Commit 47090774 authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Add more Int32/Float64 arithmetic nodes

Add Int32/Float64 nodes for:

  * Subtract
  * Multiply
  * Divide

and additionally Int32 nodes for

  * BitwiseOr/And/Xor
  * ShiftLeft/Right/RightLogical

The latter ones don't have Float64 equivalents since they're implicitly
Int32 operations. In the future we'll add support for Number feedback by
adding Float64-to-Int32 conversions and using the Int32 nodes.
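
As a quick illustration (plain JavaScript, nothing V8-specific): the
bitwise and shift operators apply ToInt32/ToUint32 to their operands,
which is why Float64 variants would be redundant.

  8.9 >> 1;    // 4           -- ToInt32(8.9) == 8, then 8 >> 1
  8.9 | 0;     // 8           -- the usual Int32-truncation idiom
  -8.9 >>> 1;  // 2147483644  -- ToUint32(-8) == 4294967288, then shifted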

The divide node does an Int32 division and deopts if the division leaves
a remainder -- we may want to make it output a Float64 instead if we
think that's more likely in real-world code. There are also no peephole
optimisations for operations with constant operands yet, which would
generate much better code, especially for shifts.
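
As a rough sketch of the intended divide behaviour (a hypothetical
snippet in the style of the mjsunit test added below, assuming a d8
build run with --allow-natives-syntax --maglev):

  function div(x, y) { return x / y; }
  %PrepareFunctionForOptimization(div);
  assertEquals(4, div(8, 2));
  %OptimizeMaglevOnNextCall(div);
  // Exact Int32 division can stay on the Int32DivideWithOverflow fast path.
  assertEquals(4, div(8, 2));
  // A non-zero remainder gives a non-Int32 result, so the node is expected
  // to eager-deopt and the result comes from the generic path instead.
  assertEquals(3.5, div(7, 2));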

Bug: v8:7700
Change-Id: Ief1d24b46557cf4d2b7929ed50956df7b0d25992
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3652301
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80670}
parent c2430b4f
@@ -156,9 +156,30 @@ using GenericNodeForOperation =
// TODO(victorgomes): Remove this once all operations have fast paths.
template <Operation kOperation>
bool BinaryOperationHasFastPath() {
bool BinaryOperationHasInt32FastPath() {
switch (kOperation) {
case Operation::kAdd:
case Operation::kSubtract:
case Operation::kMultiply:
case Operation::kDivide:
case Operation::kBitwiseAnd:
case Operation::kBitwiseOr:
case Operation::kBitwiseXor:
case Operation::kShiftLeft:
case Operation::kShiftRight:
case Operation::kShiftRightLogical:
return true;
default:
return false;
}
}
template <Operation kOperation>
bool BinaryOperationHasFloat64FastPath() {
switch (kOperation) {
case Operation::kAdd:
case Operation::kSubtract:
case Operation::kMultiply:
case Operation::kDivide:
return true;
default:
return false;
@@ -170,17 +191,34 @@ bool BinaryOperationHasFastPath() {
// MAP_OPERATION_TO_NODES are tuples with the following format:
// (Operation name,
// Int32 operation node,
// Unit of int32 operation (e.g, 0 for add/sub and 1 for mul/div),
// Float64 operation node).
#define MAP_OPERATION_TO_NODES(V) V(Add, Int32AddWithOverflow, 0, Float64Add)
// Unit of int32 operation (e.g, 0 for add/sub and 1 for mul/div))
#define MAP_OPERATION_TO_INT32_NODE(V) \
V(Add, Int32AddWithOverflow, 0) \
V(Subtract, Int32SubtractWithOverflow, 0) \
V(Multiply, Int32MultiplyWithOverflow, 1) \
V(Divide, Int32DivideWithOverflow, 1) \
V(BitwiseAnd, Int32BitwiseAnd, ~0) \
V(BitwiseOr, Int32BitwiseOr, 0) \
V(BitwiseXor, Int32BitwiseXor, 0) \
V(ShiftLeft, Int32ShiftLeft, 0) \
V(ShiftRight, Int32ShiftRight, 0) \
V(ShiftRightLogical, Int32ShiftRightLogical, 0)
// MAP_OPERATION_TO_FLOAT64_NODE are tuples with the following format:
// (Operation name, Float64 operation node).
#define MAP_OPERATION_TO_FLOAT64_NODE(V) \
V(Add, Float64Add) \
V(Subtract, Float64Subtract) \
V(Multiply, Float64Multiply) \
V(Divide, Float64Divide)
template <Operation kOperation>
static int Int32Unit() {
switch (kOperation) {
#define CASE(op, _, unit, ...) \
#define CASE(op, OpNode, unit) \
case Operation::k##op: \
return unit;
MAP_OPERATION_TO_NODES(CASE)
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
@@ -191,10 +229,10 @@ template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewInt32BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) {
switch (kOperation) {
#define CASE(op, OpNode, ...) \
case Operation::k##op: \
#define CASE(op, OpNode, unit) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_NODES(CASE)
MAP_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
@@ -205,10 +243,10 @@ template <Operation kOperation>
ValueNode* MaglevGraphBuilder::AddNewFloat64BinaryOperationNode(
std::initializer_list<ValueNode*> inputs) {
switch (kOperation) {
#define CASE(op, _, u, OpNode) \
case Operation::k##op: \
#define CASE(op, OpNode) \
case Operation::k##op: \
return AddNewNode<OpNode>(inputs);
MAP_OPERATION_TO_NODES(CASE)
MAP_OPERATION_TO_FLOAT64_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
@@ -300,18 +338,29 @@ void MaglevGraphBuilder::VisitUnaryOperation() {
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinaryOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
if (BinaryOperationHasFastPath<kOperation>()) {
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kSignedSmall:
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
BuildInt32BinaryOperationNode<kOperation>();
return;
case BinaryOperationHint::kNumber:
}
break;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
BuildFloat64BinaryOperationNode<kOperation>();
return;
default:
// Fallback to generic node.
break;
}
// } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// // Fall back to int32 fast path if there is one (this will be the
// case
// // for operations that deal with bits rather than numbers).
// BuildInt32BinaryOperationNode<kOperation>();
// return;
}
break;
default:
// Fallback to generic node.
break;
}
BuildGenericBinaryOperationNode<kOperation>();
}
@@ -319,22 +368,61 @@ void MaglevGraphBuilder::VisitBinaryOperation() {
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinarySmiOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
if (BinaryOperationHasFastPath<kOperation>()) {
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kSignedSmall:
switch (nexus.GetBinaryOperationFeedback()) {
case BinaryOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
BuildInt32BinarySmiOperationNode<kOperation>();
return;
case BinaryOperationHint::kNumber:
}
break;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
BuildFloat64BinarySmiOperationNode<kOperation>();
return;
default:
// Fallback to generic node.
break;
}
// } else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// // Fall back to int32 fast path if there is one (this will be the
// case
// // for operations that deal with bits rather than numbers).
// BuildInt32BinarySmiOperationNode<kOperation>();
// return;
}
break;
default:
// Fallback to generic node.
break;
}
BuildGenericBinarySmiOperationNode<kOperation>();
}
template <Operation kOperation>
void MaglevGraphBuilder::VisitCompareOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
switch (nexus.GetCompareOperationFeedback()) {
case CompareOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
BuildInt32BinaryOperationNode<kOperation>();
return;
}
break;
case CompareOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
BuildFloat64BinaryOperationNode<kOperation>();
return;
} else if (BinaryOperationHasInt32FastPath<kOperation>()) {
// Fall back to int32 fast path if there is one (this will be the case
// for operations that deal with bits rather than numbers).
BuildInt32BinaryOperationNode<kOperation>();
return;
}
break;
default:
// Fallback to generic node.
break;
}
BuildGenericBinaryOperationNode<kOperation>();
}
void MaglevGraphBuilder::VisitLdar() {
MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
interpreter::Register::virtual_accumulator());
@@ -984,22 +1072,22 @@ void MaglevGraphBuilder::VisitConstruct() {
MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread)
void MaglevGraphBuilder::VisitTestEqual() {
VisitBinaryOperation<Operation::kEqual>();
VisitCompareOperation<Operation::kEqual>();
}
void MaglevGraphBuilder::VisitTestEqualStrict() {
VisitBinaryOperation<Operation::kStrictEqual>();
VisitCompareOperation<Operation::kStrictEqual>();
}
void MaglevGraphBuilder::VisitTestLessThan() {
VisitBinaryOperation<Operation::kLessThan>();
VisitCompareOperation<Operation::kLessThan>();
}
void MaglevGraphBuilder::VisitTestLessThanOrEqual() {
VisitBinaryOperation<Operation::kLessThanOrEqual>();
VisitCompareOperation<Operation::kLessThanOrEqual>();
}
void MaglevGraphBuilder::VisitTestGreaterThan() {
VisitBinaryOperation<Operation::kGreaterThan>();
VisitCompareOperation<Operation::kGreaterThan>();
}
void MaglevGraphBuilder::VisitTestGreaterThanOrEqual() {
VisitBinaryOperation<Operation::kGreaterThanOrEqual>();
VisitCompareOperation<Operation::kGreaterThanOrEqual>();
}
MAGLEV_UNIMPLEMENTED_BYTECODE(TestInstanceOf)
......
@@ -585,6 +585,8 @@ class MaglevGraphBuilder {
void VisitBinaryOperation();
template <Operation kOperation>
void VisitBinarySmiOperation();
template <Operation kOperation>
void VisitCompareOperation();
void MergeIntoFrameState(BasicBlock* block, int target);
void MergeDeadIntoFrameState(int target);
......
@@ -141,11 +141,25 @@ class MaglevGraphVerifier {
CheckValueInputIs(node, 2, ValueRepresentation::kTagged);
break;
case Opcode::kInt32AddWithOverflow:
case Opcode::kInt32SubtractWithOverflow:
case Opcode::kInt32MultiplyWithOverflow:
case Opcode::kInt32DivideWithOverflow:
// case Opcode::kInt32ExponentiateWithOverflow:
// case Opcode::kInt32ModulusWithOverflow:
case Opcode::kInt32BitwiseAnd:
case Opcode::kInt32BitwiseOr:
case Opcode::kInt32BitwiseXor:
case Opcode::kInt32ShiftLeft:
case Opcode::kInt32ShiftRight:
case Opcode::kInt32ShiftRightLogical:
DCHECK_EQ(node->input_count(), 2);
CheckValueInputIs(node, 0, ValueRepresentation::kInt32);
CheckValueInputIs(node, 1, ValueRepresentation::kInt32);
break;
case Opcode::kFloat64Add:
case Opcode::kFloat64Subtract:
case Opcode::kFloat64Multiply:
case Opcode::kFloat64Divide:
DCHECK_EQ(node->input_count(), 2);
CheckValueInputIs(node, 0, ValueRepresentation::kFloat64);
CheckValueInputIs(node, 1, ValueRepresentation::kFloat64);
......
@@ -382,7 +382,10 @@ class MergePointInterpreterFrameState {
if (value->Is<CheckedSmiUntag>()) {
return value->input(0).node();
}
DCHECK(value->Is<Int32AddWithOverflow>() || value->Is<Int32Constant>());
#define IS_INT32_OP_NODE(Name) || value->Is<Name>()
DCHECK(value->Is<Int32Constant>()
INT32_OPERATIONS_NODE_LIST(IS_INT32_OP_NODE));
#undef IS_INT32_OP_NODE
// Check if the next Node in the block after value is its CheckedSmiTag
// version and reuse it.
if (value->NextNode()) {
@@ -514,7 +517,10 @@ class MergePointInterpreterFrameState {
ValueNode* unmerged, int merge_offset) {
Phi* result = merged->TryCast<Phi>();
if (result == nullptr || result->merge_offset() != merge_offset) {
DCHECK_EQ(merged, unmerged);
DCHECK_EQ(merged, (unmerged->Is<CheckedSmiUntag>() ||
unmerged->Is<CheckedFloat64Unbox>())
? unmerged->input(0).node()
: unmerged);
return;
}
DCHECK_EQ(result->owner(), owner);
......
@@ -956,6 +956,239 @@ void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
#undef DEF_OPERATION
void Int32AddWithOverflow::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Int32AddWithOverflow::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ addl(left, right);
EmitEagerDeoptIf(overflow, code_gen_state, this);
}
void Int32SubtractWithOverflow::AllocateVreg(
MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Int32SubtractWithOverflow::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ subl(left, right);
EmitEagerDeoptIf(overflow, code_gen_state, this);
}
void Int32MultiplyWithOverflow::AllocateVreg(
MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
set_temporaries_needed(1);
}
void Int32MultiplyWithOverflow::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register result = ToRegister(this->result());
Register right = ToRegister(right_input());
DCHECK_EQ(result, ToRegister(left_input()));
Register saved_left = temporaries().first();
__ movl(saved_left, result);
// TODO(leszeks): peephole optimise multiplication by a constant.
__ imull(result, right);
EmitEagerDeoptIf(overflow, code_gen_state, this);
// If the result is zero, check if either lhs or rhs is negative.
Label end;
__ cmpl(result, Immediate(0));
__ j(not_zero, &end);
{
__ orl(saved_left, right);
__ cmpl(saved_left, Immediate(0));
// If one of them is negative, we must have a -0 result, which is non-int32,
// so deopt.
// TODO(leszeks): Consider merging these deopts.
EmitEagerDeoptIf(less, code_gen_state, this);
}
__ bind(&end);
}
void Int32DivideWithOverflow::AllocateVreg(
MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
UseFixed(left_input(), rax);
UseRegister(right_input());
DefineAsFixed(vreg_state, this, rax);
// rdx is clobbered by idiv.
RequireSpecificTemporary(rdx);
}
void Int32DivideWithOverflow::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DCHECK_EQ(rax, ToRegister(left_input()));
DCHECK(temporaries().has(rdx));
Register right = ToRegister(right_input());
// Clear rdx so that it doesn't participate in the division.
__ xorl(rdx, rdx);
// TODO(leszeks): peephole optimise division by a constant.
__ idivl(right);
__ cmpl(rdx, Immediate(0));
EmitEagerDeoptIf(equal, code_gen_state, this);
}
void Int32BitwiseAnd::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Int32BitwiseAnd::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ andl(left, right);
}
void Int32BitwiseOr::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Int32BitwiseOr::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ orl(left, right);
}
void Int32BitwiseXor::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Int32BitwiseXor::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ xorl(left, right);
}
void Int32ShiftLeft::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
// Use the "shift by cl" variant of shl.
// TODO(leszeks): peephole optimise shifts by a constant.
UseFixed(right_input(), rcx);
DefineSameAsFirst(vreg_state, this);
}
void Int32ShiftLeft::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input()));
__ shll_cl(left);
}
void Int32ShiftRight::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
// Use the "shift by cl" variant of sar.
// TODO(leszeks): peephole optimise shifts by a constant.
UseFixed(right_input(), rcx);
DefineSameAsFirst(vreg_state, this);
}
void Int32ShiftRight::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input()));
__ sarl_cl(left);
}
void Int32ShiftRightLogical::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
// Use the "shift by cl" variant of shr.
// TODO(leszeks): peephole optimise shifts by a constant.
UseFixed(right_input(), rcx);
DefineSameAsFirst(vreg_state, this);
}
void Int32ShiftRightLogical::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
DCHECK_EQ(rcx, ToRegister(right_input()));
__ shrl_cl(left);
}
void Float64Add::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Float64Add::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Addsd(left, right);
}
void Float64Subtract::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Float64Subtract::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Subsd(left, right);
}
void Float64Multiply::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Float64Multiply::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Mulsd(left, right);
}
void Float64Divide::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Float64Divide::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Divsd(left, right);
}
void CheckedSmiUntag::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(input());
@@ -1004,21 +1237,6 @@ void Int32Constant::PrintParams(std::ostream& os,
os << "(" << value() << ")";
}
void Int32AddWithOverflow::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Int32AddWithOverflow::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ addl(left, right);
EmitEagerDeoptIf(overflow, code_gen_state, this);
}
void Float64Box::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
using D = NewHeapNumberDescriptor;
@@ -1072,20 +1290,6 @@ void ChangeInt32ToFloat64::GenerateCode(MaglevCodeGenState* code_gen_state,
__ Cvtlsi2sd(ToDoubleRegister(result()), ToRegister(input()));
}
void Float64Add::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Float64Add::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DoubleRegister left = ToDoubleRegister(left_input());
DoubleRegister right = ToDoubleRegister(right_input());
__ Addsd(left, right);
}
void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
// Phi inputs are processed in the post-process, once loop phis' inputs'
......
@@ -65,6 +65,47 @@ class CompactInterpreterFrameState;
V(GenericGreaterThan) \
V(GenericGreaterThanOrEqual)
#define INT32_OPERATIONS_NODE_LIST(V) \
V(Int32AddWithOverflow) \
V(Int32SubtractWithOverflow) \
V(Int32MultiplyWithOverflow) \
V(Int32DivideWithOverflow) \
/*V(Int32ModulusWithOverflow)*/ \
/*V(Int32ExponentiateWithOverflow)*/ \
V(Int32BitwiseAnd) \
V(Int32BitwiseOr) \
V(Int32BitwiseXor) \
V(Int32ShiftLeft) \
V(Int32ShiftRight) \
V(Int32ShiftRightLogical) \
/*V(Int32BitwiseNot) */ \
/*V(Int32NegateWithOverflow) */ \
/*V(Int32IncrementWithOverflow)*/ \
/*V(Int32DecrementWithOverflow)*/ \
/*V(Int32Equal)*/ \
/*V(Int32StrictEqual) */ \
/*V(Int32LessThan)*/ \
/*V(Int32LessThanOrEqual) */ \
/*V(Int32GreaterThan)*/ \
/*V(Int32GreaterThanOrEqual)*/
#define FLOAT64_OPERATIONS_NODE_LIST(V) \
V(Float64Add) \
V(Float64Subtract) \
V(Float64Multiply) \
V(Float64Divide) \
/*V(Float64Modulus)*/ \
/*V(Float64Exponentiate)*/ \
/*V(Float64Negate) */ \
/*V(Float64Increment)*/ \
/*V(Float64Decrement)*/ \
/*V(Float64Equal)*/ \
/*V(Float64StrictEqual) */ \
/*V(Float64LessThan)*/ \
/*V(Float64LessThanOrEqual) */ \
/*V(Float64GreaterThan)*/ \
/*V(Float64GreaterThanOrEqual)*/
#define CONSTANT_VALUE_NODE_LIST(V) \
V(Constant) \
V(Float64Constant) \
@@ -72,28 +113,28 @@ class CompactInterpreterFrameState;
V(RootConstant) \
V(SmiConstant)
#define VALUE_NODE_LIST(V) \
V(Call) \
V(Construct) \
V(CreateEmptyArrayLiteral) \
V(CreateObjectLiteral) \
V(CreateShallowObjectLiteral) \
V(InitialValue) \
V(LoadTaggedField) \
V(LoadDoubleField) \
V(LoadGlobal) \
V(LoadNamedGeneric) \
V(SetNamedGeneric) \
V(Phi) \
V(RegisterInput) \
V(CheckedSmiTag) \
V(CheckedSmiUntag) \
V(Int32AddWithOverflow) \
V(ChangeInt32ToFloat64) \
V(Float64Box) \
V(CheckedFloat64Unbox) \
V(Float64Add) \
CONSTANT_VALUE_NODE_LIST(V) \
#define VALUE_NODE_LIST(V) \
V(Call) \
V(Construct) \
V(CreateEmptyArrayLiteral) \
V(CreateObjectLiteral) \
V(CreateShallowObjectLiteral) \
V(InitialValue) \
V(LoadTaggedField) \
V(LoadDoubleField) \
V(LoadGlobal) \
V(LoadNamedGeneric) \
V(SetNamedGeneric) \
V(Phi) \
V(RegisterInput) \
V(CheckedSmiTag) \
V(CheckedSmiUntag) \
V(ChangeInt32ToFloat64) \
V(Float64Box) \
V(CheckedFloat64Unbox) \
CONSTANT_VALUE_NODE_LIST(V) \
INT32_OPERATIONS_NODE_LIST(V) \
FLOAT64_OPERATIONS_NODE_LIST(V) \
GENERIC_OPERATIONS_NODE_LIST(V)
#define GAP_MOVE_NODE_LIST(V) \
@@ -434,7 +475,7 @@ NODE_BASE_LIST(DEF_OPCODE_OF)
class NodeBase : public ZoneObject {
private:
// Bitfield specification.
using OpcodeField = base::BitField<Opcode, 0, 6>;
using OpcodeField = base::BitField<Opcode, 0, 7>;
static_assert(OpcodeField::is_valid(kLastOpcode));
using OpPropertiesField =
OpcodeField::Next<OpProperties, OpProperties::kSize>;
@@ -1045,6 +1086,134 @@ COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
#undef DEF_UNARY_WITH_FEEDBACK_NODE
#undef DEF_BINARY_WITH_FEEDBACK_NODE
#undef DEF_OPERATION_NODE
template <class Derived, Operation kOperation>
class Int32BinaryWithOverflowNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
static constexpr OpProperties kProperties =
OpProperties::EagerDeopt() | OpProperties::Int32();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); }
Input& right_input() { return Node::input(kRightIndex); }
protected:
explicit Int32BinaryWithOverflowNode(uint32_t bitfield) : Base(bitfield) {}
// void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
// void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
#define DEF_OPERATION_NODE(Name, Super, OpName) \
class Name : public Super<Name, Operation::k##OpName> { \
using Base = Super<Name, Operation::k##OpName>; \
\
public: \
explicit Name(uint32_t bitfield) : Base(bitfield) {} \
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
};
#define DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Name) \
DEF_OPERATION_NODE(Int32##Name##WithOverflow, Int32BinaryWithOverflowNode, \
Name)
DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Add)
DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Subtract)
DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Multiply)
DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Divide)
// DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Modulus)
// DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Exponentiate)
#undef DEF_INT32_BINARY_WITH_OVERFLOW_NODE
template <class Derived, Operation kOperation>
class Int32BinaryNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
static constexpr OpProperties kProperties = OpProperties::Int32();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); }
Input& right_input() { return Node::input(kRightIndex); }
protected:
explicit Int32BinaryNode(uint32_t bitfield) : Base(bitfield) {}
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
#define DEF_OPERATION_NODE(Name, Super, OpName) \
class Name : public Super<Name, Operation::k##OpName> { \
using Base = Super<Name, Operation::k##OpName>; \
\
public: \
explicit Name(uint32_t bitfield) : Base(bitfield) {} \
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
};
#define DEF_INT32_BINARY_NODE(Name) \
DEF_OPERATION_NODE(Int32##Name, Int32BinaryNode, Name)
DEF_INT32_BINARY_NODE(BitwiseAnd)
DEF_INT32_BINARY_NODE(BitwiseOr)
DEF_INT32_BINARY_NODE(BitwiseXor)
DEF_INT32_BINARY_NODE(ShiftLeft)
DEF_INT32_BINARY_NODE(ShiftRight)
DEF_INT32_BINARY_NODE(ShiftRightLogical)
#undef DEF_INT32_BINARY_NODE
// DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Negate)
// DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Increment)
// DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Decrement)
#undef DEF_OPERATION_NODE
template <class Derived, Operation kOperation>
class Float64BinaryNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
static constexpr OpProperties kProperties = OpProperties::Float64();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); }
Input& right_input() { return Node::input(kRightIndex); }
protected:
explicit Float64BinaryNode(uint32_t bitfield) : Base(bitfield) {}
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
#define DEF_OPERATION_NODE(Name, Super, OpName) \
class Name : public Super<Name, Operation::k##OpName> { \
using Base = Super<Name, Operation::k##OpName>; \
\
public: \
explicit Name(uint32_t bitfield) : Base(bitfield) {} \
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
};
#define DEF_FLOAT64_BINARY_NODE(Name) \
DEF_OPERATION_NODE(Float64##Name, Float64BinaryNode, Name)
DEF_FLOAT64_BINARY_NODE(Add)
DEF_FLOAT64_BINARY_NODE(Subtract)
DEF_FLOAT64_BINARY_NODE(Multiply)
DEF_FLOAT64_BINARY_NODE(Divide)
// DEF_FLOAT64_BINARY_NODE(Modulus)
// DEF_FLOAT64_BINARY_NODE(Exponentiate)
#undef DEF_FLOAT64_BINARY_NODE
class CheckedSmiTag : public FixedInputValueNodeT<1, CheckedSmiTag> {
using Base = FixedInputValueNodeT<1, CheckedSmiTag>;
@@ -1125,26 +1294,6 @@ class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> {
const double value_;
};
class Int32AddWithOverflow
: public FixedInputValueNodeT<2, Int32AddWithOverflow> {
using Base = FixedInputValueNodeT<2, Int32AddWithOverflow>;
public:
explicit Int32AddWithOverflow(uint32_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties =
OpProperties::EagerDeopt() | OpProperties::Int32();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); }
Input& right_input() { return Node::input(kRightIndex); }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class Float64Box : public FixedInputValueNodeT<1, Float64Box> {
using Base = FixedInputValueNodeT<1, Float64Box>;
@@ -1193,24 +1342,6 @@ class CheckedFloat64Unbox
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class Float64Add : public FixedInputValueNodeT<2, Float64Add> {
using Base = FixedInputValueNodeT<2, Float64Add>;
public:
explicit Float64Add(uint32_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties = OpProperties::Float64();
static constexpr int kLeftIndex = 0;
static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); }
Input& right_input() { return Node::input(kRightIndex); }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
using Base = FixedInputValueNodeT<0, InitialValue>;
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev --no-stress-opt
// Checks Smi shift_right operation and deopt while untagging.
(function() {
function shift_right(x, y) {
return x >> y;
}
%PrepareFunctionForOptimization(shift_right);
assertEquals(2, shift_right(8, 2));
assertEquals(-2, shift_right(-8, 2));
assertEquals(-8, shift_right(-8, 0));
assertEquals(0, shift_right(8, 10));
assertEquals(4, shift_right(8, 33));
%OptimizeMaglevOnNextCall(shift_right);
assertEquals(2, shift_right(8, 2));
assertTrue(isMaglevved(shift_right));
assertEquals(-2, shift_right(-8, 2));
assertTrue(isMaglevved(shift_right));
assertEquals(-8, shift_right(-8, 0));
assertTrue(isMaglevved(shift_right));
assertEquals(0, shift_right(8, 10));
assertTrue(isMaglevved(shift_right));
// Shifts are mod 32
assertEquals(4, shift_right(8, 33));
assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiUntag.
// assertEquals(0x40000000, shift_right(1, 0x3FFFFFFF));
// assertFalse(isMaglevved(shift_right));
})();
// // Checks when we deopt due to tagging.
// (function() {
// function shift_right(x, y) {
// return x + y;
// }
// %PrepareFunctionForOptimization(shift_right);
// assertEquals(3, shift_right(1, 2));
// %OptimizeMaglevOnNextCall(shift_right);
// assertEquals(3, shift_right(1, 2));
// assertTrue(isMaglevved(shift_right));
// // We should deopt here in SmiTag.
// assertEquals(3.2, shift_right(1.2, 2));
// assertFalse(isMaglevved(shift_right));
// })();