Commit 9694d9b6 authored by Victor Gomes, committed by V8 LUCI CQ

[maglev] Generic binary operations

This CL implements the binary operation bytecodes as generic nodes that
call the corresponding runtime builtins.

Bug: v8:7700
Change-Id: I82c5e20e4103d4ef367184af1242bae7f7f93fe0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3509392
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79462}
parent 9f9f36f8
...@@ -1515,34 +1515,6 @@ class CompareOperationFeedback { ...@@ -1515,34 +1515,6 @@ class CompareOperationFeedback {
}; };
}; };
enum class Operation {
// Binary operations.
kAdd,
kSubtract,
kMultiply,
kDivide,
kModulus,
kExponentiate,
kBitwiseAnd,
kBitwiseOr,
kBitwiseXor,
kShiftLeft,
kShiftRight,
kShiftRightLogical,
// Unary operations.
kBitwiseNot,
kNegate,
kIncrement,
kDecrement,
// Compare operations.
kEqual,
kStrictEqual,
kLessThan,
kLessThanOrEqual,
kGreaterThan,
kGreaterThanOrEqual,
};
// Type feedback is encoded in such a way that, we can combine the feedback // Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves // at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback. // to a more generic type when we combine feedback.
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMMON_OPERATION_H_
#define V8_COMMON_OPERATION_H_

#include <ostream>

// X-macro lists of language-level operations. The Operation enum below and
// per-operation artifacts elsewhere (e.g. the <Name>_WithFeedback builtins)
// are generated from these lists, so keep them in sync with the builtins.
#define ARITHMETIC_OPERATION_LIST(V) \
  V(Add)                             \
  V(Subtract)                        \
  V(Multiply)                        \
  V(Divide)                          \
  V(Modulus)                         \
  V(Exponentiate)                    \
  V(BitwiseAnd)                      \
  V(BitwiseOr)                       \
  V(BitwiseXor)                      \
  V(ShiftLeft)                       \
  V(ShiftRight)                      \
  V(ShiftRightLogical)

#define UNARY_OPERATION_LIST(V) \
  V(BitwiseNot)                 \
  V(Negate)                     \
  V(Increment)                  \
  V(Decrement)

#define COMPARISON_OPERATION_LIST(V) \
  V(Equal)                           \
  V(StrictEqual)                     \
  V(LessThan)                        \
  V(LessThanOrEqual)                 \
  V(GreaterThan)                     \
  V(GreaterThanOrEqual)

// All operations: arithmetic first, then unary, then comparisons.
#define OPERATION_LIST(V)      \
  ARITHMETIC_OPERATION_LIST(V) \
  UNARY_OPERATION_LIST(V)      \
  COMPARISON_OPERATION_LIST(V)

enum class Operation {
#define DEFINE_OP(name) k##name,
  OPERATION_LIST(DEFINE_OP)
#undef DEFINE_OP
};

// Prints the operation name without the "k" prefix, e.g. "Add".
inline std::ostream& operator<<(std::ostream& os, const Operation& operation) {
  switch (operation) {
#define CASE(name)         \
  case Operation::k##name: \
    return os << #name;
    OPERATION_LIST(CASE)
#undef CASE
  }
  // Every enum value is handled above; this return avoids undefined
  // behavior (and -Wreturn-type warnings) if an out-of-range value is
  // ever cast to Operation.
  return os;
}

#endif  // V8_COMMON_OPERATION_H_
...@@ -29,6 +29,51 @@ namespace maglev { ...@@ -29,6 +29,51 @@ namespace maglev {
#define MAGLEV_UNIMPLEMENTED_BYTECODE(Name) \ #define MAGLEV_UNIMPLEMENTED_BYTECODE(Name) \
void MaglevGraphBuilder::Visit##Name() { MAGLEV_UNIMPLEMENTED(Name); } void MaglevGraphBuilder::Visit##Name() { MAGLEV_UNIMPLEMENTED(Name); }
// Creates and inserts the Generic<Name> node class matching the
// compile-time operation kOperation, forwarding |inputs| and any extra
// constructor arguments (e.g. the feedback source) to the node.
template <Operation kOperation, typename... Args>
ValueNode* MaglevGraphBuilder::AddNewOperationNode(
    std::initializer_list<ValueNode*> inputs, Args&&... args) {
  // kOperation is a template parameter, so this switch is resolved at
  // compile time; the macro expands one case per entry in OPERATION_LIST.
  switch (kOperation) {
#define CASE(Name)         \
  case Operation::k##Name: \
    return AddNewNode<Generic##Name>(inputs, std::forward<Args>(args)...);
    OPERATION_LIST(CASE)
#undef CASE
  }
}
// Builds a generic (builtin-calling) node for a unary operation bytecode.
// Bytecode layout: operand 0 is the feedback slot; the value operand is
// taken from the accumulator, and the result is stored back into it.
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
  FeedbackSlot slot_index = GetSlotOperand(0);
  ValueNode* value = GetAccumulator();
  ValueNode* node = AddNewOperationNode<kOperation>(
      {value}, compiler::FeedbackSource{feedback(), slot_index});
  SetAccumulator(node);
  // The generic node calls a builtin, which may have arbitrary side effects.
  MarkPossibleSideEffect();
}
// Builds a generic (builtin-calling) node for a binary operation bytecode.
// Bytecode layout: operand 0 is the register holding the left operand,
// operand 1 is the feedback slot; the right operand is taken from the
// accumulator, and the result is stored back into it.
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericBinaryOperationNode() {
  ValueNode* left = LoadRegister(0);
  FeedbackSlot slot_index = GetSlotOperand(1);
  ValueNode* right = GetAccumulator();
  ValueNode* node = AddNewOperationNode<kOperation>(
      {left, right}, compiler::FeedbackSource{feedback(), slot_index});
  SetAccumulator(node);
  // The generic node calls a builtin, which may have arbitrary side effects.
  MarkPossibleSideEffect();
}
// Shared visitor for all unary operation bytecodes; currently always emits
// the generic (builtin-calling) node.
template <Operation kOperation>
void MaglevGraphBuilder::VisitUnaryOperation() {
  // TODO(victorgomes): Use feedback info and create optimized versions.
  BuildGenericUnaryOperationNode<kOperation>();
}

// Shared visitor for all binary operation bytecodes; currently always emits
// the generic (builtin-calling) node.
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinaryOperation() {
  // TODO(victorgomes): Use feedback info and create optimized versions.
  BuildGenericBinaryOperationNode<kOperation>();
}
void MaglevGraphBuilder::VisitLdar() { SetAccumulator(LoadRegister(0)); } void MaglevGraphBuilder::VisitLdar() { SetAccumulator(LoadRegister(0)); }
void MaglevGraphBuilder::VisitLdaZero() { void MaglevGraphBuilder::VisitLdaZero() {
...@@ -176,18 +221,42 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnProperty) ...@@ -176,18 +221,42 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnProperty)
MAGLEV_UNIMPLEMENTED_BYTECODE(StaInArrayLiteral) MAGLEV_UNIMPLEMENTED_BYTECODE(StaInArrayLiteral)
MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnPropertyInLiteral) MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnPropertyInLiteral)
MAGLEV_UNIMPLEMENTED_BYTECODE(CollectTypeProfile) MAGLEV_UNIMPLEMENTED_BYTECODE(CollectTypeProfile)
MAGLEV_UNIMPLEMENTED_BYTECODE(Add)
MAGLEV_UNIMPLEMENTED_BYTECODE(Sub) void MaglevGraphBuilder::VisitAdd() { VisitBinaryOperation<Operation::kAdd>(); }
MAGLEV_UNIMPLEMENTED_BYTECODE(Mul) void MaglevGraphBuilder::VisitSub() {
MAGLEV_UNIMPLEMENTED_BYTECODE(Div) VisitBinaryOperation<Operation::kSubtract>();
MAGLEV_UNIMPLEMENTED_BYTECODE(Mod) }
MAGLEV_UNIMPLEMENTED_BYTECODE(Exp) void MaglevGraphBuilder::VisitMul() {
MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseOr) VisitBinaryOperation<Operation::kMultiply>();
MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseXor) }
MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAnd) void MaglevGraphBuilder::VisitDiv() {
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeft) VisitBinaryOperation<Operation::kDivide>();
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRight) }
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogical) void MaglevGraphBuilder::VisitMod() {
VisitBinaryOperation<Operation::kModulus>();
}
void MaglevGraphBuilder::VisitExp() {
VisitBinaryOperation<Operation::kExponentiate>();
}
void MaglevGraphBuilder::VisitBitwiseOr() {
VisitBinaryOperation<Operation::kBitwiseOr>();
}
void MaglevGraphBuilder::VisitBitwiseXor() {
VisitBinaryOperation<Operation::kBitwiseXor>();
}
void MaglevGraphBuilder::VisitBitwiseAnd() {
VisitBinaryOperation<Operation::kBitwiseAnd>();
}
void MaglevGraphBuilder::VisitShiftLeft() {
VisitBinaryOperation<Operation::kShiftLeft>();
}
void MaglevGraphBuilder::VisitShiftRight() {
VisitBinaryOperation<Operation::kShiftRight>();
}
void MaglevGraphBuilder::VisitShiftRightLogical() {
VisitBinaryOperation<Operation::kShiftRightLogical>();
}
MAGLEV_UNIMPLEMENTED_BYTECODE(AddSmi) MAGLEV_UNIMPLEMENTED_BYTECODE(AddSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(SubSmi) MAGLEV_UNIMPLEMENTED_BYTECODE(SubSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(MulSmi) MAGLEV_UNIMPLEMENTED_BYTECODE(MulSmi)
...@@ -200,20 +269,20 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAndSmi) ...@@ -200,20 +269,20 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAndSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeftSmi) MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeftSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightSmi) MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogicalSmi) MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogicalSmi)
void MaglevGraphBuilder::VisitInc() {
// Inc <slot>
FeedbackSlot slot_index = GetSlotOperand(0); void MaglevGraphBuilder::VisitInc() {
ValueNode* value = GetAccumulator(); VisitUnaryOperation<Operation::kIncrement>();
}
ValueNode* node = AddNewNode<Increment>( void MaglevGraphBuilder::VisitDec() {
{value}, compiler::FeedbackSource{feedback(), slot_index}); VisitUnaryOperation<Operation::kDecrement>();
SetAccumulator(node); }
MarkPossibleSideEffect(); void MaglevGraphBuilder::VisitNegate() {
VisitUnaryOperation<Operation::kNegate>();
} }
MAGLEV_UNIMPLEMENTED_BYTECODE(Dec) void MaglevGraphBuilder::VisitBitwiseNot() {
MAGLEV_UNIMPLEMENTED_BYTECODE(Negate) VisitUnaryOperation<Operation::kBitwiseNot>();
MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseNot) }
MAGLEV_UNIMPLEMENTED_BYTECODE(ToBooleanLogicalNot) MAGLEV_UNIMPLEMENTED_BYTECODE(ToBooleanLogicalNot)
MAGLEV_UNIMPLEMENTED_BYTECODE(LogicalNot) MAGLEV_UNIMPLEMENTED_BYTECODE(LogicalNot)
MAGLEV_UNIMPLEMENTED_BYTECODE(TypeOf) MAGLEV_UNIMPLEMENTED_BYTECODE(TypeOf)
...@@ -283,29 +352,17 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread) ...@@ -283,29 +352,17 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqual) MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqual)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqualStrict) MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqualStrict)
template <typename RelNodeT> void MaglevGraphBuilder::VisitTestLessThan() {
void MaglevGraphBuilder::VisitRelNode() { VisitBinaryOperation<Operation::kLessThan>();
// Test[RelationComparison] <src> <slot>
ValueNode* left = LoadRegister(0);
FeedbackSlot slot_index = GetSlotOperand(1);
ValueNode* right = GetAccumulator();
USE(slot_index); // TODO(v8:7700): Use the feedback info.
ValueNode* node = AddNewNode<RelNodeT>(
{left, right}, compiler::FeedbackSource{feedback(), slot_index});
SetAccumulator(node);
MarkPossibleSideEffect();
} }
void MaglevGraphBuilder::VisitTestLessThan() { VisitRelNode<LessThan>(); }
void MaglevGraphBuilder::VisitTestLessThanOrEqual() { void MaglevGraphBuilder::VisitTestLessThanOrEqual() {
VisitRelNode<LessThanOrEqual>(); VisitBinaryOperation<Operation::kLessThanOrEqual>();
}
void MaglevGraphBuilder::VisitTestGreaterThan() {
VisitBinaryOperation<Operation::kGreaterThan>();
} }
void MaglevGraphBuilder::VisitTestGreaterThan() { VisitRelNode<GreaterThan>(); }
void MaglevGraphBuilder::VisitTestGreaterThanOrEqual() { void MaglevGraphBuilder::VisitTestGreaterThanOrEqual() {
VisitRelNode<GreaterThanOrEqual>(); VisitBinaryOperation<Operation::kGreaterThanOrEqual>();
} }
MAGLEV_UNIMPLEMENTED_BYTECODE(TestInstanceOf) MAGLEV_UNIMPLEMENTED_BYTECODE(TestInstanceOf)
......
...@@ -215,6 +215,10 @@ class MaglevGraphBuilder { ...@@ -215,6 +215,10 @@ class MaglevGraphBuilder {
return node; return node;
} }
template <Operation kOperation, typename... Args>
ValueNode* AddNewOperationNode(std::initializer_list<ValueNode*> inputs,
Args&&... args);
template <typename NodeT, typename... Args> template <typename NodeT, typename... Args>
NodeT* AddNewNode(size_t input_count, Args&&... args) { NodeT* AddNewNode(size_t input_count, Args&&... args) {
return AddNode(NewNode<NodeT>(input_count, std::forward<Args>(args)...)); return AddNode(NewNode<NodeT>(input_count, std::forward<Args>(args)...));
...@@ -356,8 +360,15 @@ class MaglevGraphBuilder { ...@@ -356,8 +360,15 @@ class MaglevGraphBuilder {
return block; return block;
} }
template <typename RelNodeT> template <Operation kOperation>
void VisitRelNode(); void BuildGenericUnaryOperationNode();
template <Operation kOperation>
void BuildGenericBinaryOperationNode();
template <Operation kOperation>
void VisitUnaryOperation();
template <Operation kOperation>
void VisitBinaryOperation();
void MergeIntoFrameState(BasicBlock* block, int target); void MergeIntoFrameState(BasicBlock* block, int target);
void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target); void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
......
...@@ -63,7 +63,10 @@ void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node, ...@@ -63,7 +63,10 @@ void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
vreg_state->AllocateVirtualRegister()); vreg_state->AllocateVirtualRegister());
} }
void DefineSameAsFirst(MaglevVregAllocationState* vreg_state, Node* node) { // TODO(victorgomes): Use this for smi binary operation and remove attribute
// [[maybe_unused]].
[[maybe_unused]] void DefineSameAsFirst(MaglevVregAllocationState* vreg_state,
Node* node) {
node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0); node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
} }
...@@ -646,24 +649,6 @@ void LoadNamedGeneric::PrintParams(std::ostream& os, ...@@ -646,24 +649,6 @@ void LoadNamedGeneric::PrintParams(std::ostream& os,
os << "(" << name_ << ")"; os << "(" << name_ << ")";
} }
void Increment::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void Increment::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
// TODO(leszeks): Implement full handling.
__ CallBuiltin(Builtin::kIncrement_WithFeedback);
}
void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state, void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {} const ProcessingState& state) {}
void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state, void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state,
...@@ -702,19 +687,41 @@ void GapMove::PrintParams(std::ostream& os, ...@@ -702,19 +687,41 @@ void GapMove::PrintParams(std::ostream& os,
os << "(" << source() << " → " << target() << ")"; os << "(" << source() << " → " << target() << ")";
} }
void Add::AllocateVreg(MaglevVregAllocationState* vreg_state, namespace {
const ProcessingState& state) {
UseRegister(left_input()); constexpr Builtin BuiltinFor(Operation operation) {
UseRegister(right_input()); switch (operation) {
DefineSameAsFirst(vreg_state, this); #define CASE(name) \
case Operation::k##name: \
return Builtin::k##name##_WithFeedback;
OPERATION_LIST(CASE)
#undef CASE
}
} }
void Add::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) { } // namespace
UNREACHABLE();
template <class Derived, Operation kOperation>
void UnaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
DefineAsFixed(vreg_state, this, kReturnRegister0);
} }
template <class Derived> template <class Derived, Operation kOperation>
void BinaryWithFeedbackNode<Derived>::AllocateRelationalComparisonVreg( void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
__ CallBuiltin(BuiltinFor(kOperation));
}
template <class Derived, Operation kOperation>
void BinaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
MaglevVregAllocationState* vreg_state, const ProcessingState& state) { MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
using D = BinaryOp_WithFeedbackDescriptor; using D = BinaryOp_WithFeedbackDescriptor;
UseFixed(left_input(), D::GetRegisterParameter(D::kLeft)); UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
...@@ -722,8 +729,8 @@ void BinaryWithFeedbackNode<Derived>::AllocateRelationalComparisonVreg( ...@@ -722,8 +729,8 @@ void BinaryWithFeedbackNode<Derived>::AllocateRelationalComparisonVreg(
DefineAsFixed(vreg_state, this, kReturnRegister0); DefineAsFixed(vreg_state, this, kReturnRegister0);
} }
template <class Derived> template <class Derived, Operation kOperation>
void BinaryWithFeedbackNode<Derived>::GenerateRelationalComparisonCode( void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
MaglevCodeGenState* code_gen_state, const ProcessingState& state) { MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
using D = BinaryOp_WithFeedbackDescriptor; using D = BinaryOp_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft)); DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft));
...@@ -731,62 +738,20 @@ void BinaryWithFeedbackNode<Derived>::GenerateRelationalComparisonCode( ...@@ -731,62 +738,20 @@ void BinaryWithFeedbackNode<Derived>::GenerateRelationalComparisonCode(
__ Move(kContextRegister, code_gen_state->native_context().object()); __ Move(kContextRegister, code_gen_state->native_context().object());
__ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index())); __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector); __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
__ CallBuiltin(BuiltinFor(kOperation));
// TODO(jgruber): Implement full handling.
switch (this->opcode()) {
case Opcode::kLessThan:
__ CallBuiltin(Builtin::kLessThan_WithFeedback);
break;
case Opcode::kLessThanOrEqual:
__ CallBuiltin(Builtin::kLessThanOrEqual_WithFeedback);
break;
case Opcode::kGreaterThan:
__ CallBuiltin(Builtin::kGreaterThan_WithFeedback);
break;
case Opcode::kGreaterThanOrEqual:
__ CallBuiltin(Builtin::kGreaterThanOrEqual_WithFeedback);
break;
default:
UNREACHABLE();
}
}
void LessThan::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
Base::AllocateRelationalComparisonVreg(vreg_state, state);
}
void LessThan::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Base::GenerateRelationalComparisonCode(code_gen_state, state);
} }
void LessThanOrEqual::AllocateVreg(MaglevVregAllocationState* vreg_state, #define DEF_OPERATION(Name) \
const ProcessingState& state) { void Name::AllocateVreg(MaglevVregAllocationState* vreg_state, \
Base::AllocateRelationalComparisonVreg(vreg_state, state); const ProcessingState& state) { \
} Base::AllocateVreg(vreg_state, state); \
void LessThanOrEqual::GenerateCode(MaglevCodeGenState* code_gen_state, } \
const ProcessingState& state) { void Name::GenerateCode(MaglevCodeGenState* code_gen_state, \
Base::GenerateRelationalComparisonCode(code_gen_state, state); const ProcessingState& state) { \
} Base::GenerateCode(code_gen_state, state); \
}
void GreaterThan::AllocateVreg(MaglevVregAllocationState* vreg_state, GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
const ProcessingState& state) { #undef DEF_OPERATION
Base::AllocateRelationalComparisonVreg(vreg_state, state);
}
void GreaterThan::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Base::GenerateRelationalComparisonCode(code_gen_state, state);
}
void GreaterThanOrEqual::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
Base::AllocateRelationalComparisonVreg(vreg_state, state);
}
void GreaterThanOrEqual::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Base::GenerateRelationalComparisonCode(code_gen_state, state);
}
void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state, void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) { const ProcessingState& state) {
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include "src/base/threaded-list.h" #include "src/base/threaded-list.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
#include "src/common/globals.h" #include "src/common/globals.h"
#include "src/common/operation.h"
#include "src/compiler/backend/instruction.h" #include "src/compiler/backend/instruction.h"
#include "src/compiler/heap-refs.h" #include "src/compiler/heap-refs.h"
#include "src/interpreter/bytecode-register.h" #include "src/interpreter/bytecode-register.h"
...@@ -35,24 +36,43 @@ class MaglevVregAllocationState; ...@@ -35,24 +36,43 @@ class MaglevVregAllocationState;
// //
// The macro lists below must match the node class hierarchy. // The macro lists below must match the node class hierarchy.
#define GENERIC_OPERATIONS_NODE_LIST(V) \
V(GenericAdd) \
V(GenericSubtract) \
V(GenericMultiply) \
V(GenericDivide) \
V(GenericModulus) \
V(GenericExponentiate) \
V(GenericBitwiseAnd) \
V(GenericBitwiseOr) \
V(GenericBitwiseXor) \
V(GenericShiftLeft) \
V(GenericShiftRight) \
V(GenericShiftRightLogical) \
V(GenericBitwiseNot) \
V(GenericNegate) \
V(GenericIncrement) \
V(GenericDecrement) \
V(GenericEqual) \
V(GenericStrictEqual) \
V(GenericLessThan) \
V(GenericLessThanOrEqual) \
V(GenericGreaterThan) \
V(GenericGreaterThanOrEqual)
#define VALUE_NODE_LIST(V) \ #define VALUE_NODE_LIST(V) \
V(Add) \
V(CallProperty) \ V(CallProperty) \
V(CallUndefinedReceiver) \ V(CallUndefinedReceiver) \
V(Constant) \ V(Constant) \
V(GreaterThan) \
V(GreaterThanOrEqual) \
V(Increment) \
V(InitialValue) \ V(InitialValue) \
V(LessThan) \
V(LessThanOrEqual) \
V(LoadField) \ V(LoadField) \
V(LoadGlobal) \ V(LoadGlobal) \
V(LoadNamedGeneric) \ V(LoadNamedGeneric) \
V(Phi) \ V(Phi) \
V(RegisterInput) \ V(RegisterInput) \
V(RootConstant) \ V(RootConstant) \
V(SmiConstant) V(SmiConstant) \
GENERIC_OPERATIONS_NODE_LIST(V)
#define NODE_LIST(V) \ #define NODE_LIST(V) \
V(Checkpoint) \ V(Checkpoint) \
...@@ -684,11 +704,16 @@ class FixedInputValueNodeT : public ValueNodeT<Derived> { ...@@ -684,11 +704,16 @@ class FixedInputValueNodeT : public ValueNodeT<Derived> {
} }
}; };
template <class Derived> template <class Derived, Operation kOperation>
class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> { class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
using Base = FixedInputValueNodeT<1, Derived>; using Base = FixedInputValueNodeT<1, Derived>;
public: public:
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::Call();
static constexpr int kOperandIndex = 0;
Input& operand_input() { return Node::input(kOperandIndex); }
compiler::FeedbackSource feedback() const { return feedback_; } compiler::FeedbackSource feedback() const { return feedback_; }
protected: protected:
...@@ -696,16 +721,18 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> { ...@@ -696,16 +721,18 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
const compiler::FeedbackSource& feedback) const compiler::FeedbackSource& feedback)
: Base(input_count), feedback_(feedback) {} : Base(input_count), feedback_(feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
const compiler::FeedbackSource feedback_; const compiler::FeedbackSource feedback_;
}; };
template <class Derived> template <class Derived, Operation kOperation>
class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> { class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>; using Base = FixedInputValueNodeT<2, Derived>;
public: public:
compiler::FeedbackSource feedback() const { return feedback_; }
// The implementation currently calls runtime. // The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::Call(); static constexpr OpProperties kProperties = OpProperties::Call();
...@@ -713,22 +740,42 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> { ...@@ -713,22 +740,42 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
static constexpr int kRightIndex = 1; static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); } Input& left_input() { return Node::input(kLeftIndex); }
Input& right_input() { return Node::input(kRightIndex); } Input& right_input() { return Node::input(kRightIndex); }
compiler::FeedbackSource feedback() const { return feedback_; }
protected: protected:
BinaryWithFeedbackNode(size_t input_count, BinaryWithFeedbackNode(size_t input_count,
const compiler::FeedbackSource& feedback) const compiler::FeedbackSource& feedback)
: Base(input_count), feedback_(feedback) {} : Base(input_count), feedback_(feedback) {}
// Only to be called when Derived is a RelationalComparisonNode. void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void AllocateRelationalComparisonVreg(MaglevVregAllocationState*, void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
const ProcessingState&); void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
void GenerateRelationalComparisonCode(MaglevCodeGenState*,
const ProcessingState&);
protected:
const compiler::FeedbackSource feedback_; const compiler::FeedbackSource feedback_;
}; };
// Defines one concrete generic-operation node class. The class derives from
// Super<Name, Operation::kOpName> (UnaryWithFeedbackNode or
// BinaryWithFeedbackNode); AllocateVreg/GenerateCode are declared here and
// defined out-of-line to forward to the shared base implementations.
#define DEF_OPERATION_NODE(Name, Super, OpName)                            \
  class Name : public Super<Name, Operation::k##OpName> {                  \
    using Base = Super<Name, Operation::k##OpName>;                        \
                                                                           \
   public:                                                                 \
    Name(size_t input_count, const compiler::FeedbackSource& feedback)     \
        : Base(input_count, feedback) {}                                   \
    void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
    void GenerateCode(MaglevCodeGenState*, const ProcessingState&);        \
    void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}         \
  };

// Unary operations take a single value input plus a feedback source.
#define DEF_UNARY_WITH_FEEDBACK_NODE(Name) \
  DEF_OPERATION_NODE(Generic##Name, UnaryWithFeedbackNode, Name)
// Binary operations (arithmetic and comparison) take left/right inputs plus
// a feedback source.
#define DEF_BINARY_WITH_FEEDBACK_NODE(Name) \
  DEF_OPERATION_NODE(Generic##Name, BinaryWithFeedbackNode, Name)

// Instantiate one Generic<Name> node class per operation; the resulting
// class names must match GENERIC_OPERATIONS_NODE_LIST.
UNARY_OPERATION_LIST(DEF_UNARY_WITH_FEEDBACK_NODE)
ARITHMETIC_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
#undef DEF_UNARY_WITH_FEEDBACK_NODE
#undef DEF_BINARY_WITH_FEEDBACK_NODE
class InitialValue : public FixedInputValueNodeT<0, InitialValue> { class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
using Base = FixedInputValueNodeT<0, InitialValue>; using Base = FixedInputValueNodeT<0, InitialValue>;
...@@ -968,24 +1015,6 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> { ...@@ -968,24 +1015,6 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
const compiler::NameRef name_; const compiler::NameRef name_;
}; };
class Increment : public UnaryWithFeedbackNode<Increment> {
using Base = UnaryWithFeedbackNode<Increment>;
public:
Increment(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::Call();
static constexpr int kOperandIndex = 0;
Input& operand_input() { return input(kOperandIndex); }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class StoreToFrame : public FixedInputNodeT<0, StoreToFrame> { class StoreToFrame : public FixedInputNodeT<0, StoreToFrame> {
using Base = FixedInputNodeT<0, StoreToFrame>; using Base = FixedInputNodeT<0, StoreToFrame>;
...@@ -1026,67 +1055,6 @@ class GapMove : public FixedInputNodeT<0, GapMove> { ...@@ -1026,67 +1055,6 @@ class GapMove : public FixedInputNodeT<0, GapMove> {
compiler::AllocatedOperand target_; compiler::AllocatedOperand target_;
}; };
class Add : public BinaryWithFeedbackNode<Add> {
using Base = BinaryWithFeedbackNode<Add>;
public:
explicit Add(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class LessThan : public BinaryWithFeedbackNode<LessThan> {
using Base = BinaryWithFeedbackNode<LessThan>;
public:
LessThan(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class LessThanOrEqual : public BinaryWithFeedbackNode<LessThanOrEqual> {
using Base = BinaryWithFeedbackNode<LessThanOrEqual>;
public:
LessThanOrEqual(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class GreaterThan : public BinaryWithFeedbackNode<GreaterThan> {
using Base = BinaryWithFeedbackNode<GreaterThan>;
public:
GreaterThan(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class GreaterThanOrEqual : public BinaryWithFeedbackNode<GreaterThanOrEqual> {
using Base = BinaryWithFeedbackNode<GreaterThanOrEqual>;
public:
GreaterThanOrEqual(size_t input_count,
const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
// TODO(verwaest): It may make more sense to buffer phis in merged_states until // TODO(verwaest): It may make more sense to buffer phis in merged_states until
// we set up the interpreter frame state for code generation. At that point we // we set up the interpreter frame state for code generation. At that point we
// can generate correctly-sized phis. // can generate correctly-sized phis.
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include "src/common/assert-scope.h" #include "src/common/assert-scope.h"
#include "src/common/checks.h" #include "src/common/checks.h"
#include "src/common/message-template.h" #include "src/common/message-template.h"
#include "src/common/operation.h"
#include "src/common/ptr-compr.h" #include "src/common/ptr-compr.h"
#include "src/flags/flags.h" #include "src/flags/flags.h"
#include "src/objects/elements-kind.h" #include "src/objects/elements-kind.h"
......
...@@ -36,7 +36,7 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op, ...@@ -36,7 +36,7 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name) { \ WASM_EXEC_TEST(I32Atomic##Name) { \
RunU32BinOp(execution_tier, kExprI32Atomic##Name, Name); \ RunU32BinOp(execution_tier, kExprI32Atomic##Name, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op, void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
...@@ -65,7 +65,7 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op, ...@@ -65,7 +65,7 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name##16U) { \ WASM_EXEC_TEST(I32Atomic##Name##16U) { \
RunU16BinOp(execution_tier, kExprI32Atomic##Name##16U, Name); \ RunU16BinOp(execution_tier, kExprI32Atomic##Name##16U, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op, void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
...@@ -93,7 +93,7 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op, ...@@ -93,7 +93,7 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name##8U) { \ WASM_EXEC_TEST(I32Atomic##Name##8U) { \
RunU8BinOp(execution_tier, kExprI32Atomic##Name##8U, Name); \ RunU8BinOp(execution_tier, kExprI32Atomic##Name##8U, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
WASM_EXEC_TEST(I32AtomicCompareExchange) { WASM_EXEC_TEST(I32AtomicCompareExchange) {
......
...@@ -36,7 +36,7 @@ void RunU64BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op, ...@@ -36,7 +36,7 @@ void RunU64BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name) { \ WASM_EXEC_TEST(I64Atomic##Name) { \
RunU64BinOp(execution_tier, kExprI64Atomic##Name, Name); \ RunU64BinOp(execution_tier, kExprI64Atomic##Name, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op, void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
...@@ -65,7 +65,7 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op, ...@@ -65,7 +65,7 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##32U) { \ WASM_EXEC_TEST(I64Atomic##Name##32U) { \
RunU32BinOp(execution_tier, kExprI64Atomic##Name##32U, Name); \ RunU32BinOp(execution_tier, kExprI64Atomic##Name##32U, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op, void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
...@@ -94,7 +94,7 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op, ...@@ -94,7 +94,7 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##16U) { \ WASM_EXEC_TEST(I64Atomic##Name##16U) { \
RunU16BinOp(execution_tier, kExprI64Atomic##Name##16U, Name); \ RunU16BinOp(execution_tier, kExprI64Atomic##Name##16U, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op, void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
...@@ -122,7 +122,7 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op, ...@@ -122,7 +122,7 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##8U) { \ WASM_EXEC_TEST(I64Atomic##Name##8U) { \
RunU8BinOp(execution_tier, kExprI64Atomic##Name##8U, Name); \ RunU8BinOp(execution_tier, kExprI64Atomic##Name##8U, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicCompareExchange) { WASM_EXEC_TEST(I64AtomicCompareExchange) {
...@@ -380,7 +380,7 @@ void RunDropTest(TestExecutionTier execution_tier, WasmOpcode wasm_op, ...@@ -380,7 +380,7 @@ void RunDropTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##Drop) { \ WASM_EXEC_TEST(I64Atomic##Name##Drop) { \
RunDropTest(execution_tier, kExprI64Atomic##Name, Name); \ RunDropTest(execution_tier, kExprI64Atomic##Name, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicSub16UDrop) { WASM_EXEC_TEST(I64AtomicSub16UDrop) {
...@@ -499,7 +499,7 @@ void RunConvertTest(TestExecutionTier execution_tier, WasmOpcode wasm_op, ...@@ -499,7 +499,7 @@ void RunConvertTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64AtomicConvert##Name) { \ WASM_EXEC_TEST(I64AtomicConvert##Name) { \
RunConvertTest(execution_tier, kExprI64Atomic##Name, Name); \ RunConvertTest(execution_tier, kExprI64Atomic##Name, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicConvertCompareExchange) { WASM_EXEC_TEST(I64AtomicConvertCompareExchange) {
...@@ -546,7 +546,7 @@ void RunNonConstIndexTest(TestExecutionTier execution_tier, WasmOpcode wasm_op, ...@@ -546,7 +546,7 @@ void RunNonConstIndexTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64AtomicConstIndex##Name##Narrow) { \ WASM_EXEC_TEST(I64AtomicConstIndex##Name##Narrow) { \
RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name##32U, Name); \ RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name##32U, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
// Test a set of Regular operations // Test a set of Regular operations
...@@ -554,7 +554,7 @@ OPERATION_LIST(TEST_OPERATION) ...@@ -554,7 +554,7 @@ OPERATION_LIST(TEST_OPERATION)
WASM_EXEC_TEST(I64AtomicConstIndex##Name) { \ WASM_EXEC_TEST(I64AtomicConstIndex##Name) { \
RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name, Name); \ RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name, Name); \
} }
OPERATION_LIST(TEST_OPERATION) WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION #undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchangeNarrow) { WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchangeNarrow) {
......
...@@ -13,12 +13,12 @@ namespace v8 { ...@@ -13,12 +13,12 @@ namespace v8 {
namespace internal { namespace internal {
namespace wasm { namespace wasm {
#define OPERATION_LIST(V) \ #define WASM_ATOMIC_OPERATION_LIST(V) \
V(Add) \ V(Add) \
V(Sub) \ V(Sub) \
V(And) \ V(And) \
V(Or) \ V(Or) \
V(Xor) \ V(Xor) \
V(Exchange) V(Exchange)
using Uint64BinOp = uint64_t (*)(uint64_t, uint64_t); using Uint64BinOp = uint64_t (*)(uint64_t, uint64_t);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment