Commit 9694d9b6 authored by Victor Gomes, committed by V8 LUCI CQ

[maglev] Generic binary operations

This CL implements the binary operation bytecodes as generic nodes that
call the corresponding builtin.

Bug: v8:7700
Change-Id: I82c5e20e4103d4ef367184af1242bae7f7f93fe0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3509392
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79462}
parent 9f9f36f8
@@ -1515,34 +1515,6 @@ class CompareOperationFeedback {
};
};
enum class Operation {
// Binary operations.
kAdd,
kSubtract,
kMultiply,
kDivide,
kModulus,
kExponentiate,
kBitwiseAnd,
kBitwiseOr,
kBitwiseXor,
kShiftLeft,
kShiftRight,
kShiftRightLogical,
// Unary operations.
kBitwiseNot,
kNegate,
kIncrement,
kDecrement,
// Compare operations.
kEqual,
kStrictEqual,
kLessThan,
kLessThanOrEqual,
kGreaterThan,
kGreaterThanOrEqual,
};
// Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMMON_OPERATION_H_
#define V8_COMMON_OPERATION_H_
#include <ostream>
#define ARITHMETIC_OPERATION_LIST(V) \
V(Add) \
V(Subtract) \
V(Multiply) \
V(Divide) \
V(Modulus) \
V(Exponentiate) \
V(BitwiseAnd) \
V(BitwiseOr) \
V(BitwiseXor) \
V(ShiftLeft) \
V(ShiftRight) \
V(ShiftRightLogical)
#define UNARY_OPERATION_LIST(V) \
V(BitwiseNot) \
V(Negate) \
V(Increment) \
V(Decrement)
#define COMPARISON_OPERATION_LIST(V) \
V(Equal) \
V(StrictEqual) \
V(LessThan) \
V(LessThanOrEqual) \
V(GreaterThan) \
V(GreaterThanOrEqual)
#define OPERATION_LIST(V) \
ARITHMETIC_OPERATION_LIST(V) \
UNARY_OPERATION_LIST(V) \
COMPARISON_OPERATION_LIST(V)
enum class Operation {
#define DEFINE_OP(name) k##name,
OPERATION_LIST(DEFINE_OP)
#undef DEFINE_OP
};
inline std::ostream& operator<<(std::ostream& os, const Operation& operation) {
switch (operation) {
#define CASE(name) \
case Operation::k##name: \
return os << #name;
OPERATION_LIST(CASE)
#undef CASE
}
}
#endif // V8_COMMON_OPERATION_H_
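As an aside (editor's illustration, not part of the diff), the header above relies on the X-macro pattern: a single OPERATION_LIST stamps out the enum, the stream operator, and later the operation-to-builtin dispatch in maglev-ir.cc. The following self-contained sketch, using an abbreviated Demo* list in place of the real macros, shows the same pattern:

#include <iostream>
#include <string>

#define DEMO_OPERATION_LIST(V) V(Add) V(Subtract) V(LessThan)  // abbreviated

enum class DemoOperation {
#define DEFINE_OP(name) k##name,
  DEMO_OPERATION_LIST(DEFINE_OP)
#undef DEFINE_OP
};

// Analogous to BuiltinFor() below: one switch case generated per list entry.
std::string DemoBuiltinNameFor(DemoOperation op) {
  switch (op) {
#define CASE(name)             \
  case DemoOperation::k##name: \
    return #name "_WithFeedback";
    DEMO_OPERATION_LIST(CASE)
#undef CASE
  }
  return "";
}

int main() {
  // Prints "Add_WithFeedback".
  std::cout << DemoBuiltinNameFor(DemoOperation::kAdd) << "\n";
}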
@@ -29,6 +29,51 @@ namespace maglev {
#define MAGLEV_UNIMPLEMENTED_BYTECODE(Name) \
void MaglevGraphBuilder::Visit##Name() { MAGLEV_UNIMPLEMENTED(Name); }
template <Operation kOperation, typename... Args>
ValueNode* MaglevGraphBuilder::AddNewOperationNode(
std::initializer_list<ValueNode*> inputs, Args&&... args) {
switch (kOperation) {
#define CASE(Name) \
case Operation::k##Name: \
return AddNewNode<Generic##Name>(inputs, std::forward<Args>(args)...);
OPERATION_LIST(CASE)
#undef CASE
}
}
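// Editor's note (not part of the diff): kOperation is a template parameter,
// so each instantiation of AddNewOperationNode resolves the switch above to a
// single AddNewNode<Generic...> call for the matching generic node type.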
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
FeedbackSlot slot_index = GetSlotOperand(0);
ValueNode* value = GetAccumulator();
ValueNode* node = AddNewOperationNode<kOperation>(
{value}, compiler::FeedbackSource{feedback(), slot_index});
SetAccumulator(node);
MarkPossibleSideEffect();
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericBinaryOperationNode() {
ValueNode* left = LoadRegister(0);
FeedbackSlot slot_index = GetSlotOperand(1);
ValueNode* right = GetAccumulator();
ValueNode* node = AddNewOperationNode<kOperation>(
{left, right}, compiler::FeedbackSource{feedback(), slot_index});
SetAccumulator(node);
MarkPossibleSideEffect();
}
template <Operation kOperation>
void MaglevGraphBuilder::VisitUnaryOperation() {
// TODO(victorgomes): Use feedback info and create optimized versions.
BuildGenericUnaryOperationNode<kOperation>();
}
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinaryOperation() {
// TODO(victorgomes): Use feedback info and create optimized versions.
BuildGenericBinaryOperationNode<kOperation>();
}
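// Editor's note (not part of the diff): as a concrete example, the Add
// bytecode reads its left operand from a register and its right operand from
// the accumulator. VisitAdd() below instantiates
// VisitBinaryOperation<Operation::kAdd>, which builds a GenericAdd node that
// carries the feedback slot; code generation for that node (see maglev-ir.cc)
// calls Builtin::kAdd_WithFeedback, and the result lands back in the
// accumulator.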
void MaglevGraphBuilder::VisitLdar() { SetAccumulator(LoadRegister(0)); }
void MaglevGraphBuilder::VisitLdaZero() {
@@ -176,18 +221,42 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnProperty)
MAGLEV_UNIMPLEMENTED_BYTECODE(StaInArrayLiteral)
MAGLEV_UNIMPLEMENTED_BYTECODE(DefineKeyedOwnPropertyInLiteral)
MAGLEV_UNIMPLEMENTED_BYTECODE(CollectTypeProfile)
MAGLEV_UNIMPLEMENTED_BYTECODE(Add)
MAGLEV_UNIMPLEMENTED_BYTECODE(Sub)
MAGLEV_UNIMPLEMENTED_BYTECODE(Mul)
MAGLEV_UNIMPLEMENTED_BYTECODE(Div)
MAGLEV_UNIMPLEMENTED_BYTECODE(Mod)
MAGLEV_UNIMPLEMENTED_BYTECODE(Exp)
MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseOr)
MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseXor)
MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAnd)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeft)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRight)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogical)
void MaglevGraphBuilder::VisitAdd() { VisitBinaryOperation<Operation::kAdd>(); }
void MaglevGraphBuilder::VisitSub() {
VisitBinaryOperation<Operation::kSubtract>();
}
void MaglevGraphBuilder::VisitMul() {
VisitBinaryOperation<Operation::kMultiply>();
}
void MaglevGraphBuilder::VisitDiv() {
VisitBinaryOperation<Operation::kDivide>();
}
void MaglevGraphBuilder::VisitMod() {
VisitBinaryOperation<Operation::kModulus>();
}
void MaglevGraphBuilder::VisitExp() {
VisitBinaryOperation<Operation::kExponentiate>();
}
void MaglevGraphBuilder::VisitBitwiseOr() {
VisitBinaryOperation<Operation::kBitwiseOr>();
}
void MaglevGraphBuilder::VisitBitwiseXor() {
VisitBinaryOperation<Operation::kBitwiseXor>();
}
void MaglevGraphBuilder::VisitBitwiseAnd() {
VisitBinaryOperation<Operation::kBitwiseAnd>();
}
void MaglevGraphBuilder::VisitShiftLeft() {
VisitBinaryOperation<Operation::kShiftLeft>();
}
void MaglevGraphBuilder::VisitShiftRight() {
VisitBinaryOperation<Operation::kShiftRight>();
}
void MaglevGraphBuilder::VisitShiftRightLogical() {
VisitBinaryOperation<Operation::kShiftRightLogical>();
}
MAGLEV_UNIMPLEMENTED_BYTECODE(AddSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(SubSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(MulSmi)
@@ -200,20 +269,20 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseAndSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftLeftSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightSmi)
MAGLEV_UNIMPLEMENTED_BYTECODE(ShiftRightLogicalSmi)
void MaglevGraphBuilder::VisitInc() {
// Inc <slot>
FeedbackSlot slot_index = GetSlotOperand(0);
ValueNode* value = GetAccumulator();
ValueNode* node = AddNewNode<Increment>(
{value}, compiler::FeedbackSource{feedback(), slot_index});
SetAccumulator(node);
MarkPossibleSideEffect();
void MaglevGraphBuilder::VisitInc() {
VisitUnaryOperation<Operation::kIncrement>();
}
void MaglevGraphBuilder::VisitDec() {
VisitUnaryOperation<Operation::kDecrement>();
}
void MaglevGraphBuilder::VisitNegate() {
VisitUnaryOperation<Operation::kNegate>();
}
MAGLEV_UNIMPLEMENTED_BYTECODE(Dec)
MAGLEV_UNIMPLEMENTED_BYTECODE(Negate)
MAGLEV_UNIMPLEMENTED_BYTECODE(BitwiseNot)
void MaglevGraphBuilder::VisitBitwiseNot() {
VisitUnaryOperation<Operation::kBitwiseNot>();
}
MAGLEV_UNIMPLEMENTED_BYTECODE(ToBooleanLogicalNot)
MAGLEV_UNIMPLEMENTED_BYTECODE(LogicalNot)
MAGLEV_UNIMPLEMENTED_BYTECODE(TypeOf)
@@ -283,29 +352,17 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(ConstructWithSpread)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqual)
MAGLEV_UNIMPLEMENTED_BYTECODE(TestEqualStrict)
template <typename RelNodeT>
void MaglevGraphBuilder::VisitRelNode() {
// Test[RelationComparison] <src> <slot>
ValueNode* left = LoadRegister(0);
FeedbackSlot slot_index = GetSlotOperand(1);
ValueNode* right = GetAccumulator();
USE(slot_index); // TODO(v8:7700): Use the feedback info.
ValueNode* node = AddNewNode<RelNodeT>(
{left, right}, compiler::FeedbackSource{feedback(), slot_index});
SetAccumulator(node);
MarkPossibleSideEffect();
void MaglevGraphBuilder::VisitTestLessThan() {
VisitBinaryOperation<Operation::kLessThan>();
}
void MaglevGraphBuilder::VisitTestLessThan() { VisitRelNode<LessThan>(); }
void MaglevGraphBuilder::VisitTestLessThanOrEqual() {
VisitRelNode<LessThanOrEqual>();
VisitBinaryOperation<Operation::kLessThanOrEqual>();
}
void MaglevGraphBuilder::VisitTestGreaterThan() {
VisitBinaryOperation<Operation::kGreaterThan>();
}
void MaglevGraphBuilder::VisitTestGreaterThan() { VisitRelNode<GreaterThan>(); }
void MaglevGraphBuilder::VisitTestGreaterThanOrEqual() {
VisitRelNode<GreaterThanOrEqual>();
VisitBinaryOperation<Operation::kGreaterThanOrEqual>();
}
MAGLEV_UNIMPLEMENTED_BYTECODE(TestInstanceOf)
@@ -215,6 +215,10 @@ class MaglevGraphBuilder {
return node;
}
template <Operation kOperation, typename... Args>
ValueNode* AddNewOperationNode(std::initializer_list<ValueNode*> inputs,
Args&&... args);
template <typename NodeT, typename... Args>
NodeT* AddNewNode(size_t input_count, Args&&... args) {
return AddNode(NewNode<NodeT>(input_count, std::forward<Args>(args)...));
@@ -356,8 +360,15 @@ class MaglevGraphBuilder {
return block;
}
template <typename RelNodeT>
void VisitRelNode();
template <Operation kOperation>
void BuildGenericUnaryOperationNode();
template <Operation kOperation>
void BuildGenericBinaryOperationNode();
template <Operation kOperation>
void VisitUnaryOperation();
template <Operation kOperation>
void VisitBinaryOperation();
void MergeIntoFrameState(BasicBlock* block, int target);
void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
@@ -63,7 +63,10 @@ void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
vreg_state->AllocateVirtualRegister());
}
void DefineSameAsFirst(MaglevVregAllocationState* vreg_state, Node* node) {
// TODO(victorgomes): Use this for smi binary operation and remove attribute
// [[maybe_unused]].
[[maybe_unused]] void DefineSameAsFirst(MaglevVregAllocationState* vreg_state,
Node* node) {
node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
}
@@ -646,24 +649,6 @@ void LoadNamedGeneric::PrintParams(std::ostream& os,
os << "(" << name_ << ")";
}
void Increment::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void Increment::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
// TODO(leszeks): Implement full handling.
__ CallBuiltin(Builtin::kIncrement_WithFeedback);
}
void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state,
@@ -702,19 +687,41 @@ void GapMove::PrintParams(std::ostream& os,
os << "(" << source() << " → " << target() << ")";
}
void Add::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
namespace {
constexpr Builtin BuiltinFor(Operation operation) {
switch (operation) {
#define CASE(name) \
case Operation::k##name: \
return Builtin::k##name##_WithFeedback;
OPERATION_LIST(CASE)
#undef CASE
}
}
void Add::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
UNREACHABLE();
} // namespace
template <class Derived, Operation kOperation>
void UnaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
template <class Derived>
void BinaryWithFeedbackNode<Derived>::AllocateRelationalComparisonVreg(
template <class Derived, Operation kOperation>
void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
__ CallBuiltin(BuiltinFor(kOperation));
}
template <class Derived, Operation kOperation>
void BinaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
using D = BinaryOp_WithFeedbackDescriptor;
UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
@@ -722,8 +729,8 @@ void BinaryWithFeedbackNode<Derived>::AllocateRelationalComparisonVreg(
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
template <class Derived>
void BinaryWithFeedbackNode<Derived>::GenerateRelationalComparisonCode(
template <class Derived, Operation kOperation>
void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
using D = BinaryOp_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft));
@@ -731,62 +738,20 @@ void BinaryWithFeedbackNode<Derived>::GenerateRelationalComparisonCode(
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
// TODO(jgruber): Implement full handling.
switch (this->opcode()) {
case Opcode::kLessThan:
__ CallBuiltin(Builtin::kLessThan_WithFeedback);
break;
case Opcode::kLessThanOrEqual:
__ CallBuiltin(Builtin::kLessThanOrEqual_WithFeedback);
break;
case Opcode::kGreaterThan:
__ CallBuiltin(Builtin::kGreaterThan_WithFeedback);
break;
case Opcode::kGreaterThanOrEqual:
__ CallBuiltin(Builtin::kGreaterThanOrEqual_WithFeedback);
break;
default:
UNREACHABLE();
}
}
void LessThan::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
Base::AllocateRelationalComparisonVreg(vreg_state, state);
}
void LessThan::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Base::GenerateRelationalComparisonCode(code_gen_state, state);
__ CallBuiltin(BuiltinFor(kOperation));
}
void LessThanOrEqual::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
Base::AllocateRelationalComparisonVreg(vreg_state, state);
}
void LessThanOrEqual::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Base::GenerateRelationalComparisonCode(code_gen_state, state);
}
void GreaterThan::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
Base::AllocateRelationalComparisonVreg(vreg_state, state);
}
void GreaterThan::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Base::GenerateRelationalComparisonCode(code_gen_state, state);
}
void GreaterThanOrEqual::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
Base::AllocateRelationalComparisonVreg(vreg_state, state);
}
void GreaterThanOrEqual::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Base::GenerateRelationalComparisonCode(code_gen_state, state);
}
#define DEF_OPERATION(Name) \
void Name::AllocateVreg(MaglevVregAllocationState* vreg_state, \
const ProcessingState& state) { \
Base::AllocateVreg(vreg_state, state); \
} \
void Name::GenerateCode(MaglevCodeGenState* code_gen_state, \
const ProcessingState& state) { \
Base::GenerateCode(code_gen_state, state); \
}
GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
#undef DEF_OPERATION
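// Editor's note (not part of the diff): GENERIC_OPERATIONS_NODE_LIST covers
// all 22 Generic* nodes declared in maglev-ir.h, so the
// UnaryWithFeedbackNode/BinaryWithFeedbackNode templates above supply the
// register allocation and the CallBuiltin(BuiltinFor(kOperation)) code
// generation for every generic arithmetic, unary and comparison operation.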
void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
@@ -11,6 +11,7 @@
#include "src/base/threaded-list.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
#include "src/common/operation.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/heap-refs.h"
#include "src/interpreter/bytecode-register.h"
@@ -35,24 +36,43 @@ class MaglevVregAllocationState;
//
// The macro lists below must match the node class hierarchy.
#define GENERIC_OPERATIONS_NODE_LIST(V) \
V(GenericAdd) \
V(GenericSubtract) \
V(GenericMultiply) \
V(GenericDivide) \
V(GenericModulus) \
V(GenericExponentiate) \
V(GenericBitwiseAnd) \
V(GenericBitwiseOr) \
V(GenericBitwiseXor) \
V(GenericShiftLeft) \
V(GenericShiftRight) \
V(GenericShiftRightLogical) \
V(GenericBitwiseNot) \
V(GenericNegate) \
V(GenericIncrement) \
V(GenericDecrement) \
V(GenericEqual) \
V(GenericStrictEqual) \
V(GenericLessThan) \
V(GenericLessThanOrEqual) \
V(GenericGreaterThan) \
V(GenericGreaterThanOrEqual)
#define VALUE_NODE_LIST(V) \
V(Add) \
V(CallProperty) \
V(CallUndefinedReceiver) \
V(Constant) \
V(GreaterThan) \
V(GreaterThanOrEqual) \
V(Increment) \
V(InitialValue) \
V(LessThan) \
V(LessThanOrEqual) \
V(LoadField) \
V(LoadGlobal) \
V(LoadNamedGeneric) \
V(Phi) \
V(RegisterInput) \
V(RootConstant) \
V(SmiConstant)
V(SmiConstant) \
GENERIC_OPERATIONS_NODE_LIST(V)
#define NODE_LIST(V) \
V(Checkpoint) \
@@ -684,11 +704,16 @@ class FixedInputValueNodeT : public ValueNodeT<Derived> {
}
};
template <class Derived>
template <class Derived, Operation kOperation>
class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
using Base = FixedInputValueNodeT<1, Derived>;
public:
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::Call();
static constexpr int kOperandIndex = 0;
Input& operand_input() { return Node::input(kOperandIndex); }
compiler::FeedbackSource feedback() const { return feedback_; }
protected:
@@ -696,16 +721,18 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
const compiler::FeedbackSource& feedback)
: Base(input_count), feedback_(feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
const compiler::FeedbackSource feedback_;
};
template <class Derived>
template <class Derived, Operation kOperation>
class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
using Base = FixedInputValueNodeT<2, Derived>;
public:
compiler::FeedbackSource feedback() const { return feedback_; }
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::Call();
@@ -713,22 +740,42 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
static constexpr int kRightIndex = 1;
Input& left_input() { return Node::input(kLeftIndex); }
Input& right_input() { return Node::input(kRightIndex); }
compiler::FeedbackSource feedback() const { return feedback_; }
protected:
BinaryWithFeedbackNode(size_t input_count,
const compiler::FeedbackSource& feedback)
: Base(input_count), feedback_(feedback) {}
// Only to be called when Derived is a RelationalComparisonNode.
void AllocateRelationalComparisonVreg(MaglevVregAllocationState*,
const ProcessingState&);
void GenerateRelationalComparisonCode(MaglevCodeGenState*,
const ProcessingState&);
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
protected:
const compiler::FeedbackSource feedback_;
};
#define DEF_OPERATION_NODE(Name, Super, OpName) \
class Name : public Super<Name, Operation::k##OpName> { \
using Base = Super<Name, Operation::k##OpName>; \
\
public: \
Name(size_t input_count, const compiler::FeedbackSource& feedback) \
: Base(input_count, feedback) {} \
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); \
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \
};
#define DEF_UNARY_WITH_FEEDBACK_NODE(Name) \
DEF_OPERATION_NODE(Generic##Name, UnaryWithFeedbackNode, Name)
#define DEF_BINARY_WITH_FEEDBACK_NODE(Name) \
DEF_OPERATION_NODE(Generic##Name, BinaryWithFeedbackNode, Name)
UNARY_OPERATION_LIST(DEF_UNARY_WITH_FEEDBACK_NODE)
ARITHMETIC_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
#undef DEF_UNARY_WITH_FEEDBACK_NODE
#undef DEF_BINARY_WITH_FEEDBACK_NODE
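// Editor's note (not part of the diff): as an example of what these macros
// generate, DEF_BINARY_WITH_FEEDBACK_NODE(Add) expands to roughly
//
//   class GenericAdd : public BinaryWithFeedbackNode<GenericAdd, Operation::kAdd> {
//     using Base = BinaryWithFeedbackNode<GenericAdd, Operation::kAdd>;
//
//    public:
//     GenericAdd(size_t input_count, const compiler::FeedbackSource& feedback)
//         : Base(input_count, feedback) {}
//     void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
//     void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
//     void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
//   };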
class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
using Base = FixedInputValueNodeT<0, InitialValue>;
@@ -968,24 +1015,6 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
const compiler::NameRef name_;
};
class Increment : public UnaryWithFeedbackNode<Increment> {
using Base = UnaryWithFeedbackNode<Increment>;
public:
Increment(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::Call();
static constexpr int kOperandIndex = 0;
Input& operand_input() { return input(kOperandIndex); }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class StoreToFrame : public FixedInputNodeT<0, StoreToFrame> {
using Base = FixedInputNodeT<0, StoreToFrame>;
@@ -1026,67 +1055,6 @@ class GapMove : public FixedInputNodeT<0, GapMove> {
compiler::AllocatedOperand target_;
};
class Add : public BinaryWithFeedbackNode<Add> {
using Base = BinaryWithFeedbackNode<Add>;
public:
explicit Add(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class LessThan : public BinaryWithFeedbackNode<LessThan> {
using Base = BinaryWithFeedbackNode<LessThan>;
public:
LessThan(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class LessThanOrEqual : public BinaryWithFeedbackNode<LessThanOrEqual> {
using Base = BinaryWithFeedbackNode<LessThanOrEqual>;
public:
LessThanOrEqual(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class GreaterThan : public BinaryWithFeedbackNode<GreaterThan> {
using Base = BinaryWithFeedbackNode<GreaterThan>;
public:
GreaterThan(size_t input_count, const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class GreaterThanOrEqual : public BinaryWithFeedbackNode<GreaterThanOrEqual> {
using Base = BinaryWithFeedbackNode<GreaterThanOrEqual>;
public:
GreaterThanOrEqual(size_t input_count,
const compiler::FeedbackSource& feedback)
: Base(input_count, feedback) {}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
// TODO(verwaest): It may make more sense to buffer phis in merged_states until
// we set up the interpreter frame state for code generation. At that point we
// can generate correctly-sized phis.
@@ -19,6 +19,7 @@
#include "src/common/assert-scope.h"
#include "src/common/checks.h"
#include "src/common/message-template.h"
#include "src/common/operation.h"
#include "src/common/ptr-compr.h"
#include "src/flags/flags.h"
#include "src/objects/elements-kind.h"
@@ -36,7 +36,7 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name) { \
RunU32BinOp(execution_tier, kExprI32Atomic##Name, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
@@ -65,7 +65,7 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name##16U) { \
RunU16BinOp(execution_tier, kExprI32Atomic##Name##16U, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
@@ -93,7 +93,7 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I32Atomic##Name##8U) { \
RunU8BinOp(execution_tier, kExprI32Atomic##Name##8U, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I32AtomicCompareExchange) {
@@ -36,7 +36,7 @@ void RunU64BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name) { \
RunU64BinOp(execution_tier, kExprI64Atomic##Name, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
@@ -65,7 +65,7 @@ void RunU32BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##32U) { \
RunU32BinOp(execution_tier, kExprI64Atomic##Name##32U, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
@@ -94,7 +94,7 @@ void RunU16BinOp(TestExecutionTier tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##16U) { \
RunU16BinOp(execution_tier, kExprI64Atomic##Name##16U, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
@@ -122,7 +122,7 @@ void RunU8BinOp(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##8U) { \
RunU8BinOp(execution_tier, kExprI64Atomic##Name##8U, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicCompareExchange) {
@@ -380,7 +380,7 @@ void RunDropTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64Atomic##Name##Drop) { \
RunDropTest(execution_tier, kExprI64Atomic##Name, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicSub16UDrop) {
@@ -499,7 +499,7 @@ void RunConvertTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64AtomicConvert##Name) { \
RunConvertTest(execution_tier, kExprI64Atomic##Name, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicConvertCompareExchange) {
@@ -546,7 +546,7 @@ void RunNonConstIndexTest(TestExecutionTier execution_tier, WasmOpcode wasm_op,
WASM_EXEC_TEST(I64AtomicConstIndex##Name##Narrow) { \
RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name##32U, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
// Test a set of Regular operations
@@ -554,7 +554,7 @@ OPERATION_LIST(TEST_OPERATION)
WASM_EXEC_TEST(I64AtomicConstIndex##Name) { \
RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name, Name); \
}
OPERATION_LIST(TEST_OPERATION)
WASM_ATOMIC_OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION
WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchangeNarrow) {
@@ -13,12 +13,12 @@ namespace v8 {
namespace internal {
namespace wasm {
#define OPERATION_LIST(V) \
V(Add) \
V(Sub) \
V(And) \
V(Or) \
V(Xor) \
#define WASM_ATOMIC_OPERATION_LIST(V) \
V(Add) \
V(Sub) \
V(And) \
V(Or) \
V(Xor) \
V(Exchange)
using Uint64BinOp = uint64_t (*)(uint64_t, uint64_t);