Commit e27b348d authored by rmcilroy's avatar rmcilroy Committed by Commit bot

[Interpreter] Templatize AccumulatorUsage and OperandType for bytecode creation.

Templatizes the AccumulatorUsage and OperandType for BytecodeNode creation and
BytecodeRegisterOptimizer::PrepareForBytecode. This allows the compiler to
statically know whether the bytecode being created accesses the accumulator
and what operand types need scaling, avoiding runtime checks in the code.

Also removes BytecodeNode::set_bytecode methods.

Review-Url: https://codereview.chromium.org/2542903003
Cr-Commit-Position: refs/heads/master@{#41706}
parent f3b9d570
...@@ -143,7 +143,8 @@ class OperandHelper {}; ...@@ -143,7 +143,8 @@ class OperandHelper {};
template <> \ template <> \
class OperandHelper<OperandType::k##Name> \ class OperandHelper<OperandType::k##Name> \
: public UnsignedOperandHelper<Type> {}; : public UnsignedOperandHelper<Type> {};
UNSIGNED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER) UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
#undef DEFINE_UNSIGNED_OPERAND_HELPER #undef DEFINE_UNSIGNED_OPERAND_HELPER
template <> template <>
...@@ -211,14 +212,15 @@ class OperandHelper<OperandType::kRegOutTriple> { ...@@ -211,14 +212,15 @@ class OperandHelper<OperandType::kRegOutTriple> {
} // namespace } // namespace
template <OperandType... operand_types> template <Bytecode bytecode, AccumulatorUse accumulator_use,
OperandType... operand_types>
class BytecodeNodeBuilder { class BytecodeNodeBuilder {
public: public:
template <typename... Operands> template <typename... Operands>
INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder, INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
BytecodeSourceInfo source_info, BytecodeSourceInfo source_info,
Bytecode bytecode, Operands... operands)) { Operands... operands)) {
builder->PrepareToOutputBytecode(bytecode); builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
// The "OperandHelper<operand_types>::Convert(builder, operands)..." will // The "OperandHelper<operand_types>::Convert(builder, operands)..." will
// expand both the OperandType... and Operands... parameter packs e.g. for: // expand both the OperandType... and Operands... parameter packs e.g. for:
// BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make< // BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
...@@ -226,32 +228,34 @@ class BytecodeNodeBuilder { ...@@ -226,32 +228,34 @@ class BytecodeNodeBuilder {
// the code will expand into: // the code will expand into:
// OperandHelper<OperandType::kReg>::Convert(builder, reg), // OperandHelper<OperandType::kReg>::Convert(builder, reg),
// OperandHelper<OperandType::kImm>::Convert(builder, immediate), // OperandHelper<OperandType::kImm>::Convert(builder, immediate),
return BytecodeNode( return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
bytecode, OperandHelper<operand_types>::Convert(builder, operands)..., source_info,
source_info); OperandHelper<operand_types>::Convert(builder, operands)...);
} }
}; };
#define DEFINE_BYTECODE_OUTPUT(name, accumulator_use, ...) \ #define DEFINE_BYTECODE_OUTPUT(name, ...) \
template <typename... Operands> \ template <typename... Operands> \
void BytecodeArrayBuilder::Output##name(Operands... operands) { \ void BytecodeArrayBuilder::Output##name(Operands... operands) { \
static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, \ static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, \
"too many operands for bytecode"); \ "too many operands for bytecode"); \
BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \ BytecodeNode node( \
this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \ BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
operands...)); \ Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
pipeline()->Write(&node); \ operands...)); \
} \ pipeline()->Write(&node); \
\ } \
template <typename... Operands> \ \
void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \ template <typename... Operands> \
Operands... operands) { \ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \ Operands... operands) { \
BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \ DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \ BytecodeNode node( \
operands...)); \ BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
pipeline()->WriteJump(&node, label); \ Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
LeaveBasicBlock(); \ operands...)); \
pipeline()->WriteJump(&node, label); \
LeaveBasicBlock(); \
} }
BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT) BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
#undef DEFINE_BYTECODE_OUTPUT #undef DEFINE_BYTECODE_OUTPUT
...@@ -1000,8 +1004,10 @@ bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const { ...@@ -1000,8 +1004,10 @@ bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
} }
} }
void BytecodeArrayBuilder::PrepareToOutputBytecode(Bytecode bytecode) { template <Bytecode bytecode, AccumulatorUse accumulator_use>
if (register_optimizer_) register_optimizer_->PrepareForBytecode(bytecode); void BytecodeArrayBuilder::PrepareToOutputBytecode() {
if (register_optimizer_)
register_optimizer_->PrepareForBytecode<bytecode, accumulator_use>();
} }
uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) { uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
......
...@@ -365,7 +365,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final ...@@ -365,7 +365,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
private: private:
friend class BytecodeRegisterAllocator; friend class BytecodeRegisterAllocator;
template <OperandType... operand_types> template <Bytecode bytecode, AccumulatorUse accumulator_use,
OperandType... operand_types>
friend class BytecodeNodeBuilder; friend class BytecodeNodeBuilder;
// Returns the current source position for the given |bytecode|. // Returns the current source position for the given |bytecode|.
...@@ -393,7 +394,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final ...@@ -393,7 +394,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// during bytecode generation. // during bytecode generation.
BytecodeArrayBuilder& Illegal(); BytecodeArrayBuilder& Illegal();
void PrepareToOutputBytecode(Bytecode bytecode); template <Bytecode bytecode, AccumulatorUse accumulator_use>
void PrepareToOutputBytecode();
void LeaveBasicBlock() { return_seen_in_block_ = false; } void LeaveBasicBlock() { return_seen_in_block_ = false; }
......
...@@ -292,7 +292,7 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) { ...@@ -292,7 +292,7 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
delta -= 1; delta -= 1;
} }
DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode()); DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
node->set_bytecode(node->bytecode(), delta, node->operand(1)); node->update_operand0(delta);
} else { } else {
// The label has not yet been bound so this is a forward reference // The label has not yet been bound so this is a forward reference
// that will be patched when the label is bound. We create a // that will be patched when the label is bound. We create a
...@@ -310,13 +310,13 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) { ...@@ -310,13 +310,13 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
UNREACHABLE(); UNREACHABLE();
break; break;
case OperandSize::kByte: case OperandSize::kByte:
node->set_bytecode(node->bytecode(), k8BitJumpPlaceholder); node->update_operand0(k8BitJumpPlaceholder);
break; break;
case OperandSize::kShort: case OperandSize::kShort:
node->set_bytecode(node->bytecode(), k16BitJumpPlaceholder); node->update_operand0(k16BitJumpPlaceholder);
break; break;
case OperandSize::kQuad: case OperandSize::kQuad:
node->set_bytecode(node->bytecode(), k32BitJumpPlaceholder); node->update_operand0(k32BitJumpPlaceholder);
break; break;
} }
} }
......
...@@ -23,27 +23,33 @@ namespace interpreter { ...@@ -23,27 +23,33 @@ namespace interpreter {
V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \ V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
V(RegOutTriple, OperandTypeInfo::kScalableSignedByte) V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \ #define SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \ V(Imm, OperandTypeInfo::kScalableSignedByte)
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
#define UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
V(Idx, OperandTypeInfo::kScalableUnsignedByte) \ V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
V(UImm, OperandTypeInfo::kScalableUnsignedByte) \ V(UImm, OperandTypeInfo::kScalableUnsignedByte) \
V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \ V(RegCount, OperandTypeInfo::kScalableUnsignedByte)
#define UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort) V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \ // Carefully ordered for operand type range checks below.
V(Imm, OperandTypeInfo::kScalableSignedByte) #define NON_REGISTER_OPERAND_TYPE_LIST(V) \
INVALID_OPERAND_TYPE_LIST(V) \
UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V)
// Carefully ordered for operand type range checks below.
#define REGISTER_OPERAND_TYPE_LIST(V) \ #define REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_INPUT_OPERAND_TYPE_LIST(V) \ REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
INVALID_OPERAND_TYPE_LIST(V) \
UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
// The list of operand types used by bytecodes. // The list of operand types used by bytecodes.
// Carefully ordered for operand type range checks below.
#define OPERAND_TYPE_LIST(V) \ #define OPERAND_TYPE_LIST(V) \
NON_REGISTER_OPERAND_TYPE_LIST(V) \ NON_REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_OPERAND_TYPE_LIST(V) REGISTER_OPERAND_TYPE_LIST(V)
...@@ -125,6 +131,33 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, ...@@ -125,6 +131,33 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const OperandSize& operand_size); const OperandSize& operand_size);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type); std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
class BytecodeOperands {
public:
// Returns true if |accumulator_use| reads the accumulator.
static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
return accumulator_use == AccumulatorUse::kRead ||
accumulator_use == AccumulatorUse::kReadWrite;
}
// Returns true if |accumulator_use| writes the accumulator.
static constexpr bool WritesAccumulator(AccumulatorUse accumulator_use) {
return accumulator_use == AccumulatorUse::kWrite ||
accumulator_use == AccumulatorUse::kReadWrite;
}
// Returns true if |operand_type| is a scalable signed byte.
static constexpr bool IsScalableSignedByte(OperandType operand_type) {
return operand_type >= OperandType::kImm &&
operand_type <= OperandType::kRegOutTriple;
}
// Returns true if |operand_type| is a scalable unsigned byte.
static constexpr bool IsScalableUnsignedByte(OperandType operand_type) {
return operand_type >= OperandType::kIdx &&
operand_type <= OperandType::kRegCount;
}
};
} // namespace interpreter } // namespace interpreter
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -13,7 +13,8 @@ namespace interpreter { ...@@ -13,7 +13,8 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer( BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
BytecodePipelineStage* next_stage) BytecodePipelineStage* next_stage)
: next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) { : next_stage_(next_stage),
last_(BytecodeNode::Illegal(BytecodeSourceInfo())) {
InvalidateLast(); InvalidateLast();
} }
...@@ -65,7 +66,7 @@ void BytecodePeepholeOptimizer::Flush() { ...@@ -65,7 +66,7 @@ void BytecodePeepholeOptimizer::Flush() {
} }
void BytecodePeepholeOptimizer::InvalidateLast() { void BytecodePeepholeOptimizer::InvalidateLast() {
last_.set_bytecode(Bytecode::kIllegal); last_ = BytecodeNode::Illegal(BytecodeSourceInfo());
} }
bool BytecodePeepholeOptimizer::LastIsValid() const { bool BytecodePeepholeOptimizer::LastIsValid() const {
...@@ -116,37 +117,41 @@ bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition( ...@@ -116,37 +117,41 @@ bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
namespace { namespace {
void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode, BytecodeNode TransformLdaSmiBinaryOpToBinaryOpWithSmi(
BytecodeNode* const last, Bytecode new_bytecode, BytecodeNode* const last,
BytecodeNode* const current) { BytecodeNode* const current) {
DCHECK_EQ(last->bytecode(), Bytecode::kLdaSmi); DCHECK_EQ(last->bytecode(), Bytecode::kLdaSmi);
current->set_bytecode(new_bytecode, last->operand(0), current->operand(0), BytecodeNode node(new_bytecode, last->operand(0), current->operand(0),
current->operand(1)); current->operand(1), current->source_info());
if (last->source_info().is_valid()) { if (last->source_info().is_valid()) {
current->set_source_info(last->source_info()); node.set_source_info(last->source_info());
} }
return node;
} }
void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode, BytecodeNode TransformLdaZeroBinaryOpToBinaryOpWithZero(
BytecodeNode* const last, Bytecode new_bytecode, BytecodeNode* const last,
BytecodeNode* const current) { BytecodeNode* const current) {
DCHECK_EQ(last->bytecode(), Bytecode::kLdaZero); DCHECK_EQ(last->bytecode(), Bytecode::kLdaZero);
current->set_bytecode(new_bytecode, 0, current->operand(0), BytecodeNode node(new_bytecode, 0, current->operand(0), current->operand(1),
current->operand(1)); current->source_info());
if (last->source_info().is_valid()) { if (last->source_info().is_valid()) {
current->set_source_info(last->source_info()); node.set_source_info(last->source_info());
} }
return node;
} }
void TransformEqualityWithNullOrUndefinedToTestUndetectable( BytecodeNode TransformEqualityWithNullOrUndefinedToTestUndetectable(
BytecodeNode* const last, BytecodeNode* const current) { BytecodeNode* const last, BytecodeNode* const current) {
DCHECK((last->bytecode() == Bytecode::kLdaNull) || DCHECK((last->bytecode() == Bytecode::kLdaNull) ||
(last->bytecode() == Bytecode::kLdaUndefined)); (last->bytecode() == Bytecode::kLdaUndefined));
DCHECK_EQ(current->bytecode(), Bytecode::kTestEqual); DCHECK_EQ(current->bytecode(), Bytecode::kTestEqual);
current->set_bytecode(Bytecode::kTestUndetectable, current->operand(0)); BytecodeNode node(BytecodeNode::TestUndetectable(current->source_info(),
current->operand(0)));
if (last->source_info().is_valid()) { if (last->source_info().is_valid()) {
current->set_source_info(last->source_info()); node.set_source_info(last->source_info());
} }
return node;
} }
} // namespace } // namespace
...@@ -186,8 +191,8 @@ void BytecodePeepholeOptimizer::ElideCurrentAction( ...@@ -186,8 +191,8 @@ void BytecodePeepholeOptimizer::ElideCurrentAction(
if (node->source_info().is_valid()) { if (node->source_info().is_valid()) {
// Preserve the source information by replacing the node bytecode // Preserve the source information by replacing the node bytecode
// with a no op bytecode. // with a no op bytecode.
node->set_bytecode(Bytecode::kNop); BytecodeNode new_node(BytecodeNode::Nop(node->source_info()));
DefaultAction(node); DefaultAction(&new_node);
} else { } else {
// Nothing to do, keep last and wait for next bytecode to pair with it. // Nothing to do, keep last and wait for next bytecode to pair with it.
} }
...@@ -239,9 +244,9 @@ void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction( ...@@ -239,9 +244,9 @@ void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
if (!node->source_info().is_valid() || !last()->source_info().is_valid()) { if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
// Fused last and current into current. // Fused last and current into current.
TransformLdaSmiBinaryOpToBinaryOpWithSmi(action_data->bytecode, last(), BytecodeNode new_node(TransformLdaSmiBinaryOpToBinaryOpWithSmi(
node); action_data->bytecode, last(), node));
SetLast(node); SetLast(&new_node);
} else { } else {
DefaultAction(node); DefaultAction(node);
} }
...@@ -254,9 +259,9 @@ void BytecodePeepholeOptimizer:: ...@@ -254,9 +259,9 @@ void BytecodePeepholeOptimizer::
DCHECK(!Bytecodes::IsJump(node->bytecode())); DCHECK(!Bytecodes::IsJump(node->bytecode()));
if (!node->source_info().is_valid() || !last()->source_info().is_valid()) { if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
// Fused last and current into current. // Fused last and current into current.
TransformLdaZeroBinaryOpToBinaryOpWithZero(action_data->bytecode, last(), BytecodeNode new_node(TransformLdaZeroBinaryOpToBinaryOpWithZero(
node); action_data->bytecode, last(), node));
SetLast(node); SetLast(&new_node);
} else { } else {
DefaultAction(node); DefaultAction(node);
} }
...@@ -268,8 +273,9 @@ void BytecodePeepholeOptimizer:: ...@@ -268,8 +273,9 @@ void BytecodePeepholeOptimizer::
DCHECK(LastIsValid()); DCHECK(LastIsValid());
DCHECK(!Bytecodes::IsJump(node->bytecode())); DCHECK(!Bytecodes::IsJump(node->bytecode()));
// Fused last and current into current. // Fused last and current into current.
TransformEqualityWithNullOrUndefinedToTestUndetectable(last(), node); BytecodeNode new_node(
SetLast(node); TransformEqualityWithNullOrUndefinedToTestUndetectable(last(), node));
SetLast(&new_node);
} }
void BytecodePeepholeOptimizer::DefaultJumpAction( void BytecodePeepholeOptimizer::DefaultJumpAction(
...@@ -294,7 +300,7 @@ void BytecodePeepholeOptimizer::ChangeJumpBytecodeAction( ...@@ -294,7 +300,7 @@ void BytecodePeepholeOptimizer::ChangeJumpBytecodeAction(
next_stage()->Write(last()); next_stage()->Write(last());
InvalidateLast(); InvalidateLast();
node->set_bytecode(action_data->bytecode, node->operand(0)); node->replace_bytecode(action_data->bytecode);
} }
void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction( void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
......
...@@ -191,6 +191,15 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) { ...@@ -191,6 +191,15 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
SetOperand(3, operand3); SetOperand(3, operand3);
} }
#define DEFINE_BYTECODE_NODE_CREATOR(Name, ...) \
template <typename... Operands> \
INLINE(static BytecodeNode Name(BytecodeSourceInfo source_info, \
Operands... operands)) { \
return Create<Bytecode::k##Name, __VA_ARGS__>(source_info, operands...); \
}
BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
#undef DEFINE_BYTECODE_NODE_CREATOR
// Replace the bytecode of this node with |bytecode| and keep the operands. // Replace the bytecode of this node with |bytecode| and keep the operands.
void replace_bytecode(Bytecode bytecode) { void replace_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_), DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
...@@ -198,40 +207,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) { ...@@ -198,40 +207,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
bytecode_ = bytecode; bytecode_ = bytecode;
} }
void set_bytecode(Bytecode bytecode) { void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
bytecode_ = bytecode;
operand_count_ = 0;
operand_scale_ = OperandScale::kSingle;
}
void set_bytecode(Bytecode bytecode, uint32_t operand0) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
bytecode_ = bytecode;
operand_count_ = 1;
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
bytecode_ = bytecode;
operand_count_ = 2;
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
SetOperand(1, operand1);
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
bytecode_ = bytecode;
operand_count_ = 3;
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
}
// Print to stream |os|. // Print to stream |os|.
void Print(std::ostream& os) const; void Print(std::ostream& os) const;
...@@ -277,6 +253,100 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) { ...@@ -277,6 +253,100 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
bool operator!=(const BytecodeNode& other) const { return !(*this == other); } bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private: private:
template <Bytecode bytecode, AccumulatorUse accumulator_use,
OperandType... operand_types>
friend class BytecodeNodeBuilder;
INLINE(BytecodeNode(Bytecode bytecode, int operand_count,
OperandScale operand_scale,
BytecodeSourceInfo source_info, uint32_t operand0 = 0,
uint32_t operand1 = 0, uint32_t operand2 = 0,
uint32_t operand3 = 0))
: bytecode_(bytecode),
operand_count_(operand_count),
operand_scale_(operand_scale),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
operands_[0] = operand0;
operands_[1] = operand1;
operands_[2] = operand2;
operands_[3] = operand3;
}
template <Bytecode bytecode, AccumulatorUse accum_use>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info)) {
return BytecodeNode(bytecode, 0, OperandScale::kSingle, source_info);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
return BytecodeNode(bytecode, 1, scale, source_info, operand0);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
return BytecodeNode(bytecode, 2, scale, source_info, operand0, operand1);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1,
uint32_t operand2)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
return BytecodeNode(bytecode, 3, scale, source_info, operand0, operand1,
operand2);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type, OperandType operand3_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 3), operand3_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
scale = std::max(scale, ScaleForOperand<operand3_type>(operand3));
return BytecodeNode(bytecode, 4, scale, source_info, operand0, operand1,
operand2, operand3);
}
template <OperandType operand_type>
INLINE(static OperandScale ScaleForOperand(uint32_t operand)) {
if (BytecodeOperands::IsScalableUnsignedByte(operand_type)) {
return Bytecodes::ScaleForUnsignedOperand(operand);
} else if (BytecodeOperands::IsScalableSignedByte(operand_type)) {
return Bytecodes::ScaleForSignedOperand(operand);
} else {
return OperandScale::kSingle;
}
}
INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) { INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) { if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ = operand_scale_ =
......
...@@ -265,16 +265,16 @@ void BytecodeRegisterOptimizer::OutputRegisterTransfer( ...@@ -265,16 +265,16 @@ void BytecodeRegisterOptimizer::OutputRegisterTransfer(
if (input == accumulator_) { if (input == accumulator_) {
uint32_t operand = static_cast<uint32_t>(output.ToOperand()); uint32_t operand = static_cast<uint32_t>(output.ToOperand());
BytecodeNode node(Bytecode::kStar, operand, source_info); BytecodeNode node = BytecodeNode::Star(source_info, operand);
next_stage_->Write(&node); next_stage_->Write(&node);
} else if (output == accumulator_) { } else if (output == accumulator_) {
uint32_t operand = static_cast<uint32_t>(input.ToOperand()); uint32_t operand = static_cast<uint32_t>(input.ToOperand());
BytecodeNode node(Bytecode::kLdar, operand, source_info); BytecodeNode node = BytecodeNode::Ldar(source_info, operand);
next_stage_->Write(&node); next_stage_->Write(&node);
} else { } else {
uint32_t operand0 = static_cast<uint32_t>(input.ToOperand()); uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
uint32_t operand1 = static_cast<uint32_t>(output.ToOperand()); uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
BytecodeNode node(Bytecode::kMov, operand0, operand1, source_info); BytecodeNode node = BytecodeNode::Mov(source_info, operand0, operand1);
next_stage_->Write(&node); next_stage_->Write(&node);
} }
if (output != accumulator_) { if (output != accumulator_) {
...@@ -365,7 +365,7 @@ void BytecodeRegisterOptimizer::RegisterTransfer( ...@@ -365,7 +365,7 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
void BytecodeRegisterOptimizer::EmitNopForSourceInfo( void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
BytecodeSourceInfo source_info) const { BytecodeSourceInfo source_info) const {
DCHECK(source_info.is_valid()); DCHECK(source_info.is_valid());
BytecodeNode nop(Bytecode::kNop, source_info); BytecodeNode nop = BytecodeNode::Nop(source_info);
next_stage_->Write(&nop); next_stage_->Write(&nop);
} }
...@@ -416,32 +416,6 @@ RegisterList BytecodeRegisterOptimizer::GetInputRegisterList( ...@@ -416,32 +416,6 @@ RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
} }
} }
void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target aren't
// known.
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
Flush();
}
// Materialize the accumulator if it is read by the bytecode. The
// accumulator is special and no other register can be materialized
// in it's place.
if (Bytecodes::ReadsAccumulator(bytecode) &&
!accumulator_info_->materialized()) {
Materialize(accumulator_info_);
}
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
if (Bytecodes::WritesAccumulator(bytecode)) {
PrepareOutputRegister(accumulator_);
}
}
void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) { void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
DCHECK(RegisterIsTemporary(reg)); DCHECK(RegisterIsTemporary(reg));
size_t index = GetRegisterInfoTableIndex(reg); size_t index = GetRegisterInfoTableIndex(reg);
......
...@@ -46,7 +46,32 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final ...@@ -46,7 +46,32 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
void Flush(); void Flush();
// Prepares for |bytecode|. // Prepares for |bytecode|.
void PrepareForBytecode(Bytecode bytecode); template <Bytecode bytecode, AccumulatorUse accumulator_use>
INLINE(void PrepareForBytecode()) {
if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target
// aren't
// known.
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
Flush();
}
// Materialize the accumulator if it is read by the bytecode. The
// accumulator is special and no other register can be materialized
// in it's place.
if (BytecodeOperands::ReadsAccumulator(accumulator_use)) {
Materialize(accumulator_info_);
}
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
if (BytecodeOperands::WritesAccumulator(accumulator_use)) {
PrepareOutputRegister(accumulator_);
}
}
// Prepares |reg| for being used as an output operand. // Prepares |reg| for being used as an output operand.
void PrepareOutputRegister(Register reg); void PrepareOutputRegister(Register reg);
......
...@@ -391,14 +391,6 @@ enum class Bytecode : uint8_t { ...@@ -391,14 +391,6 @@ enum class Bytecode : uint8_t {
#undef COUNT_BYTECODE #undef COUNT_BYTECODE
}; };
// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
// See crbug.com/603131.
#if V8_CC_MSVC
#define CONSTEXPR const
#else
#define CONSTEXPR constexpr
#endif
class V8_EXPORT_PRIVATE Bytecodes final { class V8_EXPORT_PRIVATE Bytecodes final {
public: public:
// The maximum number of operands a bytecode may have. // The maximum number of operands a bytecode may have.
...@@ -466,14 +458,12 @@ class V8_EXPORT_PRIVATE Bytecodes final { ...@@ -466,14 +458,12 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns true if |bytecode| reads the accumulator. // Returns true if |bytecode| reads the accumulator.
static bool ReadsAccumulator(Bytecode bytecode) { static bool ReadsAccumulator(Bytecode bytecode) {
return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) == return BytecodeOperands::ReadsAccumulator(GetAccumulatorUse(bytecode));
AccumulatorUse::kRead;
} }
// Returns true if |bytecode| writes the accumulator. // Returns true if |bytecode| writes the accumulator.
static bool WritesAccumulator(Bytecode bytecode) { static bool WritesAccumulator(Bytecode bytecode) {
return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) == return BytecodeOperands::WritesAccumulator(GetAccumulatorUse(bytecode));
AccumulatorUse::kWrite;
} }
// Return true if |bytecode| writes the accumulator with a boolean value. // Return true if |bytecode| writes the accumulator with a boolean value.
...@@ -502,7 +492,7 @@ class V8_EXPORT_PRIVATE Bytecodes final { ...@@ -502,7 +492,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Return true if |bytecode| is an accumulator load without effects, // Return true if |bytecode| is an accumulator load without effects,
// e.g. LdaConstant, LdaTrue, Ldar. // e.g. LdaConstant, LdaTrue, Ldar.
static CONSTEXPR bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) { static constexpr bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero || return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero ||
bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull || bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull ||
bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse || bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
...@@ -515,124 +505,124 @@ class V8_EXPORT_PRIVATE Bytecodes final { ...@@ -515,124 +505,124 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Return true if |bytecode| is a register load without effects, // Return true if |bytecode| is a register load without effects,
// e.g. Mov, Star. // e.g. Mov, Star.
static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) { static constexpr bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext || return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar; bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
} }
// Returns true if the bytecode is a conditional jump taking // Returns true if the bytecode is a conditional jump taking
// an immediate byte operand (OperandType::kImm). // an immediate byte operand (OperandType::kImm).
static CONSTEXPR bool IsConditionalJumpImmediate(Bytecode bytecode) { static constexpr bool IsConditionalJumpImmediate(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfToBooleanTrue && return bytecode >= Bytecode::kJumpIfToBooleanTrue &&
bytecode <= Bytecode::kJumpIfNotHole; bytecode <= Bytecode::kJumpIfNotHole;
} }
// Returns true if the bytecode is a conditional jump taking // Returns true if the bytecode is a conditional jump taking
// a constant pool entry (OperandType::kIdx). // a constant pool entry (OperandType::kIdx).
static CONSTEXPR bool IsConditionalJumpConstant(Bytecode bytecode) { static constexpr bool IsConditionalJumpConstant(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfNullConstant && return bytecode >= Bytecode::kJumpIfNullConstant &&
bytecode <= Bytecode::kJumpIfToBooleanFalseConstant; bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
} }
// Returns true if the bytecode is a conditional jump taking // Returns true if the bytecode is a conditional jump taking
// any kind of operand. // any kind of operand.
static CONSTEXPR bool IsConditionalJump(Bytecode bytecode) { static constexpr bool IsConditionalJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfNullConstant && return bytecode >= Bytecode::kJumpIfNullConstant &&
bytecode <= Bytecode::kJumpIfNotHole; bytecode <= Bytecode::kJumpIfNotHole;
} }
// Returns true if the bytecode is an unconditional jump. // Returns true if the bytecode is an unconditional jump.
static CONSTEXPR bool IsUnconditionalJump(Bytecode bytecode) { static constexpr bool IsUnconditionalJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpLoop && return bytecode >= Bytecode::kJumpLoop &&
bytecode <= Bytecode::kJumpConstant; bytecode <= Bytecode::kJumpConstant;
} }
// Returns true if the bytecode is a jump or a conditional jump taking // Returns true if the bytecode is a jump or a conditional jump taking
// an immediate byte operand (OperandType::kImm). // an immediate byte operand (OperandType::kImm).
static CONSTEXPR bool IsJumpImmediate(Bytecode bytecode) { static constexpr bool IsJumpImmediate(Bytecode bytecode) {
return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpLoop || return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpLoop ||
IsConditionalJumpImmediate(bytecode); IsConditionalJumpImmediate(bytecode);
} }
// Returns true if the bytecode is a jump or conditional jump taking a // Returns true if the bytecode is a jump or conditional jump taking a
// constant pool entry (OperandType::kIdx). // constant pool entry (OperandType::kIdx).
static CONSTEXPR bool IsJumpConstant(Bytecode bytecode) { static constexpr bool IsJumpConstant(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpConstant && return bytecode >= Bytecode::kJumpConstant &&
bytecode <= Bytecode::kJumpIfToBooleanFalseConstant; bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
} }
// Returns true if the bytecode is a jump that internally coerces the // Returns true if the bytecode is a jump that internally coerces the
// accumulator to a boolean. // accumulator to a boolean.
static CONSTEXPR bool IsJumpIfToBoolean(Bytecode bytecode) { static constexpr bool IsJumpIfToBoolean(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfToBooleanTrueConstant && return bytecode >= Bytecode::kJumpIfToBooleanTrueConstant &&
bytecode <= Bytecode::kJumpIfToBooleanFalse; bytecode <= Bytecode::kJumpIfToBooleanFalse;
} }
// Returns true if the bytecode is a jump or conditional jump taking // Returns true if the bytecode is a jump or conditional jump taking
// any kind of operand. // any kind of operand.
static CONSTEXPR bool IsJump(Bytecode bytecode) { static constexpr bool IsJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpLoop && return bytecode >= Bytecode::kJumpLoop &&
bytecode <= Bytecode::kJumpIfNotHole; bytecode <= Bytecode::kJumpIfNotHole;
} }
// Returns true if the bytecode is a forward jump or conditional jump taking // Returns true if the bytecode is a forward jump or conditional jump taking
// any kind of operand. // any kind of operand.
static CONSTEXPR bool IsForwardJump(Bytecode bytecode) { static constexpr bool IsForwardJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJump && bytecode <= Bytecode::kJumpIfNotHole; return bytecode >= Bytecode::kJump && bytecode <= Bytecode::kJumpIfNotHole;
} }
// Returns true if the bytecode is a conditional jump, a jump, or a return. // Returns true if the bytecode is a conditional jump, a jump, or a return.
static CONSTEXPR bool IsJumpOrReturn(Bytecode bytecode) { static constexpr bool IsJumpOrReturn(Bytecode bytecode) {
return bytecode == Bytecode::kReturn || IsJump(bytecode); return bytecode == Bytecode::kReturn || IsJump(bytecode);
} }
// Return true if |bytecode| is a jump without effects, // Return true if |bytecode| is a jump without effects,
// e.g. any jump excluding those that include type coercion like // e.g. any jump excluding those that include type coercion like
// JumpIfTrueToBoolean. // JumpIfTrueToBoolean.
static CONSTEXPR bool IsJumpWithoutEffects(Bytecode bytecode) { static constexpr bool IsJumpWithoutEffects(Bytecode bytecode) {
return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode); return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
} }
// Returns true if |bytecode| has no effects. These bytecodes only manipulate // Returns true if |bytecode| has no effects. These bytecodes only manipulate
// interpreter frame state and will never throw. // interpreter frame state and will never throw.
static CONSTEXPR bool IsWithoutExternalSideEffects(Bytecode bytecode) { static constexpr bool IsWithoutExternalSideEffects(Bytecode bytecode) {
return (IsAccumulatorLoadWithoutEffects(bytecode) || return (IsAccumulatorLoadWithoutEffects(bytecode) ||
IsRegisterLoadWithoutEffects(bytecode) || IsRegisterLoadWithoutEffects(bytecode) ||
bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode)); bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
} }
// Returns true if the bytecode is Ldar or Star. // Returns true if the bytecode is Ldar or Star.
static CONSTEXPR bool IsLdarOrStar(Bytecode bytecode) { static constexpr bool IsLdarOrStar(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar; return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
} }
// Returns true if |bytecode| puts a name in the accumulator. // Returns true if |bytecode| puts a name in the accumulator.
static CONSTEXPR bool PutsNameInAccumulator(Bytecode bytecode) { static constexpr bool PutsNameInAccumulator(Bytecode bytecode) {
return bytecode == Bytecode::kTypeOf; return bytecode == Bytecode::kTypeOf;
} }
// Returns true if the bytecode is a call or a constructor call. // Returns true if the bytecode is a call or a constructor call.
static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) { static constexpr bool IsCallOrNew(Bytecode bytecode) {
return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty || return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew; bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
} }
// Returns true if the bytecode is a call to the runtime. // Returns true if the bytecode is a call to the runtime.
static CONSTEXPR bool IsCallRuntime(Bytecode bytecode) { static constexpr bool IsCallRuntime(Bytecode bytecode) {
return bytecode == Bytecode::kCallRuntime || return bytecode == Bytecode::kCallRuntime ||
bytecode == Bytecode::kCallRuntimeForPair || bytecode == Bytecode::kCallRuntimeForPair ||
bytecode == Bytecode::kInvokeIntrinsic; bytecode == Bytecode::kInvokeIntrinsic;
} }
// Returns true if the bytecode is a scaling prefix bytecode. // Returns true if the bytecode is a scaling prefix bytecode.
static CONSTEXPR bool IsPrefixScalingBytecode(Bytecode bytecode) { static constexpr bool IsPrefixScalingBytecode(Bytecode bytecode) {
return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide || return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
bytecode == Bytecode::kDebugBreakExtraWide || bytecode == Bytecode::kDebugBreakExtraWide ||
bytecode == Bytecode::kDebugBreakWide; bytecode == Bytecode::kDebugBreakWide;
} }
// Returns the number of values which |bytecode| returns. // Returns the number of values which |bytecode| returns.
static CONSTEXPR size_t ReturnCount(Bytecode bytecode) { static constexpr size_t ReturnCount(Bytecode bytecode) {
return bytecode == Bytecode::kReturn ? 1 : 0; return bytecode == Bytecode::kReturn ? 1 : 0;
} }
...@@ -817,10 +807,6 @@ class V8_EXPORT_PRIVATE Bytecodes final { ...@@ -817,10 +807,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
static const OperandSize* const kOperandSizes[][3]; static const OperandSize* const kOperandSizes[][3];
}; };
// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
// See crbug.com/603131.
#undef CONSTEXPR
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const Bytecode& bytecode); const Bytecode& bytecode);
......
...@@ -106,6 +106,7 @@ v8_executable("unittests") { ...@@ -106,6 +106,7 @@ v8_executable("unittests") {
"interpreter/bytecode-array-writer-unittest.cc", "interpreter/bytecode-array-writer-unittest.cc",
"interpreter/bytecode-dead-code-optimizer-unittest.cc", "interpreter/bytecode-dead-code-optimizer-unittest.cc",
"interpreter/bytecode-decoder-unittest.cc", "interpreter/bytecode-decoder-unittest.cc",
"interpreter/bytecode-operands-unittest.cc",
"interpreter/bytecode-peephole-optimizer-unittest.cc", "interpreter/bytecode-peephole-optimizer-unittest.cc",
"interpreter/bytecode-pipeline-unittest.cc", "interpreter/bytecode-pipeline-unittest.cc",
"interpreter/bytecode-register-allocator-unittest.cc", "interpreter/bytecode-register-allocator-unittest.cc",
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/interpreter/bytecode-operands.h"
#include "src/isolate.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace interpreter {
using BytecodeOperandsTest = TestWithIsolateAndZone;
// Checks that IsScalableSignedByte() is true for every register operand type
// and every signed scalable scalar operand type, and false for all remaining
// operand types (invalid, unsigned fixed, unsigned scalable).
TEST(BytecodeOperandsTest, IsScalableSignedByte) {
#define CHECK_IS_SCALABLE_SIGNED(Name, ...) \
  CHECK(BytecodeOperands::IsScalableSignedByte(OperandType::k##Name));
  REGISTER_OPERAND_TYPE_LIST(CHECK_IS_SCALABLE_SIGNED)
  SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(CHECK_IS_SCALABLE_SIGNED)
#undef CHECK_IS_SCALABLE_SIGNED
#define CHECK_NOT_SCALABLE_SIGNED(Name, ...) \
  CHECK(!BytecodeOperands::IsScalableSignedByte(OperandType::k##Name));
  INVALID_OPERAND_TYPE_LIST(CHECK_NOT_SCALABLE_SIGNED)
  UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(CHECK_NOT_SCALABLE_SIGNED)
  UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(CHECK_NOT_SCALABLE_SIGNED)
#undef CHECK_NOT_SCALABLE_SIGNED
}
// Checks that IsScalableUnsignedByte() is true exactly for the unsigned
// scalable scalar operand types, and false for every other operand type
// (invalid, register, signed scalable, unsigned fixed).
TEST(BytecodeOperandsTest, IsScalableUnsignedByte) {
#define SCALABLE_UNSIGNED_OPERAND(Name, ...) \
  CHECK(BytecodeOperands::IsScalableUnsignedByte(OperandType::k##Name));
  UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(SCALABLE_UNSIGNED_OPERAND)
// Fix: the previous #undef named SCALABLE_SIGNED_OPERAND, which is not
// defined here, so SCALABLE_UNSIGNED_OPERAND leaked past this test.
#undef SCALABLE_UNSIGNED_OPERAND
#define NOT_SCALABLE_UNSIGNED_OPERAND(Name, ...) \
  CHECK(!BytecodeOperands::IsScalableUnsignedByte(OperandType::k##Name));
  INVALID_OPERAND_TYPE_LIST(NOT_SCALABLE_UNSIGNED_OPERAND)
  REGISTER_OPERAND_TYPE_LIST(NOT_SCALABLE_UNSIGNED_OPERAND)
  SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(NOT_SCALABLE_UNSIGNED_OPERAND)
  UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(NOT_SCALABLE_UNSIGNED_OPERAND)
// Fix: the previous #undef named NOT_SCALABLE_SIGNED_OPERAND (wrong macro),
// leaving NOT_SCALABLE_UNSIGNED_OPERAND defined for the rest of the TU.
#undef NOT_SCALABLE_UNSIGNED_OPERAND
}
} // namespace interpreter
} // namespace internal
} // namespace v8
...@@ -19,11 +19,12 @@ class BytecodePeepholeOptimizerTest : public BytecodePipelineStage, ...@@ -19,11 +19,12 @@ class BytecodePeepholeOptimizerTest : public BytecodePipelineStage,
public TestWithIsolateAndZone { public TestWithIsolateAndZone {
public: public:
BytecodePeepholeOptimizerTest() BytecodePeepholeOptimizerTest()
: peephole_optimizer_(this), last_written_(Bytecode::kIllegal) {} : peephole_optimizer_(this),
last_written_(BytecodeNode::Illegal(BytecodeSourceInfo())) {}
~BytecodePeepholeOptimizerTest() override {} ~BytecodePeepholeOptimizerTest() override {}
void Reset() { void Reset() {
last_written_.set_bytecode(Bytecode::kIllegal); last_written_ = BytecodeNode::Illegal(BytecodeSourceInfo());
write_count_ = 0; write_count_ = 0;
} }
......
...@@ -134,36 +134,6 @@ TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) { ...@@ -134,36 +134,6 @@ TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
CHECK_NE(node, other); CHECK_NE(node, other);
} }
TEST_F(BytecodeNodeTest, SetBytecode0) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, false);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], source_info);
CHECK_EQ(node.source_info(), source_info);
BytecodeNode clone(Bytecode::kIllegal);
clone = node;
clone.set_bytecode(Bytecode::kNop);
CHECK_EQ(clone.bytecode(), Bytecode::kNop);
CHECK_EQ(clone.operand_count(), 0);
CHECK_EQ(clone.source_info(), source_info);
}
TEST_F(BytecodeNodeTest, SetBytecode1) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, false);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], source_info);
BytecodeNode clone(Bytecode::kIllegal);
clone = node;
clone.set_bytecode(Bytecode::kJump, 0x01aabbcc);
CHECK_EQ(clone.bytecode(), Bytecode::kJump);
CHECK_EQ(clone.operand_count(), 1);
CHECK_EQ(clone.operand(0), 0x01aabbccu);
CHECK_EQ(clone.source_info(), source_info);
}
} // namespace interpreter } // namespace interpreter
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -78,7 +78,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) { ...@@ -78,7 +78,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
Register temp = NewTemporary(); Register temp = NewTemporary();
optimizer()->DoStar(temp, BytecodeSourceInfo()); optimizer()->DoStar(temp, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0u); CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode(Bytecode::kJump); optimizer()->PrepareForBytecode<Bytecode::kJump, AccumulatorUse::kNone>();
CHECK_EQ(write_count(), 1u); CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar); CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), static_cast<uint32_t>(temp.ToOperand())); CHECK_EQ(output()->at(0).operand(0), static_cast<uint32_t>(temp.ToOperand()));
...@@ -96,7 +96,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) { ...@@ -96,7 +96,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand()); BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
ReleaseTemporaries(temp); ReleaseTemporaries(temp);
CHECK_EQ(write_count(), 0u); CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode(Bytecode::kReturn); optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdar); CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(0).operand(0), CHECK_EQ(output()->at(0).operand(0),
static_cast<uint32_t>(parameter.ToOperand())); static_cast<uint32_t>(parameter.ToOperand()));
...@@ -104,12 +104,12 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) { ...@@ -104,12 +104,12 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) { TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
Initialize(3, 1); Initialize(3, 1);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi); optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
Register temp0 = NewTemporary(); Register temp0 = NewTemporary();
Register temp1 = NewTemporary(); Register temp1 = NewTemporary();
optimizer()->DoStar(temp1, BytecodeSourceInfo()); optimizer()->DoStar(temp1, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0u); CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi); optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
CHECK_EQ(write_count(), 1u); CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar); CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), CHECK_EQ(output()->at(0).operand(0),
...@@ -120,7 +120,7 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) { ...@@ -120,7 +120,7 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
CHECK_EQ(write_count(), 1u); CHECK_EQ(write_count(), 1u);
optimizer()->DoLdar(temp0, BytecodeSourceInfo()); optimizer()->DoLdar(temp0, BytecodeSourceInfo());
CHECK_EQ(write_count(), 1u); CHECK_EQ(write_count(), 1u);
optimizer()->PrepareForBytecode(Bytecode::kReturn); optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
CHECK_EQ(write_count(), 2u); CHECK_EQ(write_count(), 2u);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar); CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(1).operand(0), CHECK_EQ(output()->at(1).operand(0),
...@@ -129,7 +129,7 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) { ...@@ -129,7 +129,7 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterNotFlushed) { TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterNotFlushed) {
Initialize(3, 1); Initialize(3, 1);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi); optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
Register temp0 = NewTemporary(); Register temp0 = NewTemporary();
Register temp1 = NewTemporary(); Register temp1 = NewTemporary();
optimizer()->DoStar(temp0, BytecodeSourceInfo()); optimizer()->DoStar(temp0, BytecodeSourceInfo());
...@@ -158,7 +158,7 @@ TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) { ...@@ -158,7 +158,7 @@ TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
CHECK_EQ(output()->at(0).operand(1), CHECK_EQ(output()->at(0).operand(1),
static_cast<uint32_t>(local.ToOperand())); static_cast<uint32_t>(local.ToOperand()));
optimizer()->PrepareForBytecode(Bytecode::kReturn); optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
CHECK_EQ(write_count(), 2u); CHECK_EQ(write_count(), 2u);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar); CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(1).operand(0), CHECK_EQ(output()->at(1).operand(0),
...@@ -188,12 +188,13 @@ TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) { ...@@ -188,12 +188,13 @@ TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
Register parameter = Register::FromParameterIndex(1, 3); Register parameter = Register::FromParameterIndex(1, 3);
Register temp0 = NewTemporary(); Register temp0 = NewTemporary();
Register temp1 = NewTemporary(); Register temp1 = NewTemporary();
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi); optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
optimizer()->DoStar(temp0, BytecodeSourceInfo()); optimizer()->DoStar(temp0, BytecodeSourceInfo());
optimizer()->DoMov(parameter, temp1, BytecodeSourceInfo()); optimizer()->DoMov(parameter, temp1, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0u); CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode(Bytecode::kCallJSRuntime); optimizer()
->PrepareForBytecode<Bytecode::kCallJSRuntime, AccumulatorUse::kWrite>();
RegisterList reg_list = RegisterList reg_list =
optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 2)); optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 2));
CHECK_EQ(temp0.index(), reg_list.first_register().index()); CHECK_EQ(temp0.index(), reg_list.first_register().index());
......
...@@ -97,6 +97,7 @@ ...@@ -97,6 +97,7 @@
'interpreter/bytecode-array-writer-unittest.cc', 'interpreter/bytecode-array-writer-unittest.cc',
'interpreter/bytecode-dead-code-optimizer-unittest.cc', 'interpreter/bytecode-dead-code-optimizer-unittest.cc',
'interpreter/bytecode-decoder-unittest.cc', 'interpreter/bytecode-decoder-unittest.cc',
'interpreter/bytecode-operands-unittest.cc',
'interpreter/bytecode-peephole-optimizer-unittest.cc', 'interpreter/bytecode-peephole-optimizer-unittest.cc',
'interpreter/bytecode-pipeline-unittest.cc', 'interpreter/bytecode-pipeline-unittest.cc',
'interpreter/bytecode-register-allocator-unittest.cc', 'interpreter/bytecode-register-allocator-unittest.cc',
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment