Commit e27b348d authored by rmcilroy, committed by Commit bot

[Interpreter] Templatize AccumulatorUse and OperandType for bytecode creation.

Templatizes BytecodeNode creation and
BytecodeRegisterOptimizer::PrepareForBytecode on AccumulatorUse and
OperandType. This lets the compiler statically determine whether the bytecode
being created accesses the accumulator and which operand types need scaling,
avoiding runtime checks in the generated code.

Also removes BytecodeNode::set_bytecode methods.

Review-Url: https://codereview.chromium.org/2542903003
Cr-Commit-Position: refs/heads/master@{#41706}
parent f3b9d570
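To illustrate the payoff of the templatization, here is a minimal standalone
sketch (RegisterOptimizer and its methods are hypothetical stand-ins, not V8's
actual BytecodeRegisterOptimizer): once AccumulatorUse is a template parameter
instead of a runtime argument, the accumulator checks in PrepareForBytecode are
constant expressions, and the compiler drops the untaken branches at each
instantiation.

// Illustrative sketch only; not part of this diff.
#include <cstdint>

enum class AccumulatorUse : uint8_t { kNone, kRead, kWrite, kReadWrite };

struct RegisterOptimizer {
  template <AccumulatorUse accumulator_use>
  void PrepareForBytecode() {
    // accumulator_use is a compile-time constant, so each branch below is
    // kept or discarded entirely when this template is instantiated.
    if (accumulator_use == AccumulatorUse::kRead ||
        accumulator_use == AccumulatorUse::kReadWrite) {
      MaterializeAccumulator();
    }
    if (accumulator_use == AccumulatorUse::kWrite ||
        accumulator_use == AccumulatorUse::kReadWrite) {
      PrepareAccumulatorOutput();
    }
  }

  void MaterializeAccumulator() {}
  void PrepareAccumulatorOutput() {}
};

int main() {
  RegisterOptimizer optimizer;
  // Instantiates a specialization containing only the write-path call.
  optimizer.PrepareForBytecode<AccumulatorUse::kWrite>();
  return 0;
}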
@@ -143,7 +143,8 @@ class OperandHelper {};
template <> \
class OperandHelper<OperandType::k##Name> \
: public UnsignedOperandHelper<Type> {};
UNSIGNED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
#undef DEFINE_UNSIGNED_OPERAND_HELPER
template <>
@@ -211,14 +212,15 @@ class OperandHelper<OperandType::kRegOutTriple> {
} // namespace
template <OperandType... operand_types>
template <Bytecode bytecode, AccumulatorUse accumulator_use,
OperandType... operand_types>
class BytecodeNodeBuilder {
public:
template <typename... Operands>
INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
BytecodeSourceInfo source_info,
Bytecode bytecode, Operands... operands)) {
builder->PrepareToOutputBytecode(bytecode);
Operands... operands)) {
builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
// The "OperandHelper<operand_types>::Convert(builder, operands)..." will
// expand both the OperandType... and Operands... parameter packs e.g. for:
// BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
@@ -226,32 +228,34 @@ class BytecodeNodeBuilder {
// the code will expand into:
// OperandHelper<OperandType::kReg>::Convert(builder, reg),
// OperandHelper<OperandType::kImm>::Convert(builder, immediate),
return BytecodeNode(
bytecode, OperandHelper<operand_types>::Convert(builder, operands)...,
source_info);
return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
source_info,
OperandHelper<operand_types>::Convert(builder, operands)...);
}
};
#define DEFINE_BYTECODE_OUTPUT(name, accumulator_use, ...) \
template <typename... Operands> \
void BytecodeArrayBuilder::Output##name(Operands... operands) { \
static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, \
"too many operands for bytecode"); \
BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
operands...)); \
pipeline()->Write(&node); \
} \
\
template <typename... Operands> \
void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
Operands... operands) { \
DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
operands...)); \
pipeline()->WriteJump(&node, label); \
LeaveBasicBlock(); \
#define DEFINE_BYTECODE_OUTPUT(name, ...) \
template <typename... Operands> \
void BytecodeArrayBuilder::Output##name(Operands... operands) { \
static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, \
"too many operands for bytecode"); \
BytecodeNode node( \
BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
operands...)); \
pipeline()->Write(&node); \
} \
\
template <typename... Operands> \
void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
Operands... operands) { \
DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
BytecodeNode node( \
BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
operands...)); \
pipeline()->WriteJump(&node, label); \
LeaveBasicBlock(); \
}
BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
#undef DEFINE_BYTECODE_OUTPUT
@@ -1000,8 +1004,10 @@ bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
}
}
void BytecodeArrayBuilder::PrepareToOutputBytecode(Bytecode bytecode) {
if (register_optimizer_) register_optimizer_->PrepareForBytecode(bytecode);
template <Bytecode bytecode, AccumulatorUse accumulator_use>
void BytecodeArrayBuilder::PrepareToOutputBytecode() {
if (register_optimizer_)
register_optimizer_->PrepareForBytecode<bytecode, accumulator_use>();
}
uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
@@ -365,7 +365,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
private:
friend class BytecodeRegisterAllocator;
template <OperandType... operand_types>
template <Bytecode bytecode, AccumulatorUse accumulator_use,
OperandType... operand_types>
friend class BytecodeNodeBuilder;
// Returns the current source position for the given |bytecode|.
@@ -393,7 +394,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// during bytecode generation.
BytecodeArrayBuilder& Illegal();
void PrepareToOutputBytecode(Bytecode bytecode);
template <Bytecode bytecode, AccumulatorUse accumulator_use>
void PrepareToOutputBytecode();
void LeaveBasicBlock() { return_seen_in_block_ = false; }
@@ -292,7 +292,7 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
delta -= 1;
}
DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
node->set_bytecode(node->bytecode(), delta, node->operand(1));
node->update_operand0(delta);
} else {
// The label has not yet been bound so this is a forward reference
// that will be patched when the label is bound. We create a
@@ -310,13 +310,13 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
UNREACHABLE();
break;
case OperandSize::kByte:
node->set_bytecode(node->bytecode(), k8BitJumpPlaceholder);
node->update_operand0(k8BitJumpPlaceholder);
break;
case OperandSize::kShort:
node->set_bytecode(node->bytecode(), k16BitJumpPlaceholder);
node->update_operand0(k16BitJumpPlaceholder);
break;
case OperandSize::kQuad:
node->set_bytecode(node->bytecode(), k32BitJumpPlaceholder);
node->update_operand0(k32BitJumpPlaceholder);
break;
}
}
@@ -23,27 +23,33 @@ namespace interpreter {
V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
#define SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
V(Imm, OperandTypeInfo::kScalableSignedByte)
#define UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
V(UImm, OperandTypeInfo::kScalableUnsignedByte) \
V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
V(RegCount, OperandTypeInfo::kScalableUnsignedByte)
#define UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Imm, OperandTypeInfo::kScalableSignedByte)
// Carefully ordered for operand type range checks below.
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
INVALID_OPERAND_TYPE_LIST(V) \
UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V)
// Carefully ordered for operand type range checks below.
#define REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
INVALID_OPERAND_TYPE_LIST(V) \
UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
// The list of operand types used by bytecodes.
// Carefully ordered for operand type range checks below.
#define OPERAND_TYPE_LIST(V) \
NON_REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_OPERAND_TYPE_LIST(V)
@@ -125,6 +131,33 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const OperandSize& operand_size);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
class BytecodeOperands {
public:
// Returns true if |accumulator_use| reads the accumulator.
static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
return accumulator_use == AccumulatorUse::kRead ||
accumulator_use == AccumulatorUse::kReadWrite;
}
// Returns true if |accumulator_use| writes the accumulator.
static constexpr bool WritesAccumulator(AccumulatorUse accumulator_use) {
return accumulator_use == AccumulatorUse::kWrite ||
accumulator_use == AccumulatorUse::kReadWrite;
}
// Returns true if |operand_type| is a scalable signed byte.
static constexpr bool IsScalableSignedByte(OperandType operand_type) {
return operand_type >= OperandType::kImm &&
operand_type <= OperandType::kRegOutTriple;
}
// Returns true if |operand_type| is a scalable unsigned byte.
static constexpr bool IsScalableUnsignedByte(OperandType operand_type) {
return operand_type >= OperandType::kIdx &&
operand_type <= OperandType::kRegCount;
}
};
} // namespace interpreter
} // namespace internal
} // namespace v8
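The constexpr range checks in BytecodeOperands above depend on the "carefully
ordered" macro lists: because each scalability class occupies a contiguous run
of enumerators, two comparisons classify an operand type with no table lookup.
A minimal sketch of the idea (abbreviated hypothetical enum, not the full V8
operand type list):

// Illustrative sketch only; not part of this diff.
#include <cstdint>

enum class OperandType : uint8_t {
  kNone,          // invalid
  kFlag8,         // fixed-size unsigned
  kIdx,           // scalable unsigned range begins
  kRegCount,      // scalable unsigned range ends
  kImm,           // scalable signed range begins
  kReg,
  kRegOutTriple,  // scalable signed range ends
};

static constexpr bool IsScalableSignedByte(OperandType t) {
  return t >= OperandType::kImm && t <= OperandType::kRegOutTriple;
}
static constexpr bool IsScalableUnsignedByte(OperandType t) {
  return t >= OperandType::kIdx && t <= OperandType::kRegCount;
}

static_assert(IsScalableSignedByte(OperandType::kReg),
              "registers scale as signed bytes");
static_assert(!IsScalableUnsignedByte(OperandType::kFlag8),
              "fixed-size operands never scale");

int main() { return 0; }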
@@ -13,7 +13,8 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
BytecodePipelineStage* next_stage)
: next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) {
: next_stage_(next_stage),
last_(BytecodeNode::Illegal(BytecodeSourceInfo())) {
InvalidateLast();
}
@@ -65,7 +66,7 @@ void BytecodePeepholeOptimizer::Flush() {
}
void BytecodePeepholeOptimizer::InvalidateLast() {
last_.set_bytecode(Bytecode::kIllegal);
last_ = BytecodeNode::Illegal(BytecodeSourceInfo());
}
bool BytecodePeepholeOptimizer::LastIsValid() const {
@@ -116,37 +117,41 @@ bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
namespace {
void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
BytecodeNode* const last,
BytecodeNode* const current) {
BytecodeNode TransformLdaSmiBinaryOpToBinaryOpWithSmi(
Bytecode new_bytecode, BytecodeNode* const last,
BytecodeNode* const current) {
DCHECK_EQ(last->bytecode(), Bytecode::kLdaSmi);
current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
current->operand(1));
BytecodeNode node(new_bytecode, last->operand(0), current->operand(0),
current->operand(1), current->source_info());
if (last->source_info().is_valid()) {
current->set_source_info(last->source_info());
node.set_source_info(last->source_info());
}
return node;
}
void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
BytecodeNode* const last,
BytecodeNode* const current) {
BytecodeNode TransformLdaZeroBinaryOpToBinaryOpWithZero(
Bytecode new_bytecode, BytecodeNode* const last,
BytecodeNode* const current) {
DCHECK_EQ(last->bytecode(), Bytecode::kLdaZero);
current->set_bytecode(new_bytecode, 0, current->operand(0),
current->operand(1));
BytecodeNode node(new_bytecode, 0, current->operand(0), current->operand(1),
current->source_info());
if (last->source_info().is_valid()) {
current->set_source_info(last->source_info());
node.set_source_info(last->source_info());
}
return node;
}
void TransformEqualityWithNullOrUndefinedToTestUndetectable(
BytecodeNode TransformEqualityWithNullOrUndefinedToTestUndetectable(
BytecodeNode* const last, BytecodeNode* const current) {
DCHECK((last->bytecode() == Bytecode::kLdaNull) ||
(last->bytecode() == Bytecode::kLdaUndefined));
DCHECK_EQ(current->bytecode(), Bytecode::kTestEqual);
current->set_bytecode(Bytecode::kTestUndetectable, current->operand(0));
BytecodeNode node(BytecodeNode::TestUndetectable(current->source_info(),
current->operand(0)));
if (last->source_info().is_valid()) {
current->set_source_info(last->source_info());
node.set_source_info(last->source_info());
}
return node;
}
} // namespace
@@ -186,8 +191,8 @@ void BytecodePeepholeOptimizer::ElideCurrentAction(
if (node->source_info().is_valid()) {
// Preserve the source information by replacing the node bytecode
// with a no op bytecode.
node->set_bytecode(Bytecode::kNop);
DefaultAction(node);
BytecodeNode new_node(BytecodeNode::Nop(node->source_info()));
DefaultAction(&new_node);
} else {
// Nothing to do, keep last and wait for next bytecode to pair with it.
}
@@ -239,9 +244,9 @@ void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
// Fused last and current into current.
TransformLdaSmiBinaryOpToBinaryOpWithSmi(action_data->bytecode, last(),
node);
SetLast(node);
BytecodeNode new_node(TransformLdaSmiBinaryOpToBinaryOpWithSmi(
action_data->bytecode, last(), node));
SetLast(&new_node);
} else {
DefaultAction(node);
}
@@ -254,9 +259,9 @@ void BytecodePeepholeOptimizer::
DCHECK(!Bytecodes::IsJump(node->bytecode()));
if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
// Fused last and current into current.
TransformLdaZeroBinaryOpToBinaryOpWithZero(action_data->bytecode, last(),
node);
SetLast(node);
BytecodeNode new_node(TransformLdaZeroBinaryOpToBinaryOpWithZero(
action_data->bytecode, last(), node));
SetLast(&new_node);
} else {
DefaultAction(node);
}
@@ -268,8 +273,9 @@ void BytecodePeepholeOptimizer::
DCHECK(LastIsValid());
DCHECK(!Bytecodes::IsJump(node->bytecode()));
// Fused last and current into current.
TransformEqualityWithNullOrUndefinedToTestUndetectable(last(), node);
SetLast(node);
BytecodeNode new_node(
TransformEqualityWithNullOrUndefinedToTestUndetectable(last(), node));
SetLast(&new_node);
}
void BytecodePeepholeOptimizer::DefaultJumpAction(
@@ -294,7 +300,7 @@ void BytecodePeepholeOptimizer::ChangeJumpBytecodeAction(
next_stage()->Write(last());
InvalidateLast();
node->set_bytecode(action_data->bytecode, node->operand(0));
node->replace_bytecode(action_data->bytecode);
}
void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
@@ -191,6 +191,15 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
SetOperand(3, operand3);
}
#define DEFINE_BYTECODE_NODE_CREATOR(Name, ...) \
template <typename... Operands> \
INLINE(static BytecodeNode Name(BytecodeSourceInfo source_info, \
Operands... operands)) { \
return Create<Bytecode::k##Name, __VA_ARGS__>(source_info, operands...); \
}
BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
#undef DEFINE_BYTECODE_NODE_CREATOR
// Replace the bytecode of this node with |bytecode| and keep the operands.
void replace_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
@@ -198,40 +207,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
bytecode_ = bytecode;
}
void set_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
bytecode_ = bytecode;
operand_count_ = 0;
operand_scale_ = OperandScale::kSingle;
}
void set_bytecode(Bytecode bytecode, uint32_t operand0) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
bytecode_ = bytecode;
operand_count_ = 1;
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
bytecode_ = bytecode;
operand_count_ = 2;
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
SetOperand(1, operand1);
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
bytecode_ = bytecode;
operand_count_ = 3;
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
}
void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
// Print to stream |os|.
void Print(std::ostream& os) const;
@@ -277,6 +253,100 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
template <Bytecode bytecode, AccumulatorUse accumulator_use,
OperandType... operand_types>
friend class BytecodeNodeBuilder;
INLINE(BytecodeNode(Bytecode bytecode, int operand_count,
OperandScale operand_scale,
BytecodeSourceInfo source_info, uint32_t operand0 = 0,
uint32_t operand1 = 0, uint32_t operand2 = 0,
uint32_t operand3 = 0))
: bytecode_(bytecode),
operand_count_(operand_count),
operand_scale_(operand_scale),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
operands_[0] = operand0;
operands_[1] = operand1;
operands_[2] = operand2;
operands_[3] = operand3;
}
template <Bytecode bytecode, AccumulatorUse accum_use>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info)) {
return BytecodeNode(bytecode, 0, OperandScale::kSingle, source_info);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
return BytecodeNode(bytecode, 1, scale, source_info, operand0);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
return BytecodeNode(bytecode, 2, scale, source_info, operand0, operand1);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1,
uint32_t operand2)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
return BytecodeNode(bytecode, 3, scale, source_info, operand0, operand1,
operand2);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type, OperandType operand3_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 3), operand3_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
scale = std::max(scale, ScaleForOperand<operand3_type>(operand3));
return BytecodeNode(bytecode, 4, scale, source_info, operand0, operand1,
operand2, operand3);
}
template <OperandType operand_type>
INLINE(static OperandScale ScaleForOperand(uint32_t operand)) {
if (BytecodeOperands::IsScalableUnsignedByte(operand_type)) {
return Bytecodes::ScaleForUnsignedOperand(operand);
} else if (BytecodeOperands::IsScalableSignedByte(operand_type)) {
return Bytecodes::ScaleForSignedOperand(operand);
} else {
return OperandScale::kSingle;
}
}
INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ =
@@ -265,16 +265,16 @@ void BytecodeRegisterOptimizer::OutputRegisterTransfer(
if (input == accumulator_) {
uint32_t operand = static_cast<uint32_t>(output.ToOperand());
BytecodeNode node(Bytecode::kStar, operand, source_info);
BytecodeNode node = BytecodeNode::Star(source_info, operand);
next_stage_->Write(&node);
} else if (output == accumulator_) {
uint32_t operand = static_cast<uint32_t>(input.ToOperand());
BytecodeNode node(Bytecode::kLdar, operand, source_info);
BytecodeNode node = BytecodeNode::Ldar(source_info, operand);
next_stage_->Write(&node);
} else {
uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
BytecodeNode node(Bytecode::kMov, operand0, operand1, source_info);
BytecodeNode node = BytecodeNode::Mov(source_info, operand0, operand1);
next_stage_->Write(&node);
}
if (output != accumulator_) {
@@ -365,7 +365,7 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
BytecodeSourceInfo source_info) const {
DCHECK(source_info.is_valid());
BytecodeNode nop(Bytecode::kNop, source_info);
BytecodeNode nop = BytecodeNode::Nop(source_info);
next_stage_->Write(&nop);
}
@@ -416,32 +416,6 @@ RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
}
}
void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target aren't
// known),
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
Flush();
}
// Materialize the accumulator if it is read by the bytecode. The
// accumulator is special and no other register can be materialized
// in its place.
if (Bytecodes::ReadsAccumulator(bytecode) &&
!accumulator_info_->materialized()) {
Materialize(accumulator_info_);
}
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
if (Bytecodes::WritesAccumulator(bytecode)) {
PrepareOutputRegister(accumulator_);
}
}
void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
DCHECK(RegisterIsTemporary(reg));
size_t index = GetRegisterInfoTableIndex(reg);
@@ -46,7 +46,32 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
void Flush();
// Prepares for |bytecode|.
void PrepareForBytecode(Bytecode bytecode);
template <Bytecode bytecode, AccumulatorUse accumulator_use>
INLINE(void PrepareForBytecode()) {
if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target
// aren't known),
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
Flush();
}
// Materialize the accumulator if it is read by the bytecode. The
// accumulator is special and no other register can be materialized
// in its place.
if (BytecodeOperands::ReadsAccumulator(accumulator_use)) {
Materialize(accumulator_info_);
}
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
if (BytecodeOperands::WritesAccumulator(accumulator_use)) {
PrepareOutputRegister(accumulator_);
}
}
// Prepares |reg| for being used as an output operand.
void PrepareOutputRegister(Register reg);
@@ -391,14 +391,6 @@ enum class Bytecode : uint8_t {
#undef COUNT_BYTECODE
};
// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
// See crbug.com/603131.
#if V8_CC_MSVC
#define CONSTEXPR const
#else
#define CONSTEXPR constexpr
#endif
class V8_EXPORT_PRIVATE Bytecodes final {
public:
// The maximum number of operands a bytecode may have.
@@ -466,14 +458,12 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns true if |bytecode| reads the accumulator.
static bool ReadsAccumulator(Bytecode bytecode) {
return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
AccumulatorUse::kRead;
return BytecodeOperands::ReadsAccumulator(GetAccumulatorUse(bytecode));
}
// Returns true if |bytecode| writes the accumulator.
static bool WritesAccumulator(Bytecode bytecode) {
return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
AccumulatorUse::kWrite;
return BytecodeOperands::WritesAccumulator(GetAccumulatorUse(bytecode));
}
// Return true if |bytecode| writes the accumulator with a boolean value.
@@ -502,7 +492,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Return true if |bytecode| is an accumulator load without effects,
// e.g. LdaConstant, LdaTrue, Ldar.
static CONSTEXPR bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
static constexpr bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero ||
bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull ||
bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
@@ -515,124 +505,124 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Return true if |bytecode| is a register load without effects,
// e.g. Mov, Star.
static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
static constexpr bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
}
// Returns true if the bytecode is a conditional jump taking
// an immediate byte operand (OperandType::kImm).
static CONSTEXPR bool IsConditionalJumpImmediate(Bytecode bytecode) {
static constexpr bool IsConditionalJumpImmediate(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfToBooleanTrue &&
bytecode <= Bytecode::kJumpIfNotHole;
}
// Returns true if the bytecode is a conditional jump taking
// a constant pool entry (OperandType::kIdx).
static CONSTEXPR bool IsConditionalJumpConstant(Bytecode bytecode) {
static constexpr bool IsConditionalJumpConstant(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfNullConstant &&
bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
}
// Returns true if the bytecode is a conditional jump taking
// any kind of operand.
static CONSTEXPR bool IsConditionalJump(Bytecode bytecode) {
static constexpr bool IsConditionalJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfNullConstant &&
bytecode <= Bytecode::kJumpIfNotHole;
}
// Returns true if the bytecode is an unconditional jump.
static CONSTEXPR bool IsUnconditionalJump(Bytecode bytecode) {
static constexpr bool IsUnconditionalJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpLoop &&
bytecode <= Bytecode::kJumpConstant;
}
// Returns true if the bytecode is a jump or a conditional jump taking
// an immediate byte operand (OperandType::kImm).
static CONSTEXPR bool IsJumpImmediate(Bytecode bytecode) {
static constexpr bool IsJumpImmediate(Bytecode bytecode) {
return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpLoop ||
IsConditionalJumpImmediate(bytecode);
}
// Returns true if the bytecode is a jump or conditional jump taking a
// constant pool entry (OperandType::kIdx).
static CONSTEXPR bool IsJumpConstant(Bytecode bytecode) {
static constexpr bool IsJumpConstant(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpConstant &&
bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
}
// Returns true if the bytecode is a jump that internally coerces the
// accumulator to a boolean.
static CONSTEXPR bool IsJumpIfToBoolean(Bytecode bytecode) {
static constexpr bool IsJumpIfToBoolean(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfToBooleanTrueConstant &&
bytecode <= Bytecode::kJumpIfToBooleanFalse;
}
// Returns true if the bytecode is a jump or conditional jump taking
// any kind of operand.
static CONSTEXPR bool IsJump(Bytecode bytecode) {
static constexpr bool IsJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpLoop &&
bytecode <= Bytecode::kJumpIfNotHole;
}
// Returns true if the bytecode is a forward jump or conditional jump taking
// any kind of operand.
static CONSTEXPR bool IsForwardJump(Bytecode bytecode) {
static constexpr bool IsForwardJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJump && bytecode <= Bytecode::kJumpIfNotHole;
}
// Returns true if the bytecode is a conditional jump, a jump, or a return.
static CONSTEXPR bool IsJumpOrReturn(Bytecode bytecode) {
static constexpr bool IsJumpOrReturn(Bytecode bytecode) {
return bytecode == Bytecode::kReturn || IsJump(bytecode);
}
// Return true if |bytecode| is a jump without effects,
// e.g. any jump excluding those that include type coercion like
// JumpIfTrueToBoolean.
static CONSTEXPR bool IsJumpWithoutEffects(Bytecode bytecode) {
static constexpr bool IsJumpWithoutEffects(Bytecode bytecode) {
return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
}
// Returns true if |bytecode| has no effects. These bytecodes only manipulate
// interpreter frame state and will never throw.
static CONSTEXPR bool IsWithoutExternalSideEffects(Bytecode bytecode) {
static constexpr bool IsWithoutExternalSideEffects(Bytecode bytecode) {
return (IsAccumulatorLoadWithoutEffects(bytecode) ||
IsRegisterLoadWithoutEffects(bytecode) ||
bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
}
// Returns true if the bytecode is Ldar or Star.
static CONSTEXPR bool IsLdarOrStar(Bytecode bytecode) {
static constexpr bool IsLdarOrStar(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
}
// Returns true if |bytecode| puts a name in the accumulator.
static CONSTEXPR bool PutsNameInAccumulator(Bytecode bytecode) {
static constexpr bool PutsNameInAccumulator(Bytecode bytecode) {
return bytecode == Bytecode::kTypeOf;
}
// Returns true if the bytecode is a call or a constructor call.
static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
static constexpr bool IsCallOrNew(Bytecode bytecode) {
return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
}
// Returns true if the bytecode is a call to the runtime.
static CONSTEXPR bool IsCallRuntime(Bytecode bytecode) {
static constexpr bool IsCallRuntime(Bytecode bytecode) {
return bytecode == Bytecode::kCallRuntime ||
bytecode == Bytecode::kCallRuntimeForPair ||
bytecode == Bytecode::kInvokeIntrinsic;
}
// Returns true if the bytecode is a scaling prefix bytecode.
static CONSTEXPR bool IsPrefixScalingBytecode(Bytecode bytecode) {
static constexpr bool IsPrefixScalingBytecode(Bytecode bytecode) {
return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
bytecode == Bytecode::kDebugBreakExtraWide ||
bytecode == Bytecode::kDebugBreakWide;
}
// Returns the number of values which |bytecode| returns.
static CONSTEXPR size_t ReturnCount(Bytecode bytecode) {
static constexpr size_t ReturnCount(Bytecode bytecode) {
return bytecode == Bytecode::kReturn ? 1 : 0;
}
@@ -817,10 +807,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
static const OperandSize* const kOperandSizes[][3];
};
// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
// See crbug.com/603131.
#undef CONSTEXPR
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const Bytecode& bytecode);
@@ -106,6 +106,7 @@ v8_executable("unittests") {
"interpreter/bytecode-array-writer-unittest.cc",
"interpreter/bytecode-dead-code-optimizer-unittest.cc",
"interpreter/bytecode-decoder-unittest.cc",
"interpreter/bytecode-operands-unittest.cc",
"interpreter/bytecode-peephole-optimizer-unittest.cc",
"interpreter/bytecode-pipeline-unittest.cc",
"interpreter/bytecode-register-allocator-unittest.cc",
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/interpreter/bytecode-operands.h"
#include "src/isolate.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace interpreter {
using BytecodeOperandsTest = TestWithIsolateAndZone;
TEST(BytecodeOperandsTest, IsScalableSignedByte) {
#define SCALABLE_SIGNED_OPERAND(Name, ...) \
CHECK(BytecodeOperands::IsScalableSignedByte(OperandType::k##Name));
REGISTER_OPERAND_TYPE_LIST(SCALABLE_SIGNED_OPERAND)
SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(SCALABLE_SIGNED_OPERAND)
#undef SCALABLE_SIGNED_OPERAND
#define NOT_SCALABLE_SIGNED_OPERAND(Name, ...) \
CHECK(!BytecodeOperands::IsScalableSignedByte(OperandType::k##Name));
INVALID_OPERAND_TYPE_LIST(NOT_SCALABLE_SIGNED_OPERAND)
UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(NOT_SCALABLE_SIGNED_OPERAND)
UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(NOT_SCALABLE_SIGNED_OPERAND)
#undef NOT_SCALABLE_SIGNED_OPERAND
}
TEST(BytecodeOperandsTest, IsScalableUnsignedByte) {
#define SCALABLE_UNSIGNED_OPERAND(Name, ...) \
CHECK(BytecodeOperands::IsScalableUnsignedByte(OperandType::k##Name));
UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(SCALABLE_UNSIGNED_OPERAND)
#undef SCALABLE_UNSIGNED_OPERAND
#define NOT_SCALABLE_UNSIGNED_OPERAND(Name, ...) \
CHECK(!BytecodeOperands::IsScalableUnsignedByte(OperandType::k##Name));
INVALID_OPERAND_TYPE_LIST(NOT_SCALABLE_UNSIGNED_OPERAND)
REGISTER_OPERAND_TYPE_LIST(NOT_SCALABLE_UNSIGNED_OPERAND)
SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(NOT_SCALABLE_UNSIGNED_OPERAND)
UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(NOT_SCALABLE_UNSIGNED_OPERAND)
#undef NOT_SCALABLE_UNSIGNED_OPERAND
}
} // namespace interpreter
} // namespace internal
} // namespace v8
@@ -19,11 +19,12 @@ class BytecodePeepholeOptimizerTest : public BytecodePipelineStage,
public TestWithIsolateAndZone {
public:
BytecodePeepholeOptimizerTest()
: peephole_optimizer_(this), last_written_(Bytecode::kIllegal) {}
: peephole_optimizer_(this),
last_written_(BytecodeNode::Illegal(BytecodeSourceInfo())) {}
~BytecodePeepholeOptimizerTest() override {}
void Reset() {
last_written_.set_bytecode(Bytecode::kIllegal);
last_written_ = BytecodeNode::Illegal(BytecodeSourceInfo());
write_count_ = 0;
}
@@ -134,36 +134,6 @@ TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
CHECK_NE(node, other);
}
TEST_F(BytecodeNodeTest, SetBytecode0) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, false);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], source_info);
CHECK_EQ(node.source_info(), source_info);
BytecodeNode clone(Bytecode::kIllegal);
clone = node;
clone.set_bytecode(Bytecode::kNop);
CHECK_EQ(clone.bytecode(), Bytecode::kNop);
CHECK_EQ(clone.operand_count(), 0);
CHECK_EQ(clone.source_info(), source_info);
}
TEST_F(BytecodeNodeTest, SetBytecode1) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, false);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], source_info);
BytecodeNode clone(Bytecode::kIllegal);
clone = node;
clone.set_bytecode(Bytecode::kJump, 0x01aabbcc);
CHECK_EQ(clone.bytecode(), Bytecode::kJump);
CHECK_EQ(clone.operand_count(), 1);
CHECK_EQ(clone.operand(0), 0x01aabbccu);
CHECK_EQ(clone.source_info(), source_info);
}
} // namespace interpreter
} // namespace internal
} // namespace v8
......@@ -78,7 +78,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
Register temp = NewTemporary();
optimizer()->DoStar(temp, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode(Bytecode::kJump);
optimizer()->PrepareForBytecode<Bytecode::kJump, AccumulatorUse::kNone>();
CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), static_cast<uint32_t>(temp.ToOperand()));
@@ -96,7 +96,7 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
ReleaseTemporaries(temp);
CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode(Bytecode::kReturn);
optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(0).operand(0),
static_cast<uint32_t>(parameter.ToOperand()));
@@ -104,12 +104,12 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
Initialize(3, 1);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
optimizer()->DoStar(temp1, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0),
@@ -120,7 +120,7 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
CHECK_EQ(write_count(), 1u);
optimizer()->DoLdar(temp0, BytecodeSourceInfo());
CHECK_EQ(write_count(), 1u);
optimizer()->PrepareForBytecode(Bytecode::kReturn);
optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
CHECK_EQ(write_count(), 2u);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(1).operand(0),
......@@ -129,7 +129,7 @@ TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterNotFlushed) {
Initialize(3, 1);
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
optimizer()->DoStar(temp0, BytecodeSourceInfo());
@@ -158,7 +158,7 @@ TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
CHECK_EQ(output()->at(0).operand(1),
static_cast<uint32_t>(local.ToOperand()));
optimizer()->PrepareForBytecode(Bytecode::kReturn);
optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
CHECK_EQ(write_count(), 2u);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
CHECK_EQ(output()->at(1).operand(0),
@@ -188,12 +188,13 @@ TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
Register parameter = Register::FromParameterIndex(1, 3);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
optimizer()->PrepareForBytecode<Bytecode::kLdaSmi, AccumulatorUse::kWrite>();
optimizer()->DoStar(temp0, BytecodeSourceInfo());
optimizer()->DoMov(parameter, temp1, BytecodeSourceInfo());
CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode(Bytecode::kCallJSRuntime);
optimizer()
->PrepareForBytecode<Bytecode::kCallJSRuntime, AccumulatorUse::kWrite>();
RegisterList reg_list =
optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 2));
CHECK_EQ(temp0.index(), reg_list.first_register().index());
@@ -97,6 +97,7 @@
'interpreter/bytecode-array-writer-unittest.cc',
'interpreter/bytecode-dead-code-optimizer-unittest.cc',
'interpreter/bytecode-decoder-unittest.cc',
'interpreter/bytecode-operands-unittest.cc',
'interpreter/bytecode-peephole-optimizer-unittest.cc',
'interpreter/bytecode-pipeline-unittest.cc',
'interpreter/bytecode-register-allocator-unittest.cc',