Commit 40511877 authored by oth, committed by Commit bot

[interpreter] Introduce binary op bytecodes for Smi operand.

Introduces fused bytecodes that combine an LdaSmi followed by a binary op
bytecode into a single operation. The fused bytecodes cover the combinations
that occur frequently in Octane: AddSmi, SubSmi, BitwiseOrSmi, BitwiseAndSmi,
ShiftLeftSmi, ShiftRightSmi.

There are additional code stubs for these operations that are biased towards
both the left-hand and right-hand operands being Smis.
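
As the updated bytecode expectations below illustrate, the peephole optimizer
rewrites a sequence such as

  B(LdaSmi), U8(1),
  B(Add), R(0),

into the single fused bytecode

  B(AddSmi), U8(1), R(0),

with the immediate as the rhs operand and the register as the lhs operand.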

BUG=v8:4280
LOG=N

Review-Url: https://codereview.chromium.org/2111923002
Cr-Commit-Position: refs/heads/master@{#37531}
parent ddc75cc1
......@@ -1698,7 +1698,7 @@ compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
Node* pair = assembler->SmiAddWithOverflow(value, one);
Node* overflow = assembler->Projection(1, pair);
// Check if the Smi additon overflowed.
// Check if the Smi addition overflowed.
Label if_overflow(assembler), if_notoverflow(assembler);
assembler->Branch(overflow, &if_overflow, &if_notoverflow);
......
......@@ -1155,6 +1155,45 @@ void BytecodeGraphBuilder::VisitShiftRightLogical() {
BuildBinaryOp(javascript()->ShiftRightLogical(hints));
}
void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* js_op) {
FrameStateBeforeAndAfter states(this);
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
Node* node = NewNode(js_op, left, right);
environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitAddSmi() {
BinaryOperationHints hints = BinaryOperationHints::Any();
BuildBinaryOpWithImmediate(javascript()->Add(hints));
}
void BytecodeGraphBuilder::VisitSubSmi() {
BinaryOperationHints hints = BinaryOperationHints::Any();
BuildBinaryOpWithImmediate(javascript()->Subtract(hints));
}
void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
BinaryOperationHints hints = BinaryOperationHints::Any();
BuildBinaryOpWithImmediate(javascript()->BitwiseOr(hints));
}
void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
BinaryOperationHints hints = BinaryOperationHints::Any();
BuildBinaryOpWithImmediate(javascript()->BitwiseAnd(hints));
}
void BytecodeGraphBuilder::VisitShiftLeftSmi() {
BinaryOperationHints hints = BinaryOperationHints::Any();
BuildBinaryOpWithImmediate(javascript()->ShiftLeft(hints));
}
void BytecodeGraphBuilder::VisitShiftRightSmi() {
BinaryOperationHints hints = BinaryOperationHints::Any();
BuildBinaryOpWithImmediate(javascript()->ShiftRight(hints));
}
void BytecodeGraphBuilder::VisitInc() {
FrameStateBeforeAndAfter states(this);
// Note: Use subtract -1 here instead of add 1 to ensure we always convert to
......
......@@ -124,6 +124,7 @@ class BytecodeGraphBuilder {
void BuildCall(TailCallMode tail_call_mode);
void BuildThrow();
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
void BuildCompareOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
......
......@@ -125,7 +125,8 @@ bool BytecodePeepholeOptimizer::CanElideCurrent(
return true;
} else {
// Additional candidates for eliding current:
// (i) ToNumber if the last puts a number in the accumulator.
// (i) current is Nop.
// (ii) ToNumber if the last puts a number in the accumulator.
return false;
}
}
......@@ -188,6 +189,18 @@ void TransformLdaStarToLdrLdar(Bytecode new_bytecode, BytecodeNode* const last,
current->set_bytecode(Bytecode::kLdar, current->operand(0));
}
void TransformToBinaryOpWithSmiOnRhs(Bytecode new_bytecode,
BytecodeNode* const last,
BytecodeNode* const current) {
DCHECK(Bytecodes::IsLdaSmiOrLdaZero(last->bytecode()));
uint32_t imm_operand =
last->bytecode() == Bytecode::kLdaSmi ? last->operand(0) : 0;
current->set_bytecode(new_bytecode, imm_operand, current->operand(0));
if (last->source_info().is_valid()) {
current->source_info().Clone(last->source_info());
}
}
} // namespace
bool BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes(
......@@ -216,7 +229,43 @@ bool BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes(
default:
break;
}
} else if (Bytecodes::IsLdaSmiOrLdaZero(last_.bytecode()) &&
(!last_.source_info().is_valid() ||
!current->source_info().is_valid())) {
switch (current->bytecode()) {
case Bytecode::kAdd:
TransformToBinaryOpWithSmiOnRhs(Bytecode::kAddSmi, &last_, current);
InvalidateLast();
return true;
case Bytecode::kSub:
TransformToBinaryOpWithSmiOnRhs(Bytecode::kSubSmi, &last_, current);
InvalidateLast();
return true;
case Bytecode::kBitwiseOr:
TransformToBinaryOpWithSmiOnRhs(Bytecode::kBitwiseOrSmi, &last_,
current);
InvalidateLast();
return true;
case Bytecode::kBitwiseAnd:
TransformToBinaryOpWithSmiOnRhs(Bytecode::kBitwiseAndSmi, &last_,
current);
InvalidateLast();
return true;
case Bytecode::kShiftLeft:
TransformToBinaryOpWithSmiOnRhs(Bytecode::kShiftLeftSmi, &last_,
current);
InvalidateLast();
return true;
case Bytecode::kShiftRight:
TransformToBinaryOpWithSmiOnRhs(Bytecode::kShiftRightSmi, &last_,
current);
InvalidateLast();
return true;
default:
break;
}
}
return false;
}
......@@ -279,7 +328,6 @@ bool BytecodePeepholeOptimizer::CanElideLast(
BytecodeNode* BytecodePeepholeOptimizer::Optimize(BytecodeNode* current) {
TryToRemoveLastExpressionPosition(current);
if (TransformCurrentBytecode(current) ||
TransformLastAndCurrentBytecodes(current)) {
return current;
......
......@@ -59,17 +59,6 @@ BytecodeNode& BytecodeNode::operator=(const BytecodeNode& other) {
return *this;
}
void BytecodeNode::set_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
bytecode_ = bytecode;
}
void BytecodeNode::set_bytecode(Bytecode bytecode, uint32_t operand0) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
bytecode_ = bytecode;
operands_[0] = operand0;
}
void BytecodeNode::Clone(const BytecodeNode* const other) {
memcpy(this, other, sizeof(*other));
}
......
......@@ -151,8 +151,21 @@ class BytecodeNode final : ZoneObject {
BytecodeNode(const BytecodeNode& other);
BytecodeNode& operator=(const BytecodeNode& other);
void set_bytecode(Bytecode bytecode);
void set_bytecode(Bytecode bytecode, uint32_t operand0);
void set_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
bytecode_ = bytecode;
}
void set_bytecode(Bytecode bytecode, uint32_t operand0) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
bytecode_ = bytecode;
operands_[0] = operand0;
}
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
bytecode_ = bytecode;
operands_[0] = operand0;
operands_[1] = operand1;
}
// Clone |other|.
void Clone(const BytecodeNode* const other);
......
......@@ -473,6 +473,11 @@ bool Bytecodes::IsLdarOrStar(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
}
// static
bool Bytecodes::IsLdaSmiOrLdaZero(Bytecode bytecode) {
return bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaZero;
}
// static
bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
switch (bytecode) {
......
......@@ -157,6 +157,18 @@ namespace interpreter {
V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg) \
V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
/* Binary operators with immediate operands */ \
V(AddSmi, AccumulatorUse::kWrite, OperandType::kImm, OperandType::kReg) \
V(SubSmi, AccumulatorUse::kWrite, OperandType::kImm, OperandType::kReg) \
V(BitwiseOrSmi, AccumulatorUse::kWrite, OperandType::kImm, \
OperandType::kReg) \
V(BitwiseAndSmi, AccumulatorUse::kWrite, OperandType::kImm, \
OperandType::kReg) \
V(ShiftLeftSmi, AccumulatorUse::kWrite, OperandType::kImm, \
OperandType::kReg) \
V(ShiftRightSmi, AccumulatorUse::kWrite, OperandType::kImm, \
OperandType::kReg) \
\
/* Unary Operators */ \
V(Inc, AccumulatorUse::kReadWrite) \
V(Dec, AccumulatorUse::kReadWrite) \
......@@ -592,6 +604,9 @@ class Bytecodes {
// Returns true if the bytecode is Ldar or Star.
static bool IsLdarOrStar(Bytecode bytecode);
// Returns true if the bytecode is LdaSmi or LdaZero.
static bool IsLdaSmiOrLdaZero(Bytecode bytecode);
// Returns true if the bytecode has wider operand forms.
static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
......
......@@ -836,6 +836,174 @@ void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
DoBinaryOp<ShiftRightLogicalStub>(assembler);
}
// AddSmi <imm> <reg>
//
// Adds an immediate value <imm> to register <reg>. For this
// operation <reg> is the lhs operand and <imm> is the rhs operand.
void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
Variable var_result(assembler, MachineRepresentation::kTagged);
Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
end(assembler);
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
Node* raw_int = __ BytecodeOperandImm(0);
Node* right = __ SmiTag(raw_int);
// {right} is known to be a Smi.
// Check if {left} is a Smi; if so, take the fast path.
__ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi addition first.
Node* pair = __ SmiAddWithOverflow(left, right);
Node* overflow = __ Projection(1, pair);
// Check if the Smi addition overflowed.
Label if_notoverflow(assembler);
__ BranchIf(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
var_result.Bind(__ Projection(0, pair));
__ Goto(&end);
}
}
__ Bind(&slowpath);
{
Node* context = __ GetContext();
Callable callable = CodeFactory::Add(__ isolate());
var_result.Bind(__ CallStub(callable, context, left, right));
__ Goto(&end);
}
__ Bind(&end);
{
__ SetAccumulator(var_result.value());
__ Dispatch();
}
}
// SubSmi <imm> <reg>
//
// Subtracts an immediate value <imm> from register <reg>. For this
// operation <reg> is the lhs operand and <imm> is the rhs operand.
void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
Variable var_result(assembler, MachineRepresentation::kTagged);
Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
end(assembler);
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
Node* raw_int = __ BytecodeOperandImm(0);
Node* right = __ SmiTag(raw_int);
// {right} is known to be a Smi.
// Check if {left} is a Smi; if so, take the fast path.
__ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi subtraction first.
Node* pair = __ SmiSubWithOverflow(left, right);
Node* overflow = __ Projection(1, pair);
// Check if the Smi subtraction overflowed.
Label if_notoverflow(assembler);
__ BranchIf(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
var_result.Bind(__ Projection(0, pair));
__ Goto(&end);
}
}
__ Bind(&slowpath);
{
Node* context = __ GetContext();
Callable callable = CodeFactory::Subtract(__ isolate());
var_result.Bind(__ CallStub(callable, context, left, right));
__ Goto(&end);
}
__ Bind(&end);
{
__ SetAccumulator(var_result.value());
__ Dispatch();
}
}
// BitwiseOrSmi <imm> <reg>
//
// BitwiseOr <reg> with <imm>. For this operation <reg> is the lhs
// operand and <imm> is the rhs operand.
void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
Node* raw_int = __ BytecodeOperandImm(0);
Node* right = __ SmiTag(raw_int);
Node* context = __ GetContext();
Node* lhs_value = __ TruncateTaggedToWord32(context, left);
Node* rhs_value = __ SmiToWord32(right);
Node* value = __ Word32Or(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
__ SetAccumulator(result);
__ Dispatch();
}
// BitwiseAndSmi <imm> <reg>
//
// BitwiseAnd <reg> with <imm>. For this operation <reg> is the lhs
// operand and <imm> is the rhs operand.
void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
Node* raw_int = __ BytecodeOperandImm(0);
Node* right = __ SmiTag(raw_int);
Node* context = __ GetContext();
Node* lhs_value = __ TruncateTaggedToWord32(context, left);
Node* rhs_value = __ SmiToWord32(right);
Node* value = __ Word32And(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
__ SetAccumulator(result);
__ Dispatch();
}
// ShiftLeftSmi <imm> <reg>
//
// Left shifts register <reg> by the count specified in <imm>.
// Register <reg> is converted to an int32 before the operation. The 5
// lsb bits of <imm> are used as the count, i.e. <reg> << (<imm> & 0x1F).
void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
Node* raw_int = __ BytecodeOperandImm(0);
Node* right = __ SmiTag(raw_int);
Node* context = __ GetContext();
Node* lhs_value = __ TruncateTaggedToWord32(context, left);
Node* rhs_value = __ SmiToWord32(right);
Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
Node* value = __ Word32Shl(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
__ SetAccumulator(result);
__ Dispatch();
}
// ShiftRightSmi <imm> <reg>
//
// Right shifts register <reg> by the count specified in <imm>.
// Register <reg> is converted to an int32 before the operation. The 5
// lsb bits of <imm> are used as the count, i.e. <reg> >> (<imm> & 0x1F).
void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
Node* raw_int = __ BytecodeOperandImm(0);
Node* right = __ SmiTag(raw_int);
Node* context = __ GetContext();
Node* lhs_value = __ TruncateTaggedToWord32(context, left);
Node* rhs_value = __ SmiToWord32(right);
Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
Node* value = __ Word32Sar(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
__ SetAccumulator(result);
__ Dispatch();
}
void Interpreter::DoUnaryOp(Callable callable,
InterpreterAssembler* assembler) {
Node* target = __ HeapConstant(callable.code());
......
......@@ -74,6 +74,11 @@ class Interpreter {
template <class Generator>
void DoBinaryOp(InterpreterAssembler* assembler);
// Generates code to perform the binary operation via |Generator| using
// an immediate value rather than the accumulator as the rhs operand.
template <class Generator>
void DoBinaryOpWithImmediate(InterpreterAssembler* assembler);
// Generates code to perform the unary operation via |callable|.
void DoUnaryOp(Callable callable, InterpreterAssembler* assembler);
......
......@@ -31,7 +31,7 @@ snippet: "
"
frame size: 3
parameter count: 1
bytecode array length: 35
bytecode array length: 34
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
......@@ -44,8 +44,7 @@ bytecodes: [
/* 54 E> */ B(StaKeyedPropertySloppy), R(2), R(1), U8(1),
B(LdaSmi), U8(1),
B(Star), R(1),
B(LdaSmi), U8(1),
/* 57 E> */ B(Add), R(0),
/* 57 E> */ B(AddSmi), U8(1), R(0),
B(StaKeyedPropertySloppy), R(2), R(1), U8(1),
B(Ldar), R(2),
/* 66 S> */ B(Return),
......@@ -80,7 +79,7 @@ snippet: "
"
frame size: 5
parameter count: 1
bytecode array length: 65
bytecode array length: 64
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
......@@ -103,8 +102,7 @@ bytecodes: [
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
B(LdaSmi), U8(2),
/* 66 E> */ B(Add), R(0),
/* 66 E> */ B(AddSmi), U8(2), R(0),
B(StaKeyedPropertySloppy), R(4), R(3), U8(3),
B(Ldar), R(4),
B(StaKeyedPropertySloppy), R(2), R(1), U8(5),
......
......@@ -199,7 +199,7 @@ snippet: "
"
frame size: 5
parameter count: 1
bytecode array length: 65
bytecode array length: 64
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
......@@ -211,8 +211,7 @@ bytecodes: [
B(Star), R(0),
/* 68 E> */ B(Add), R(2),
B(Star), R(3),
B(LdaSmi), U8(1),
/* 76 E> */ B(Add), R(0),
/* 76 E> */ B(AddSmi), U8(1), R(0),
B(Star), R(4),
B(LdaSmi), U8(2),
B(Star), R(1),
......
......@@ -19,13 +19,12 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 15
bytecode array length: 14
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 56 S> */ B(LdaSmi), U8(1),
B(Add), R(0),
/* 56 S> */ B(AddSmi), U8(1), R(0),
B(Star), R(0),
/* 69 S> */ B(Jump), U8(2),
/* 97 S> */ B(Ldar), R(0),
......
......@@ -24,7 +24,7 @@ snippet: "
"
frame size: 7
parameter count: 1
bytecode array length: 54
bytecode array length: 53
bytecodes: [
B(Mov), R(closure), R(0),
/* 99 E> */ B(StackCheck),
......@@ -44,8 +44,7 @@ bytecodes: [
B(Star), R(1),
/* 117 E> */ B(Call), R(1), R(2), U8(1), U8(1),
B(Star), R(3),
B(LdaSmi), U8(1),
B(Add), R(3),
B(AddSmi), U8(1), R(3),
/* 131 S> */ B(Return),
]
constant pool: [
......
......@@ -13,13 +13,12 @@ snippet: "
"
frame size: 2
parameter count: 1
bytecode array length: 16
bytecode array length: 15
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(2),
B(Add), R(0),
/* 45 S> */ B(AddSmi), U8(2), R(0),
B(Mov), R(0), R(1),
B(Star), R(0),
B(LdaUndefined),
......@@ -112,7 +111,7 @@ snippet: "
"
frame size: 2
parameter count: 1
bytecode array length: 29
bytecode array length: 28
bytecodes: [
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
B(PushContext), R(0),
......@@ -121,8 +120,7 @@ bytecodes: [
/* 42 E> */ B(StaContextSlot), R(context), U8(4),
/* 45 S> */ B(CreateClosure), U8(0), U8(2),
/* 75 S> */ B(LdrContextSlot), R(context), U8(4), R(1),
B(LdaSmi), U8(24),
B(BitwiseOr), R(1),
B(BitwiseOrSmi), U8(24), R(1),
/* 77 E> */ B(StaContextSlot), R(context), U8(4),
B(LdaUndefined),
/* 84 S> */ B(Return),
......
......@@ -16,12 +16,11 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 12
bytecode array length: 11
bytecodes: [
/* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdrGlobal), U8(1), R(0),
B(LdaSmi), U8(1),
B(BitwiseAnd), R(0),
B(BitwiseAndSmi), U8(1), R(0),
/* 45 E> */ B(StaGlobalSloppy), U8(0), U8(3),
/* 51 S> */ B(Return),
]
......@@ -39,12 +38,11 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 12
bytecode array length: 11
bytecodes: [
/* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdrGlobal), U8(1), R(0),
B(LdaSmi), U8(1),
B(Add), R(0),
B(AddSmi), U8(1), R(0),
/* 51 E> */ B(StaGlobalSloppy), U8(0), U8(3),
/* 57 S> */ B(Return),
]
......
......@@ -116,14 +116,13 @@ snippet: "
"
frame size: 2
parameter count: 1
bytecode array length: 23
bytecode array length: 22
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 30 S> */ B(JumpIfToBooleanFalse), U8(13),
/* 43 S> */ B(LdaSmi), U8(1),
B(Add), R(0),
/* 30 S> */ B(JumpIfToBooleanFalse), U8(12),
/* 43 S> */ B(AddSmi), U8(1), R(0),
B(Mov), R(0), R(1),
B(Star), R(0),
B(Jump), U8(5),
......
......@@ -76,15 +76,14 @@ snippet: "
"
frame size: 2
parameter count: 1
bytecode array length: 22
bytecode array length: 21
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1),
B(Star), R(1),
B(LdaSmi), U8(1),
/* 67 E> */ B(Add), R(0),
/* 67 E> */ B(AddSmi), U8(1), R(0),
B(StaNamedPropertySloppy), R(1), U8(1), U8(1),
B(Ldar), R(1),
/* 76 S> */ B(Return),
......
......@@ -32,13 +32,12 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 9
bytecode array length: 8
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(3),
B(Add), R(0),
/* 45 S> */ B(AddSmi), U8(3), R(0),
/* 59 S> */ B(Return),
]
constant pool: [
......@@ -52,13 +51,12 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 9
bytecode array length: 8
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(3),
B(Sub), R(0),
/* 45 S> */ B(SubSmi), U8(3), R(0),
/* 59 S> */ B(Return),
]
constant pool: [
......@@ -132,13 +130,12 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 10
bytecode array length: 9
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(2),
B(BitwiseOr), R(0),
/* 45 S> */ B(BitwiseOrSmi), U8(2), R(0),
/* 59 S> */ B(Return),
]
constant pool: [
......@@ -172,13 +169,12 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 10
bytecode array length: 9
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), U8(2),
B(BitwiseAnd), R(0),
/* 45 S> */ B(BitwiseAndSmi), U8(2), R(0),
/* 59 S> */ B(Return),
]
constant pool: [
......@@ -192,13 +188,12 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 10
bytecode array length: 9
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), U8(3),
B(ShiftLeft), R(0),
/* 46 S> */ B(ShiftLeftSmi), U8(3), R(0),
/* 61 S> */ B(Return),
]
constant pool: [
......@@ -212,13 +207,12 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 10
bytecode array length: 9
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), U8(3),
B(ShiftRight), R(0),
/* 46 S> */ B(ShiftRightSmi), U8(3), R(0),
/* 61 S> */ B(Return),
]
constant pool: [
......
......@@ -481,7 +481,7 @@ snippet: "
"
frame size: 5
parameter count: 1
bytecode array length: 59
bytecode array length: 58
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
......@@ -493,10 +493,9 @@ bytecodes: [
B(JumpIfToBooleanTrue), U8(10),
B(LdaSmi), U8(2),
B(TestEqualStrict), R(3),
B(JumpIfTrue), U8(33),
B(Jump), U8(35),
B(LdaSmi), U8(1),
/* 77 E> */ B(Add), R(2),
B(JumpIfTrue), U8(32),
B(Jump), U8(34),
/* 77 E> */ B(AddSmi), U8(1), R(2),
B(Star), R(1),
/* 70 S> */ B(LdaSmi), U8(2),
B(TestEqualStrict), R(1),
......
......@@ -17,7 +17,7 @@ snippet: "
"
frame size: 1
parameter count: 1
bytecode array length: 23
bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
......@@ -25,12 +25,11 @@ bytecodes: [
/* 54 S> */ B(LdaSmi), U8(10),
/* 54 E> */ B(TestEqual), R(0),
B(LogicalNot),
B(JumpIfFalse), U8(11),
B(JumpIfFalse), U8(10),
/* 45 E> */ B(StackCheck),
/* 65 S> */ B(LdaSmi), U8(10),
B(Add), R(0),
/* 65 S> */ B(AddSmi), U8(10), R(0),
B(Star), R(0),
B(Jump), U8(-14),
B(Jump), U8(-13),
/* 79 S> */ B(Ldar), R(0),
/* 89 S> */ B(Return),
]
......@@ -99,7 +98,7 @@ snippet: "
"
frame size: 4
parameter count: 1
bytecode array length: 22
bytecode array length: 21
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(Wide), B(LdaSmi), U16(1234),
......@@ -107,8 +106,7 @@ bytecodes: [
/* 56 S> */ B(Nop),
/* 66 E> */ B(Mul), R(0),
B(Star), R(3),
B(LdaSmi), U8(1),
B(Sub), R(3),
B(SubSmi), U8(1), R(3),
B(LdrUndefined), R(1),
B(Ldar), R(1),
/* 74 S> */ B(Nop),
......
......@@ -42,6 +42,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.LoadLiteral(Smi::FromInt(0))
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(8))
.CompareOperation(Token::Value::NE, reg) // Prevent peephole optimization
// LdaSmi, Star -> LdrSmi.
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(10000000))
.StoreAccumulatorInRegister(reg)
......@@ -136,6 +138,20 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.BinaryOperation(Token::Value::SAR, reg)
.BinaryOperation(Token::Value::SHR, reg);
// Emit peephole optimizations of LdaSmi followed by binary operation.
builder.LoadLiteral(Smi::FromInt(1))
.BinaryOperation(Token::Value::ADD, reg)
.LoadLiteral(Smi::FromInt(2))
.BinaryOperation(Token::Value::SUB, reg)
.LoadLiteral(Smi::FromInt(3))
.BinaryOperation(Token::Value::BIT_AND, reg)
.LoadLiteral(Smi::FromInt(4))
.BinaryOperation(Token::Value::BIT_OR, reg)
.LoadLiteral(Smi::FromInt(5))
.BinaryOperation(Token::Value::SHL, reg)
.LoadLiteral(Smi::FromInt(6))
.BinaryOperation(Token::Value::SAR, reg);
// Emit count operator invocations.
builder.CountOperation(Token::Value::ADD).CountOperation(Token::Value::SUB);
......@@ -399,6 +415,12 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalse)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrueConstant)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalseConstant)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kAddSmi)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kSubSmi)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kBitwiseAndSmi)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kBitwiseOrSmi)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kShiftLeftSmi)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kShiftRightSmi)] = 1;
}
// Check return occurs at the end and only once in the BytecodeArray.
......
......@@ -43,8 +43,10 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.LoadLiteral(zero)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
.StackCheck(0)
.StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
.StackCheck(1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
.BinaryOperation(Token::Value::ADD, reg_0)
......@@ -122,6 +124,14 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
......@@ -140,6 +150,14 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
kPrefixByteSize;
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
......
......@@ -24,6 +24,11 @@ class BytecodePeepholeOptimizerTest : public BytecodePipelineStage,
peephole_optimizer_(&constant_array_builder_, this) {}
~BytecodePeepholeOptimizerTest() override {}
void Reset() {
last_written_.set_bytecode(Bytecode::kIllegal);
write_count_ = 0;
}
void Write(BytecodeNode* node) override {
write_count_++;
last_written_.Clone(node);
......@@ -489,6 +494,84 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaUndefinedStar) {
CHECK_EQ(last_written().bytecode(), third.bytecode());
}
TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
Bytecode operator_replacement_pairs[][2] = {
{Bytecode::kAdd, Bytecode::kAddSmi},
{Bytecode::kSub, Bytecode::kSubSmi},
{Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
{Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
{Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
{Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
for (auto operator_replacement : operator_replacement_pairs) {
uint32_t imm_operand = 17;
BytecodeNode first(Bytecode::kLdaSmi, imm_operand);
first.source_info().Clone({3, true});
uint32_t reg_operand = Register(0).ToOperand();
BytecodeNode second(operator_replacement[0], reg_operand);
optimizer()->Write(&first);
optimizer()->Write(&second);
Flush();
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written().bytecode(), operator_replacement[1]);
CHECK_EQ(last_written().operand_count(), 2);
CHECK_EQ(last_written().operand(0), imm_operand);
CHECK_EQ(last_written().operand(1), reg_operand);
CHECK_EQ(last_written().source_info(), first.source_info());
Reset();
}
}
TEST_F(BytecodePeepholeOptimizerTest, NotMergingLdaSmiWithBinaryOp) {
Bytecode operator_replacement_pairs[][2] = {
{Bytecode::kAdd, Bytecode::kAddSmi},
{Bytecode::kSub, Bytecode::kSubSmi},
{Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
{Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
{Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
{Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
for (auto operator_replacement : operator_replacement_pairs) {
uint32_t imm_operand = 17;
BytecodeNode first(Bytecode::kLdaSmi, imm_operand);
first.source_info().Clone({3, true});
uint32_t reg_operand = Register(0).ToOperand();
BytecodeNode second(operator_replacement[0], reg_operand);
second.source_info().Clone({4, true});
optimizer()->Write(&first);
optimizer()->Write(&second);
CHECK_EQ(last_written(), first);
Flush();
CHECK_EQ(last_written(), second);
Reset();
}
}
TEST_F(BytecodePeepholeOptimizerTest, MergeLdaZeroWithBinaryOp) {
Bytecode operator_replacement_pairs[][2] = {
{Bytecode::kAdd, Bytecode::kAddSmi},
{Bytecode::kSub, Bytecode::kSubSmi},
{Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
{Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
{Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
{Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
for (auto operator_replacement : operator_replacement_pairs) {
BytecodeNode first(Bytecode::kLdaZero);
uint32_t reg_operand = Register(0).ToOperand();
BytecodeNode second(operator_replacement[0], reg_operand);
optimizer()->Write(&first);
optimizer()->Write(&second);
Flush();
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written().bytecode(), operator_replacement[1]);
CHECK_EQ(last_written().operand_count(), 2);
CHECK_EQ(last_written().operand(0), 0);
CHECK_EQ(last_written().operand(1), reg_operand);
Reset();
}
}
} // namespace interpreter
} // namespace internal
} // namespace v8