Commit 02f917f7 authored by ishell's avatar ishell Committed by Commit bot

[interpreter][stubs] Fixing issues found by machine graph verifier.

All issues in interpreter bytecode handlers are fixed.

BUG=

Review-Url: https://codereview.chromium.org/2552883012
Cr-Commit-Position: refs/heads/master@{#41649}
parent aabbbec6
......@@ -344,7 +344,7 @@ void Builtins::Generate_ToLength(compiler::CodeAssemblerState* state) {
Node* len = var_len.value();
// Check if {len} is a positive Smi.
assembler.GotoIf(assembler.WordIsPositiveSmi(len), &return_len);
assembler.GotoIf(assembler.TaggedIsPositiveSmi(len), &return_len);
// Check if {len} is a (negative) Smi.
assembler.GotoIf(assembler.TaggedIsSmi(len), &return_zero);
......
......@@ -258,7 +258,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
// Omit ToLength if lastindex is a non-negative smi.
{
Label call_tolength(this, Label::kDeferred), next(this);
Branch(WordIsPositiveSmi(regexp_lastindex), &next, &call_tolength);
Branch(TaggedIsPositiveSmi(regexp_lastindex), &next, &call_tolength);
Bind(&call_tolength);
{
......
This diff is collapsed.
......@@ -23,6 +23,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define HEAP_CONSTANT_LIST(V) \
V(AccessorInfoMap, AccessorInfoMap) \
V(AllocationSiteMap, AllocationSiteMap) \
V(BooleanMap, BooleanMap) \
V(CodeMap, CodeMap) \
V(empty_string, EmptyString) \
......@@ -211,7 +212,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* TaggedIsSmi(Node* a);
Node* TaggedIsNotSmi(Node* a);
// Check that the value is a non-negative smi.
Node* WordIsPositiveSmi(Node* a);
Node* TaggedIsPositiveSmi(Node* a);
// Check that a word has a word-aligned address.
Node* WordIsWordAligned(Node* word);
Node* WordIsPowerOfTwo(Node* value);
......
......@@ -617,16 +617,16 @@ void StringLengthStub::GenerateAssembly(
assembler.Return(result);
}
#define BINARY_OP_STUB(Name) \
void Name::GenerateAssembly(compiler::CodeAssemblerState* state) const { \
typedef BinaryOpWithVectorDescriptor Descriptor; \
CodeStubAssembler assembler(state); \
assembler.Return(Generate(&assembler, \
assembler.Parameter(Descriptor::kLeft), \
assembler.Parameter(Descriptor::kRight), \
assembler.Parameter(Descriptor::kSlot), \
assembler.Parameter(Descriptor::kVector), \
assembler.Parameter(Descriptor::kContext))); \
#define BINARY_OP_STUB(Name) \
void Name::GenerateAssembly(compiler::CodeAssemblerState* state) const { \
typedef BinaryOpWithVectorDescriptor Descriptor; \
CodeStubAssembler assembler(state); \
assembler.Return(Generate( \
&assembler, assembler.Parameter(Descriptor::kLeft), \
assembler.Parameter(Descriptor::kRight), \
assembler.ChangeUint32ToWord(assembler.Parameter(Descriptor::kSlot)), \
assembler.Parameter(Descriptor::kVector), \
assembler.Parameter(Descriptor::kContext))); \
}
BINARY_OP_STUB(AddWithFeedbackStub)
BINARY_OP_STUB(SubtractWithFeedbackStub)
......@@ -1232,27 +1232,26 @@ compiler::Node* DivideWithFeedbackStub::Generate(
// Do floating point division if {divisor} is zero.
assembler->GotoIf(
assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
&bailout);
assembler->WordEqual(divisor, assembler->SmiConstant(0)), &bailout);
// Do floating point division if {dividend} is zero and {divisor} is
// negative.
Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
assembler->Branch(
assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
assembler->WordEqual(dividend, assembler->SmiConstant(0)),
&dividend_is_zero, &dividend_is_not_zero);
assembler->Bind(&dividend_is_zero);
{
assembler->GotoIf(
assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
assembler->SmiLessThan(divisor, assembler->SmiConstant(0)),
&bailout);
assembler->Goto(&dividend_is_not_zero);
}
assembler->Bind(&dividend_is_not_zero);
Node* untagged_divisor = assembler->SmiUntag(divisor);
Node* untagged_dividend = assembler->SmiUntag(dividend);
Node* untagged_divisor = assembler->SmiToWord32(divisor);
Node* untagged_dividend = assembler->SmiToWord32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
......@@ -1282,7 +1281,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
&bailout);
var_type_feedback.Bind(
assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
var_result.Bind(assembler->SmiTag(untagged_result));
var_result.Bind(assembler->SmiFromWord32(untagged_result));
assembler->Goto(&end);
// Bailout: convert {dividend} and {divisor} to double and do double
......@@ -2144,7 +2143,7 @@ void LoadIndexedInterceptorStub::GenerateAssembly(
Node* context = assembler.Parameter(Descriptor::kContext);
Label if_keyispositivesmi(&assembler), if_keyisinvalid(&assembler);
assembler.Branch(assembler.WordIsPositiveSmi(key), &if_keyispositivesmi,
assembler.Branch(assembler.TaggedIsPositiveSmi(key), &if_keyispositivesmi,
&if_keyisinvalid);
assembler.Bind(&if_keyispositivesmi);
assembler.TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
......@@ -2206,7 +2205,7 @@ compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
Node* boilerplate_map = assembler->LoadMap(boilerplate);
Node* instance_size = assembler->LoadMapInstanceSize(boilerplate_map);
Node* size_in_words = assembler->WordShr(object_size, kPointerSizeLog2);
assembler->GotoUnless(assembler->Word32Equal(instance_size, size_in_words),
assembler->GotoUnless(assembler->WordEqual(instance_size, size_in_words),
call_runtime);
Node* copy = assembler->Allocate(allocation_size);
......@@ -2582,10 +2581,11 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
assembler->isolate()->builtins()->builtin(Builtins::kCompileLazy));
Node* lazy_builtin = assembler->HeapConstant(lazy_builtin_handle);
Node* lazy_builtin_entry = assembler->IntPtrAdd(
lazy_builtin,
assembler->BitcastTaggedToWord(lazy_builtin),
assembler->IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
assembler->StoreObjectFieldNoWriteBarrier(
result, JSFunction::kCodeEntryOffset, lazy_builtin_entry);
result, JSFunction::kCodeEntryOffset, lazy_builtin_entry,
MachineType::PointerRepresentation());
assembler->StoreObjectFieldNoWriteBarrier(result,
JSFunction::kNextFunctionLinkOffset,
assembler->UndefinedConstant());
......@@ -2595,9 +2595,11 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
void FastNewClosureStub::GenerateAssembly(
compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
CodeStubAssembler assembler(state);
assembler.Return(
Generate(&assembler, assembler.Parameter(0), assembler.Parameter(1)));
Node* shared = assembler.Parameter(Descriptor::kSharedFunctionInfo);
Node* context = assembler.Parameter(Descriptor::kContext);
assembler.Return(Generate(&assembler, shared, context));
}
// static
......@@ -2606,21 +2608,23 @@ compiler::Node* FastNewFunctionContextStub::Generate(
compiler::Node* slots, compiler::Node* context) {
typedef compiler::Node Node;
slots = assembler->ChangeUint32ToWord(slots);
// TODO(ishell): Use CSA::OptimalParameterMode() here.
CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
Node* min_context_slots =
assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
Node* length = assembler->Int32Add(slots, min_context_slots);
Node* size = assembler->Int32Add(
assembler->Word32Shl(length, assembler->Int32Constant(kPointerSizeLog2)),
assembler->Int32Constant(FixedArray::kHeaderSize));
assembler->IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
Node* length = assembler->IntPtrAdd(slots, min_context_slots);
Node* size =
assembler->GetFixedArrayAllocationSize(length, FAST_ELEMENTS, mode);
// Create a new closure from the given function info in new space
Node* function_context = assembler->Allocate(size);
assembler->StoreMapNoWriteBarrier(function_context,
Heap::kFunctionContextMapRootIndex);
assembler->StoreObjectFieldNoWriteBarrier(function_context,
Context::kLengthOffset,
assembler->SmiFromWord32(length));
assembler->StoreObjectFieldNoWriteBarrier(
function_context, Context::kLengthOffset, assembler->SmiTag(length));
// Set up the fixed slots.
assembler->StoreFixedArrayElement(function_context, Context::CLOSURE_INDEX,
......@@ -2642,9 +2646,10 @@ compiler::Node* FastNewFunctionContextStub::Generate(
assembler->BuildFastFixedArrayForEach(
function_context, FAST_ELEMENTS, min_context_slots, length,
[assembler, undefined](Node* context, Node* offset) {
assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(),
context, offset, undefined);
});
assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
offset, undefined);
},
mode);
return function_context;
}
......@@ -2654,7 +2659,7 @@ void FastNewFunctionContextStub::GenerateAssembly(
typedef compiler::Node Node;
CodeStubAssembler assembler(state);
Node* function = assembler.Parameter(Descriptor::kFunction);
Node* slots = assembler.Parameter(FastNewFunctionContextDescriptor::kSlots);
Node* slots = assembler.Parameter(Descriptor::kSlots);
Node* context = assembler.Parameter(Descriptor::kContext);
assembler.Return(Generate(&assembler, function, slots, context));
......@@ -2731,14 +2736,10 @@ compiler::Node* NonEmptyShallowClone(CodeStubAssembler* assembler,
typedef compiler::Node Node;
typedef CodeStubAssembler::ParameterMode ParameterMode;
ParameterMode param_mode = CodeStubAssembler::SMI_PARAMETERS;
ParameterMode param_mode = assembler->OptimalParameterMode();
Node* length = assembler->LoadJSArrayLength(boilerplate);
if (assembler->Is64()) {
capacity = assembler->SmiUntag(capacity);
param_mode = CodeStubAssembler::INTEGER_PARAMETERS;
}
capacity = assembler->UntagParameter(capacity, param_mode);
Node *array, *elements;
std::tie(array, elements) =
......@@ -2756,9 +2757,7 @@ compiler::Node* NonEmptyShallowClone(CodeStubAssembler* assembler,
assembler->LoadObjectField(boilerplate_elements, offset));
}
if (assembler->Is64()) {
length = assembler->SmiUntag(length);
}
length = assembler->UntagParameter(length, param_mode);
assembler->Comment("copy boilerplate elements");
assembler->CopyFixedArrayElements(kind, boilerplate_elements, elements,
......
......@@ -66,6 +66,18 @@ class MachineRepresentationInferrer {
}
}
// Normalizes a machine representation for comparison purposes: the sub-word
// integer representations kWord8 and kWord16 are treated uniformly as
// kWord32 (kWord32 maps to itself); every other representation is returned
// unchanged.
MachineRepresentation PromoteRepresentation(MachineRepresentation rep) {
  const bool is_word32_or_smaller = rep == MachineRepresentation::kWord8 ||
                                    rep == MachineRepresentation::kWord16 ||
                                    rep == MachineRepresentation::kWord32;
  return is_word32_or_smaller ? MachineRepresentation::kWord32 : rep;
}
void Run() {
auto blocks = schedule_->all_blocks();
for (BasicBlock* block : *blocks) {
......@@ -91,12 +103,12 @@ class MachineRepresentationInferrer {
case IrOpcode::kAtomicLoad:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
representation_vector_[node->id()] =
LoadRepresentationOf(node->op()).representation();
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kCheckedLoad:
representation_vector_[node->id()] =
CheckedLoadRepresentationOf(node->op()).representation();
representation_vector_[node->id()] = PromoteRepresentation(
CheckedLoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
......@@ -104,6 +116,10 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
case IrOpcode::kUnalignedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
UnalignedLoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kPhi:
representation_vector_[node->id()] =
PhiRepresentationOf(node->op());
......@@ -119,9 +135,19 @@ class MachineRepresentationInferrer {
}
break;
}
case IrOpcode::kUnalignedLoad:
case IrOpcode::kAtomicStore:
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
representation_vector_[node->id()] = PromoteRepresentation(
StoreRepresentationOf(node->op()).representation());
break;
case IrOpcode::kCheckedStore:
representation_vector_[node->id()] =
UnalignedLoadRepresentationOf(node->op()).representation();
PromoteRepresentation(CheckedStoreRepresentationOf(node->op()));
break;
case IrOpcode::kUnalignedStore:
representation_vector_[node->id()] = PromoteRepresentation(
UnalignedStoreRepresentationOf(node->op()));
break;
case IrOpcode::kHeapConstant:
case IrOpcode::kNumberConstant:
......@@ -395,7 +421,7 @@ class MachineRepresentationChecker {
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
switch (StoreRepresentationOf(node->op()).representation()) {
switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
......@@ -403,15 +429,14 @@ class MachineRepresentationChecker {
break;
default:
CheckValueInputRepresentationIs(
node, 2,
StoreRepresentationOf(node->op()).representation());
node, 2, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kAtomicStore:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
switch (AtomicStoreRepresentationOf(node->op())) {
switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
......@@ -419,7 +444,7 @@ class MachineRepresentationChecker {
break;
default:
CheckValueInputRepresentationIs(
node, 2, AtomicStoreRepresentationOf(node->op()));
node, 2, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kPhi:
......
......@@ -1781,6 +1781,21 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
(!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
!strcmp(FLAG_turbo_verify_machine_graph,
data->info()->GetDebugName().get())))) {
if (FLAG_trace_csa_verify) {
AllowHandleDereference allow_deref;
CompilationInfo* info = data->info();
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "--------------------------------------------------\n"
<< "--- Verifying " << info->GetDebugName().get()
<< " generated by TurboFan\n"
<< "--------------------------------------------------\n"
<< *data->schedule()
<< "--------------------------------------------------\n"
<< "--- End of " << info->GetDebugName().get()
<< " generated by TurboFan\n"
<< "--------------------------------------------------\n";
}
Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
data->info()->IsStub(), &temp_zone);
......
......@@ -442,6 +442,7 @@ DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_STRING(turbo_verify_machine_graph, nullptr,
"verify TurboFan machine graph before instruction selection")
DEFINE_BOOL(csa_verify, false, "verify TurboFan machine graph of code stubs")
DEFINE_BOOL(trace_csa_verify, false, "trace code stubs verification")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_stats_nvp, false,
"print TurboFan statistics in machine-readable format")
......
......@@ -416,6 +416,7 @@ class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
class FastNewClosureDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kSharedFunctionInfo)
DECLARE_DESCRIPTOR(FastNewClosureDescriptor, CallInterfaceDescriptor)
};
......
This diff is collapsed.
......@@ -24,28 +24,37 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
OperandScale operand_scale);
virtual ~InterpreterAssembler();
// Returns the count immediate for bytecode operand |operand_index| in the
// current bytecode.
// Returns the 32-bit unsigned count immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandCount(int operand_index);
// Returns the 8-bit flag for bytecode operand |operand_index| in the
// current bytecode.
// Returns the 32-bit unsigned flag for bytecode operand |operand_index|
// in the current bytecode.
compiler::Node* BytecodeOperandFlag(int operand_index);
// Returns the index immediate for bytecode operand |operand_index| in the
// current bytecode.
// Returns the 32-bit zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandIdx(int operand_index);
// Returns the UImm8 immediate for bytecode operand |operand_index| in the
// current bytecode.
// Returns the smi index immediate for bytecode operand |operand_index|
// in the current bytecode.
compiler::Node* BytecodeOperandIdxSmi(int operand_index);
// Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
// in the current bytecode.
compiler::Node* BytecodeOperandUImm(int operand_index);
// Returns the Imm8 immediate for bytecode operand |operand_index| in the
// current bytecode.
// Returns the 32-bit signed immediate for bytecode operand |operand_index|
// in the current bytecode.
compiler::Node* BytecodeOperandImm(int operand_index);
// Returns the register index for bytecode operand |operand_index| in the
// Returns the word-size signed immediate for bytecode operand |operand_index|
// in the current bytecode.
compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
// Returns the smi immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandImmSmi(int operand_index);
// Returns the word-size sign-extended register index for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandReg(int operand_index);
// Returns the runtime id immediate for bytecode operand
// Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandRuntimeId(int operand_index);
// Returns the intrinsic id immediate for bytecode operand
// Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
......@@ -218,8 +227,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
// Updates the bytecode array's interrupt budget by |weight| and calls
// Runtime::kInterrupt if counter reaches zero.
// Updates the bytecode array's interrupt budget by a 32-bit signed |weight|
// and calls Runtime::kInterrupt if counter reaches zero.
void UpdateInterruptBudget(compiler::Node* weight);
// Returns the offset of register |index| relative to RegisterFilePointer().
......@@ -236,6 +245,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
MachineType result_type);
// Returns zero- or sign-extended to word32 value of the operand.
compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
compiler::Node* BytecodeOperandSignedByte(int operand_index);
compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
......@@ -243,6 +253,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
compiler::Node* BytecodeOperandSignedQuad(int operand_index);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
compiler::Node* BytecodeSignedOperand(int operand_index,
OperandSize operand_size);
compiler::Node* BytecodeUnsignedOperand(int operand_index,
......
......@@ -105,12 +105,8 @@ Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
InstanceTypeCompareMode mode) {
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
Node* instance_type = __ LoadInstanceType(object);
InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
end(assembler_);
if (mode == kInstanceTypeEqual) {
return __ Word32Equal(instance_type, __ Int32Constant(type));
} else {
......@@ -122,6 +118,7 @@ Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
// TODO(ishell): Use Select here.
InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
return_false(assembler_), end(assembler_);
Node* arg = __ LoadRegister(input);
......@@ -148,6 +145,8 @@ Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
Node* context) {
// TODO(ishell): Use Select here.
// TODO(ishell): Use CSA::IsJSReceiverInstanceType here.
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
......@@ -195,6 +194,7 @@ Node* IntrinsicsHelper::IsTypedArray(Node* input, Node* arg_count,
}
Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
// TODO(ishell): Use SelectBooleanConstant here.
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
......
This diff is collapsed.
......@@ -2273,6 +2273,7 @@ IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(Int64Mul)
IS_BINOP_MATCHER(JSAdd)
IS_BINOP_MATCHER(Float32Equal)
IS_BINOP_MATCHER(Float32LessThan)
......
......@@ -385,6 +385,8 @@ Matcher<Node*> IsInt64Add(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment