Commit b6c152e9 authored by epertoso's avatar epertoso Committed by Commit bot

[stubs] Removes the BranchIf.*() methods from CodeAssembler, changes their uses to Branch().

BranchIf and helpers were introduced when exporting the schedule from the RawMachineAssembler was not ensuring that the CFG was well-formed. These methods, which were used to introduce blocks to ensure edge-split form, are now unnecessary.

BUG=

Review-Url: https://codereview.chromium.org/2426923002
Cr-Commit-Position: refs/heads/master@{#40402}
parent 6c85285b
...@@ -1475,8 +1475,9 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) { ...@@ -1475,8 +1475,9 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k), assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
heap_number_map), heap_number_map),
&continue_loop); &continue_loop);
assembler->BranchIfFloat64Equal( assembler->Branch(
search_num.value(), assembler->LoadHeapNumberValue(element_k), assembler->Float64Equal(search_num.value(),
assembler->LoadHeapNumberValue(element_k)),
&return_true, &continue_loop); &return_true, &continue_loop);
assembler->Bind(&continue_loop); assembler->Bind(&continue_loop);
...@@ -1590,8 +1591,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) { ...@@ -1590,8 +1591,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedDoubleArrayElement( Node* element_k = assembler->LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0, elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS); CodeStubAssembler::INTPTR_PARAMETERS);
assembler->BranchIfFloat64Equal(element_k, search_num.value(), assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
&return_true, &continue_loop); &return_true, &continue_loop);
assembler->Bind(&continue_loop); assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one)); index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop); assembler->Goto(&not_nan_loop);
...@@ -1650,8 +1651,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) { ...@@ -1650,8 +1651,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
elements, index_var.value(), MachineType::Float64(), 0, elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop); CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
assembler->BranchIfFloat64Equal(element_k, search_num.value(), assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
&return_true, &continue_loop); &return_true, &continue_loop);
assembler->Bind(&continue_loop); assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one)); index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop); assembler->Goto(&not_nan_loop);
...@@ -1916,8 +1917,9 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) { ...@@ -1916,8 +1917,9 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k), assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
heap_number_map), heap_number_map),
&continue_loop); &continue_loop);
assembler->BranchIfFloat64Equal( assembler->Branch(
search_num.value(), assembler->LoadHeapNumberValue(element_k), assembler->Float64Equal(search_num.value(),
assembler->LoadHeapNumberValue(element_k)),
&return_found, &continue_loop); &return_found, &continue_loop);
assembler->Bind(&continue_loop); assembler->Bind(&continue_loop);
...@@ -2008,8 +2010,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) { ...@@ -2008,8 +2010,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedDoubleArrayElement( Node* element_k = assembler->LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0, elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS); CodeStubAssembler::INTPTR_PARAMETERS);
assembler->BranchIfFloat64Equal(element_k, search_num.value(), assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
&return_found, &continue_loop); &return_found, &continue_loop);
assembler->Bind(&continue_loop); assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one)); index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop); assembler->Goto(&not_nan_loop);
...@@ -2049,8 +2051,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) { ...@@ -2049,8 +2051,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
elements, index_var.value(), MachineType::Float64(), 0, elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop); CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
assembler->BranchIfFloat64Equal(element_k, search_num.value(), assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
&return_found, &continue_loop); &return_found, &continue_loop);
assembler->Bind(&continue_loop); assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one)); index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop); assembler->Goto(&not_nan_loop);
......
...@@ -68,9 +68,10 @@ void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) { ...@@ -68,9 +68,10 @@ void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
Node* integer = assembler->Float64Trunc(number_value); Node* integer = assembler->Float64Trunc(number_value);
// Check if {number}s value matches the integer (ruling out the infinities). // Check if {number}s value matches the integer (ruling out the infinities).
assembler->BranchIfFloat64Equal(assembler->Float64Sub(number_value, integer), assembler->Branch(
assembler->Float64Constant(0.0), &return_true, assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
&return_false); assembler->Float64Constant(0.0)),
&return_true, &return_false);
assembler->Bind(&return_true); assembler->Bind(&return_true);
assembler->Return(assembler->BooleanConstant(true)); assembler->Return(assembler->BooleanConstant(true));
...@@ -139,9 +140,10 @@ void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) { ...@@ -139,9 +140,10 @@ void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
&return_false); &return_false);
// Check if the {integer} value is in safe integer range. // Check if the {integer} value is in safe integer range.
assembler->BranchIfFloat64LessThanOrEqual( assembler->Branch(assembler->Float64LessThanOrEqual(
assembler->Float64Abs(integer), assembler->Float64Abs(integer),
assembler->Float64Constant(kMaxSafeInteger), &return_true, &return_false); assembler->Float64Constant(kMaxSafeInteger)),
&return_true, &return_false);
assembler->Bind(&return_true); assembler->Bind(&return_true);
assembler->Return(assembler->BooleanConstant(true)); assembler->Return(assembler->BooleanConstant(true));
......
...@@ -292,8 +292,8 @@ void GenerateStringRelationalComparison(CodeStubAssembler* assembler, ...@@ -292,8 +292,8 @@ void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
assembler->Goto(&loop); assembler->Goto(&loop);
assembler->Bind(&if_valueisnotsame); assembler->Bind(&if_valueisnotsame);
assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value), assembler->Branch(assembler->Uint32LessThan(lhs_value, rhs_value),
&if_less, &if_greater); &if_less, &if_greater);
} }
assembler->Bind(&if_done); assembler->Bind(&if_done);
......
This diff is collapsed.
...@@ -167,22 +167,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { ...@@ -167,22 +167,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true, void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
Label* if_false) { Label* if_false) {
BranchIf(SmiEqual(a, b), if_true, if_false); Branch(SmiEqual(a, b), if_true, if_false);
} }
void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true, void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
Label* if_false) { Label* if_false) {
BranchIf(SmiLessThan(a, b), if_true, if_false); Branch(SmiLessThan(a, b), if_true, if_false);
} }
void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b, void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
Label* if_true, Label* if_false) { Label* if_true, Label* if_false) {
BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false); Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
} }
void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true, void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
Label* if_false) { Label* if_false) {
BranchIfFloat64Equal(value, value, if_false, if_true); Branch(Float64Equal(value, value), if_false, if_true);
} }
// Branches to {if_true} if ToBoolean applied to {value} yields true, // Branches to {if_true} if ToBoolean applied to {value} yields true,
......
...@@ -2629,8 +2629,8 @@ compiler::Node* FastCloneShallowArrayStub::Generate( ...@@ -2629,8 +2629,8 @@ compiler::Node* FastCloneShallowArrayStub::Generate(
assembler->Comment("fast double elements path"); assembler->Comment("fast double elements path");
if (FLAG_debug_code) { if (FLAG_debug_code) {
Label correct_elements_map(assembler), abort(assembler, Label::kDeferred); Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
assembler->BranchIf(assembler->IsFixedDoubleArrayMap(elements_map), assembler->Branch(assembler->IsFixedDoubleArrayMap(elements_map),
&correct_elements_map, &abort); &correct_elements_map, &abort);
assembler->Bind(&abort); assembler->Bind(&abort);
{ {
......
...@@ -328,15 +328,6 @@ Node* CodeAssembler::Projection(int index, Node* value) { ...@@ -328,15 +328,6 @@ Node* CodeAssembler::Projection(int index, Node* value) {
return raw_assembler_->Projection(index, value); return raw_assembler_->Projection(index, value);
} }
void CodeAssembler::BranchIf(Node* condition, Label* if_true, Label* if_false) {
Label if_condition_is_true(this), if_condition_is_false(this);
Branch(condition, &if_condition_is_true, &if_condition_is_false);
Bind(&if_condition_is_true);
Goto(if_true);
Bind(&if_condition_is_false);
Goto(if_false);
}
void CodeAssembler::GotoIfException(Node* node, Label* if_exception, void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Variable* exception_var) { Variable* exception_var) {
Label success(this), exception(this, Label::kDeferred); Label success(this), exception(this, Label::kDeferred);
......
...@@ -444,16 +444,6 @@ class V8_EXPORT_PRIVATE CodeAssembler { ...@@ -444,16 +444,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void GotoIfException(Node* node, Label* if_exception, void GotoIfException(Node* node, Label* if_exception,
Variable* exception_var = nullptr); Variable* exception_var = nullptr);
// Branching helpers.
void BranchIf(Node* condition, Label* if_true, Label* if_false);
#define BRANCH_HELPER(name) \
void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
BranchIf(name(a, b), if_true, if_false); \
}
CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
#undef BRANCH_HELPER
// Helpers which delegate to RawMachineAssembler. // Helpers which delegate to RawMachineAssembler.
Factory* factory() const; Factory* factory() const;
Isolate* isolate() const; Isolate* isolate() const;
......
...@@ -97,7 +97,7 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) { ...@@ -97,7 +97,7 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
Label context_search(this, 2, context_search_loop_variables); Label context_search(this, 2, context_search_loop_variables);
// Fast path if the depth is 0. // Fast path if the depth is 0.
BranchIfWord32Equal(depth, Int32Constant(0), &context_found, &context_search); Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
// Loop until the depth is 0. // Loop until the depth is 0.
Bind(&context_search); Bind(&context_search);
...@@ -106,8 +106,8 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) { ...@@ -106,8 +106,8 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
cur_context.Bind( cur_context.Bind(
LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX)); LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
BranchIfWord32Equal(cur_depth.value(), Int32Constant(0), &context_found, Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
&context_search); &context_search);
} }
Bind(&context_found); Bind(&context_found);
...@@ -573,7 +573,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context, ...@@ -573,7 +573,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id); Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValue(feedback_element); Node* feedback_value = LoadWeakCellValue(feedback_element);
Node* is_monomorphic = WordEqual(function, feedback_value); Node* is_monomorphic = WordEqual(function, feedback_value);
BranchIf(is_monomorphic, &handle_monomorphic, &extra_checks); Branch(is_monomorphic, &handle_monomorphic, &extra_checks);
Bind(&handle_monomorphic); Bind(&handle_monomorphic);
{ {
...@@ -604,7 +604,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context, ...@@ -604,7 +604,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* is_megamorphic = WordEqual( Node* is_megamorphic = WordEqual(
feedback_element, feedback_element,
HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate()))); HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
BranchIf(is_megamorphic, &call, &check_allocation_site); Branch(is_megamorphic, &call, &check_allocation_site);
Bind(&check_allocation_site); Bind(&check_allocation_site);
{ {
...@@ -763,7 +763,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, ...@@ -763,7 +763,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Node* instance_type = LoadInstanceType(constructor); Node* instance_type = LoadInstanceType(constructor);
Node* is_js_function = Node* is_js_function =
WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)); WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
BranchIf(is_js_function, &js_function, &call_construct); Branch(is_js_function, &js_function, &call_construct);
Bind(&js_function); Bind(&js_function);
{ {
...@@ -778,7 +778,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, ...@@ -778,7 +778,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
LoadFixedArrayElement(type_feedback_vector, slot_id); LoadFixedArrayElement(type_feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValue(feedback_element); Node* feedback_value = LoadWeakCellValue(feedback_element);
Node* is_monomorphic = WordEqual(constructor, feedback_value); Node* is_monomorphic = WordEqual(constructor, feedback_value);
BranchIf(is_monomorphic, &call_construct_function, &extra_checks); Branch(is_monomorphic, &call_construct_function, &extra_checks);
Bind(&extra_checks); Bind(&extra_checks);
{ {
...@@ -801,7 +801,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, ...@@ -801,7 +801,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
// monomorphic. // monomorphic.
Comment("check if weak cell is cleared"); Comment("check if weak cell is cleared");
Node* is_smi = TaggedIsSmi(feedback_value); Node* is_smi = TaggedIsSmi(feedback_value);
BranchIf(is_smi, &initialize, &mark_megamorphic); Branch(is_smi, &initialize, &mark_megamorphic);
} }
Bind(&check_allocation_site); Bind(&check_allocation_site);
...@@ -817,8 +817,8 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, ...@@ -817,8 +817,8 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
LoadFixedArrayElement(LoadNativeContext(context), LoadFixedArrayElement(LoadNativeContext(context),
Int32Constant(Context::ARRAY_FUNCTION_INDEX)); Int32Constant(Context::ARRAY_FUNCTION_INDEX));
Node* is_array_function = WordEqual(context_slot, constructor); Node* is_array_function = WordEqual(context_slot, constructor);
BranchIf(is_array_function, &set_alloc_feedback_and_call, Branch(is_array_function, &set_alloc_feedback_and_call,
&mark_megamorphic); &mark_megamorphic);
} }
Bind(&set_alloc_feedback_and_call); Bind(&set_alloc_feedback_and_call);
...@@ -833,7 +833,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, ...@@ -833,7 +833,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Comment("check if uninitialized"); Comment("check if uninitialized");
Node* is_uninitialized = WordEqual( Node* is_uninitialized = WordEqual(
feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex)); feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
BranchIf(is_uninitialized, &initialize, &mark_megamorphic); Branch(is_uninitialized, &initialize, &mark_megamorphic);
} }
Bind(&initialize); Bind(&initialize);
...@@ -845,7 +845,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, ...@@ -845,7 +845,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
LoadFixedArrayElement(LoadNativeContext(context), LoadFixedArrayElement(LoadNativeContext(context),
Int32Constant(Context::ARRAY_FUNCTION_INDEX)); Int32Constant(Context::ARRAY_FUNCTION_INDEX));
Node* is_array_function = WordEqual(context_slot, constructor); Node* is_array_function = WordEqual(context_slot, constructor);
BranchIf(is_array_function, &create_allocation_site, &create_weak_cell); Branch(is_array_function, &create_allocation_site, &create_weak_cell);
Bind(&create_allocation_site); Bind(&create_allocation_site);
{ {
...@@ -989,7 +989,7 @@ Node* InterpreterAssembler::Jump(Node* delta) { ...@@ -989,7 +989,7 @@ Node* InterpreterAssembler::Jump(Node* delta) {
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) { void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
Label match(this), no_match(this); Label match(this), no_match(this);
BranchIf(condition, &match, &no_match); Branch(condition, &match, &no_match);
Bind(&match); Bind(&match);
Jump(delta); Jump(delta);
Bind(&no_match); Bind(&no_match);
...@@ -1022,7 +1022,7 @@ Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) { ...@@ -1022,7 +1022,7 @@ Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar)); Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
Node* is_star = WordEqual(target_bytecode, star_bytecode); Node* is_star = WordEqual(target_bytecode, star_bytecode);
BranchIf(is_star, &do_inline_star, &done); Branch(is_star, &do_inline_star, &done);
Bind(&do_inline_star); Bind(&do_inline_star);
{ {
...@@ -1223,7 +1223,7 @@ void InterpreterAssembler::Abort(BailoutReason bailout_reason) { ...@@ -1223,7 +1223,7 @@ void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs, void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) { BailoutReason bailout_reason) {
Label ok(this), abort(this, Label::kDeferred); Label ok(this), abort(this, Label::kDeferred);
BranchIfWordEqual(lhs, rhs, &ok, &abort); Branch(WordEqual(lhs, rhs), &ok, &abort);
Bind(&abort); Bind(&abort);
Abort(bailout_reason); Abort(bailout_reason);
...@@ -1253,7 +1253,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) { ...@@ -1253,7 +1253,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
Node* counter_reached_max = WordEqual( Node* counter_reached_max = WordEqual(
old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max())); old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
BranchIf(counter_reached_max, &counter_saturated, &counter_ok); Branch(counter_reached_max, &counter_saturated, &counter_ok);
Bind(&counter_ok); Bind(&counter_ok);
{ {
......
...@@ -1222,7 +1222,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) { ...@@ -1222,7 +1222,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi. // {right} is known to be a Smi.
// Check if the {left} is a Smi take the fast path. // Check if the {left} is a Smi take the fast path.
__ BranchIf(__ TaggedIsSmi(left), &fastpath, &slowpath); __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath); __ Bind(&fastpath);
{ {
// Try fast Smi addition first. // Try fast Smi addition first.
...@@ -1232,7 +1232,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) { ...@@ -1232,7 +1232,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
// Check if the Smi additon overflowed. // Check if the Smi additon overflowed.
Label if_notoverflow(assembler); Label if_notoverflow(assembler);
__ BranchIf(overflow, &slowpath, &if_notoverflow); __ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow); __ Bind(&if_notoverflow);
{ {
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall), __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
...@@ -1276,7 +1276,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) { ...@@ -1276,7 +1276,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi. // {right} is known to be a Smi.
// Check if the {left} is a Smi take the fast path. // Check if the {left} is a Smi take the fast path.
__ BranchIf(__ TaggedIsSmi(left), &fastpath, &slowpath); __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath); __ Bind(&fastpath);
{ {
// Try fast Smi subtraction first. // Try fast Smi subtraction first.
...@@ -1286,7 +1286,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) { ...@@ -1286,7 +1286,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
// Check if the Smi subtraction overflowed. // Check if the Smi subtraction overflowed.
Label if_notoverflow(assembler); Label if_notoverflow(assembler);
__ BranchIf(overflow, &slowpath, &if_notoverflow); __ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow); __ Bind(&if_notoverflow);
{ {
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall), __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
...@@ -1530,7 +1530,7 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) { ...@@ -1530,7 +1530,7 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
Label if_true(assembler), if_false(assembler), end(assembler); Label if_true(assembler), if_false(assembler), end(assembler);
Node* true_value = __ BooleanConstant(true); Node* true_value = __ BooleanConstant(true);
Node* false_value = __ BooleanConstant(false); Node* false_value = __ BooleanConstant(false);
__ BranchIfWordEqual(value, true_value, &if_true, &if_false); __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
__ Bind(&if_true); __ Bind(&if_true);
{ {
result.Bind(false_value); result.Bind(false_value);
...@@ -2064,7 +2064,7 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) { ...@@ -2064,7 +2064,7 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
Node* use_fast_shallow_clone = __ Word32And( Node* use_fast_shallow_clone = __ Word32And(
bytecode_flags, bytecode_flags,
__ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask)); __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
__ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime); __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
__ Bind(&fast_shallow_clone); __ Bind(&fast_shallow_clone);
{ {
...@@ -2109,7 +2109,7 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) { ...@@ -2109,7 +2109,7 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
Node* fast_clone_properties_count = Node* fast_clone_properties_count =
__ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>( __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
bytecode_flags); bytecode_flags);
__ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone); __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
__ Bind(&if_fast_clone); __ Bind(&if_fast_clone);
{ {
...@@ -2256,7 +2256,7 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) { ...@@ -2256,7 +2256,7 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
Node* duplicate_parameters_bit = __ Int32Constant( Node* duplicate_parameters_bit = __ Int32Constant(
1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte); 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit); Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
__ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters); __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
__ Bind(&if_not_duplicate_parameters); __ Bind(&if_not_duplicate_parameters);
{ {
...@@ -2312,7 +2312,7 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) { ...@@ -2312,7 +2312,7 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred); Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
Node* interrupt = __ StackCheckTriggeredInterrupt(); Node* interrupt = __ StackCheckTriggeredInterrupt();
__ BranchIf(interrupt, &stack_check_interrupt, &ok); __ Branch(interrupt, &stack_check_interrupt, &ok);
__ Bind(&ok); __ Bind(&ok);
__ Dispatch(); __ Dispatch();
...@@ -2485,7 +2485,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) { ...@@ -2485,7 +2485,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
// Check if we can use the for-in fast path potentially using the enum cache. // Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred); Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset); Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
__ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow); __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
__ Bind(&if_fast); __ Bind(&if_fast);
{ {
// Enum cache in use for {receiver}, the {key} is definitely valid. // Enum cache in use for {receiver}, the {key} is definitely valid.
...@@ -2522,7 +2522,7 @@ void Interpreter::DoForInContinue(InterpreterAssembler* assembler) { ...@@ -2522,7 +2522,7 @@ void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
// Check if {index} is at {cache_length} already. // Check if {index} is at {cache_length} already.
Label if_true(assembler), if_false(assembler), end(assembler); Label if_true(assembler), if_false(assembler), end(assembler);
__ BranchIfWordEqual(index, cache_length, &if_true, &if_false); __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
__ Bind(&if_true); __ Bind(&if_true);
{ {
__ SetAccumulator(__ BooleanConstant(false)); __ SetAccumulator(__ BooleanConstant(false));
...@@ -2593,7 +2593,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) { ...@@ -2593,7 +2593,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
STATIC_ASSERT(StepFrame > StepNext); STATIC_ASSERT(StepFrame > StepNext);
STATIC_ASSERT(LastStepAction == StepFrame); STATIC_ASSERT(LastStepAction == StepFrame);
Node* step_next = __ Int32Constant(StepNext); Node* step_next = __ Int32Constant(StepNext);
__ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok); __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
__ Bind(&ok); __ Bind(&ok);
Node* array = Node* array =
......
...@@ -1257,12 +1257,12 @@ TEST(TryProbeStubCache) { ...@@ -1257,12 +1257,12 @@ TEST(TryProbeStubCache) {
m.TryProbeStubCache(&stub_cache, receiver, name, &if_handler, &var_handler, m.TryProbeStubCache(&stub_cache, receiver, name, &if_handler, &var_handler,
&if_miss); &if_miss);
m.Bind(&if_handler); m.Bind(&if_handler);
m.BranchIfWordEqual(expected_handler, var_handler.value(), &passed, m.Branch(m.WordEqual(expected_handler, var_handler.value()), &passed,
&failed); &failed);
m.Bind(&if_miss); m.Bind(&if_miss);
m.BranchIfWordEqual(expected_handler, m.IntPtrConstant(0), &passed, m.Branch(m.WordEqual(expected_handler, m.IntPtrConstant(0)), &passed,
&failed); &failed);
m.Bind(&passed); m.Bind(&passed);
m.Return(m.BooleanConstant(true)); m.Return(m.BooleanConstant(true));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment