Commit b6c152e9 authored by epertoso's avatar epertoso Committed by Commit bot

[stubs] Removes the BranchIf.*() methods from CodeAssembler, changes their uses to Branch().

BranchIf and helpers were introduced when exporting the schedule from the RawMachineAssembler was not ensuring that the CFG was well-formed. These methods, which were used to introduce blocks to ensure edge-split form, are now unnecessary.

BUG=

Review-Url: https://codereview.chromium.org/2426923002
Cr-Commit-Position: refs/heads/master@{#40402}
parent 6c85285b
......@@ -1475,8 +1475,9 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
heap_number_map),
&continue_loop);
assembler->BranchIfFloat64Equal(
search_num.value(), assembler->LoadHeapNumberValue(element_k),
assembler->Branch(
assembler->Float64Equal(search_num.value(),
assembler->LoadHeapNumberValue(element_k)),
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
......@@ -1590,8 +1591,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS);
assembler->BranchIfFloat64Equal(element_k, search_num.value(),
&return_true, &continue_loop);
assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
......@@ -1650,8 +1651,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
assembler->BranchIfFloat64Equal(element_k, search_num.value(),
&return_true, &continue_loop);
assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
......@@ -1916,8 +1917,9 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
heap_number_map),
&continue_loop);
assembler->BranchIfFloat64Equal(
search_num.value(), assembler->LoadHeapNumberValue(element_k),
assembler->Branch(
assembler->Float64Equal(search_num.value(),
assembler->LoadHeapNumberValue(element_k)),
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
......@@ -2008,8 +2010,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS);
assembler->BranchIfFloat64Equal(element_k, search_num.value(),
&return_found, &continue_loop);
assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
......@@ -2049,8 +2051,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
assembler->BranchIfFloat64Equal(element_k, search_num.value(),
&return_found, &continue_loop);
assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
......
......@@ -68,9 +68,10 @@ void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
Node* integer = assembler->Float64Trunc(number_value);
// Check if {number}s value matches the integer (ruling out the infinities).
assembler->BranchIfFloat64Equal(assembler->Float64Sub(number_value, integer),
assembler->Float64Constant(0.0), &return_true,
&return_false);
assembler->Branch(
assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
assembler->Float64Constant(0.0)),
&return_true, &return_false);
assembler->Bind(&return_true);
assembler->Return(assembler->BooleanConstant(true));
......@@ -139,9 +140,10 @@ void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
&return_false);
// Check if the {integer} value is in safe integer range.
assembler->BranchIfFloat64LessThanOrEqual(
assembler->Float64Abs(integer),
assembler->Float64Constant(kMaxSafeInteger), &return_true, &return_false);
assembler->Branch(assembler->Float64LessThanOrEqual(
assembler->Float64Abs(integer),
assembler->Float64Constant(kMaxSafeInteger)),
&return_true, &return_false);
assembler->Bind(&return_true);
assembler->Return(assembler->BooleanConstant(true));
......
......@@ -292,8 +292,8 @@ void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
assembler->Goto(&loop);
assembler->Bind(&if_valueisnotsame);
assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
&if_less, &if_greater);
assembler->Branch(assembler->Uint32LessThan(lhs_value, rhs_value),
&if_less, &if_greater);
}
assembler->Bind(&if_done);
......
This diff is collapsed.
......@@ -167,22 +167,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
Label* if_false) {
BranchIf(SmiEqual(a, b), if_true, if_false);
Branch(SmiEqual(a, b), if_true, if_false);
}
void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
Label* if_false) {
BranchIf(SmiLessThan(a, b), if_true, if_false);
Branch(SmiLessThan(a, b), if_true, if_false);
}
void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
Label* if_true, Label* if_false) {
BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
}
void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
Label* if_false) {
BranchIfFloat64Equal(value, value, if_false, if_true);
Branch(Float64Equal(value, value), if_false, if_true);
}
// Branches to {if_true} if ToBoolean applied to {value} yields true,
......
......@@ -2629,8 +2629,8 @@ compiler::Node* FastCloneShallowArrayStub::Generate(
assembler->Comment("fast double elements path");
if (FLAG_debug_code) {
Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
assembler->BranchIf(assembler->IsFixedDoubleArrayMap(elements_map),
&correct_elements_map, &abort);
assembler->Branch(assembler->IsFixedDoubleArrayMap(elements_map),
&correct_elements_map, &abort);
assembler->Bind(&abort);
{
......
......@@ -328,15 +328,6 @@ Node* CodeAssembler::Projection(int index, Node* value) {
return raw_assembler_->Projection(index, value);
}
void CodeAssembler::BranchIf(Node* condition, Label* if_true, Label* if_false) {
Label if_condition_is_true(this), if_condition_is_false(this);
Branch(condition, &if_condition_is_true, &if_condition_is_false);
Bind(&if_condition_is_true);
Goto(if_true);
Bind(&if_condition_is_false);
Goto(if_false);
}
void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Variable* exception_var) {
Label success(this), exception(this, Label::kDeferred);
......
......@@ -444,16 +444,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void GotoIfException(Node* node, Label* if_exception,
Variable* exception_var = nullptr);
// Branching helpers.
void BranchIf(Node* condition, Label* if_true, Label* if_false);
#define BRANCH_HELPER(name) \
void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
BranchIf(name(a, b), if_true, if_false); \
}
CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
#undef BRANCH_HELPER
// Helpers which delegate to RawMachineAssembler.
Factory* factory() const;
Isolate* isolate() const;
......
......@@ -97,7 +97,7 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
Label context_search(this, 2, context_search_loop_variables);
// Fast path if the depth is 0.
BranchIfWord32Equal(depth, Int32Constant(0), &context_found, &context_search);
Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
// Loop until the depth is 0.
Bind(&context_search);
......@@ -106,8 +106,8 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
cur_context.Bind(
LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
BranchIfWord32Equal(cur_depth.value(), Int32Constant(0), &context_found,
&context_search);
Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
&context_search);
}
Bind(&context_found);
......@@ -573,7 +573,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValue(feedback_element);
Node* is_monomorphic = WordEqual(function, feedback_value);
BranchIf(is_monomorphic, &handle_monomorphic, &extra_checks);
Branch(is_monomorphic, &handle_monomorphic, &extra_checks);
Bind(&handle_monomorphic);
{
......@@ -604,7 +604,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* is_megamorphic = WordEqual(
feedback_element,
HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
BranchIf(is_megamorphic, &call, &check_allocation_site);
Branch(is_megamorphic, &call, &check_allocation_site);
Bind(&check_allocation_site);
{
......@@ -763,7 +763,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Node* instance_type = LoadInstanceType(constructor);
Node* is_js_function =
WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
BranchIf(is_js_function, &js_function, &call_construct);
Branch(is_js_function, &js_function, &call_construct);
Bind(&js_function);
{
......@@ -778,7 +778,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
LoadFixedArrayElement(type_feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValue(feedback_element);
Node* is_monomorphic = WordEqual(constructor, feedback_value);
BranchIf(is_monomorphic, &call_construct_function, &extra_checks);
Branch(is_monomorphic, &call_construct_function, &extra_checks);
Bind(&extra_checks);
{
......@@ -801,7 +801,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
// monomorphic.
Comment("check if weak cell is cleared");
Node* is_smi = TaggedIsSmi(feedback_value);
BranchIf(is_smi, &initialize, &mark_megamorphic);
Branch(is_smi, &initialize, &mark_megamorphic);
}
Bind(&check_allocation_site);
......@@ -817,8 +817,8 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
LoadFixedArrayElement(LoadNativeContext(context),
Int32Constant(Context::ARRAY_FUNCTION_INDEX));
Node* is_array_function = WordEqual(context_slot, constructor);
BranchIf(is_array_function, &set_alloc_feedback_and_call,
&mark_megamorphic);
Branch(is_array_function, &set_alloc_feedback_and_call,
&mark_megamorphic);
}
Bind(&set_alloc_feedback_and_call);
......@@ -833,7 +833,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Comment("check if uninitialized");
Node* is_uninitialized = WordEqual(
feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
BranchIf(is_uninitialized, &initialize, &mark_megamorphic);
Branch(is_uninitialized, &initialize, &mark_megamorphic);
}
Bind(&initialize);
......@@ -845,7 +845,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
LoadFixedArrayElement(LoadNativeContext(context),
Int32Constant(Context::ARRAY_FUNCTION_INDEX));
Node* is_array_function = WordEqual(context_slot, constructor);
BranchIf(is_array_function, &create_allocation_site, &create_weak_cell);
Branch(is_array_function, &create_allocation_site, &create_weak_cell);
Bind(&create_allocation_site);
{
......@@ -989,7 +989,7 @@ Node* InterpreterAssembler::Jump(Node* delta) {
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
Label match(this), no_match(this);
BranchIf(condition, &match, &no_match);
Branch(condition, &match, &no_match);
Bind(&match);
Jump(delta);
Bind(&no_match);
......@@ -1022,7 +1022,7 @@ Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
Node* is_star = WordEqual(target_bytecode, star_bytecode);
BranchIf(is_star, &do_inline_star, &done);
Branch(is_star, &do_inline_star, &done);
Bind(&do_inline_star);
{
......@@ -1223,7 +1223,7 @@ void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
Label ok(this), abort(this, Label::kDeferred);
BranchIfWordEqual(lhs, rhs, &ok, &abort);
Branch(WordEqual(lhs, rhs), &ok, &abort);
Bind(&abort);
Abort(bailout_reason);
......@@ -1253,7 +1253,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
Node* counter_reached_max = WordEqual(
old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
Branch(counter_reached_max, &counter_saturated, &counter_ok);
Bind(&counter_ok);
{
......
......@@ -1222,7 +1222,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi take the fast path.
__ BranchIf(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi addition first.
......@@ -1232,7 +1232,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
// Check if the Smi additon overflowed.
Label if_notoverflow(assembler);
__ BranchIf(overflow, &slowpath, &if_notoverflow);
__ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
......@@ -1276,7 +1276,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi take the fast path.
__ BranchIf(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi subtraction first.
......@@ -1286,7 +1286,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
// Check if the Smi subtraction overflowed.
Label if_notoverflow(assembler);
__ BranchIf(overflow, &slowpath, &if_notoverflow);
__ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
......@@ -1530,7 +1530,7 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
Label if_true(assembler), if_false(assembler), end(assembler);
Node* true_value = __ BooleanConstant(true);
Node* false_value = __ BooleanConstant(false);
__ BranchIfWordEqual(value, true_value, &if_true, &if_false);
__ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
__ Bind(&if_true);
{
result.Bind(false_value);
......@@ -2064,7 +2064,7 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
Node* use_fast_shallow_clone = __ Word32And(
bytecode_flags,
__ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
__ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
__ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
__ Bind(&fast_shallow_clone);
{
......@@ -2109,7 +2109,7 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
Node* fast_clone_properties_count =
__ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
bytecode_flags);
__ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
__ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
__ Bind(&if_fast_clone);
{
......@@ -2256,7 +2256,7 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
Node* duplicate_parameters_bit = __ Int32Constant(
1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
__ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
__ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
__ Bind(&if_not_duplicate_parameters);
{
......@@ -2312,7 +2312,7 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
Node* interrupt = __ StackCheckTriggeredInterrupt();
__ BranchIf(interrupt, &stack_check_interrupt, &ok);
__ Branch(interrupt, &stack_check_interrupt, &ok);
__ Bind(&ok);
__ Dispatch();
......@@ -2485,7 +2485,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
__ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
__ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
__ Bind(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
......@@ -2522,7 +2522,7 @@ void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
// Check if {index} is at {cache_length} already.
Label if_true(assembler), if_false(assembler), end(assembler);
__ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
__ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
__ Bind(&if_true);
{
__ SetAccumulator(__ BooleanConstant(false));
......@@ -2593,7 +2593,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
STATIC_ASSERT(StepFrame > StepNext);
STATIC_ASSERT(LastStepAction == StepFrame);
Node* step_next = __ Int32Constant(StepNext);
__ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
__ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
__ Bind(&ok);
Node* array =
......
......@@ -1257,12 +1257,12 @@ TEST(TryProbeStubCache) {
m.TryProbeStubCache(&stub_cache, receiver, name, &if_handler, &var_handler,
&if_miss);
m.Bind(&if_handler);
m.BranchIfWordEqual(expected_handler, var_handler.value(), &passed,
&failed);
m.Branch(m.WordEqual(expected_handler, var_handler.value()), &passed,
&failed);
m.Bind(&if_miss);
m.BranchIfWordEqual(expected_handler, m.IntPtrConstant(0), &passed,
&failed);
m.Branch(m.WordEqual(expected_handler, m.IntPtrConstant(0)), &passed,
&failed);
m.Bind(&passed);
m.Return(m.BooleanConstant(true));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment