Commit 900d6b93 authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64] Optimize Branch with near label

If a label is bound within a 4096-byte offset after the Branch, we should use BranchShort.

Change-Id: I2197e2a18a43627370ed9b67b7ef7d678a2a62a8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2944795
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Commit-Queue: Brice Dobry <brice.dobry@futurewei.com>
Reviewed-by: Brice Dobry <brice.dobry@futurewei.com>
Cr-Commit-Position: refs/heads/master@{#75073}
parent 998370fd
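Background, as a brief sketch (not part of the patch): a RISC-V conditional branch (B-type instruction) encodes a 13-bit signed immediate, so it can only reach targets within roughly +/-4 KiB of the branch site. The generic Branch pseudo-instruction must allow for labels that may bind out of range and can expand into an inverted short branch over a long jump, whereas BranchShort always emits the single native branch instruction. When the label is known to bind within the 4096-byte offset, BranchShort saves code size at every call site. The illustration below assumes the V8 RISC-V macro-assembler API used in this patch; the wrapper function is hypothetical.

  // Hypothetical example: both branches target a label bound a few
  // instructions later, well inside the signed 13-bit (+/-4 KiB) range.
  void EmitNearBranch(MacroAssembler* masm) {
    Label near;
    // Generic form: may expand to an inverted branch plus a long jump
    // if the label could bind out of range.
    masm->Branch(&near, eq, a0, Operand(zero_reg));
    // Short form: a single beq; only correct for in-range labels.
    masm->BranchShort(&near, eq, a0, Operand(zero_reg));
    masm->bind(&near);
  }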
@@ -169,7 +169,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
      scratch, scratch2);
  __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
      RelocInfo::CODE_TARGET);
- __ Branch(&post_instantiation_deopt_entry);
+ __ BranchShort(&post_instantiation_deopt_entry);
  // Else: use TheHoleValue as receiver for constructor call
  __ bind(&not_create_implicit_receiver);
@@ -329,7 +329,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
  __ GetObjectType(sfi_data, scratch1, scratch1);
  __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
- __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ BranchShort(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
  __ LoadTaggedPointerField(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -406,7 +406,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  Label done_loop, loop;
  __ bind(&loop);
  __ Sub64(a3, a3, Operand(1));
- __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+ __ BranchShort(&done_loop, lt, a3, Operand(zero_reg));
  __ CalcScaledAddress(kScratchReg, scratch, a3, kTaggedSizeLog2);
  __ LoadAnyTaggedField(
      kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
@@ -500,7 +500,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
  __ Sub64(scratch1, sp, scratch1);
  // Check if the arguments will overflow the stack.
  __ Sll64(scratch2, argc, kSystemPointerSizeLog2);
- __ Branch(&okay, gt, scratch1, Operand(scratch2));  // Signed comparison.
+ __ BranchShort(&okay, gt, scratch1, Operand(scratch2));  // Signed comparison.
  // Out of stack space.
  __ CallRuntime(Runtime::kThrowStackOverflow);
@@ -600,7 +600,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
      IsolateAddressId::kJSEntrySPAddress, masm->isolate());
  __ li(s1, js_entry_sp);
  __ Ld(s2, MemOperand(s1));
- __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
+ __ BranchShort(&non_outermost_js, ne, s2, Operand(zero_reg));
  __ Sd(fp, MemOperand(s1));
  __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
@@ -612,7 +612,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
- __ Branch(&invoke);
+ __ BranchShort(&invoke);
  __ bind(&handler_entry);
  // Store the current pc as the handler offset. It's used later to create the
@@ -627,7 +627,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
      IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
  __ Sd(a0, MemOperand(s1));  // We come back from 'invoke'. result is in a0.
  __ LoadRoot(a0, RootIndex::kException);
- __ Branch(&exit);
+ __ BranchShort(&exit);
  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
@@ -670,7 +670,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(a5);
- __ Branch(&non_outermost_js_2, ne, a5,
-     Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ BranchShort(&non_outermost_js_2, ne, a5,
+     Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ li(a5, js_entry_sp);
  __ Sd(zero_reg, MemOperand(a5));
@@ -742,7 +742,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
  // a5: argv, i.e. points to first arg
  Label loop, entry;
  __ CalcScaledAddress(s1, a5, a4, kSystemPointerSizeLog2);
- __ Branch(&entry);
+ __ BranchShort(&entry);
  // s1 points past last arg.
  __ bind(&loop);
  __ Add64(s1, s1, -kSystemPointerSize);
@@ -836,7 +836,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
- __ Branch(&L1, le, actual_params_size, Operand(params_size));
+ __ BranchShort(&L1, le, actual_params_size, Operand(params_size));
  __ Move(params_size, actual_params_size);
  __ bind(&L1);
@@ -853,7 +853,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
      OptimizationMarker expected_marker,
      Runtime::FunctionId function_id) {
  Label no_match;
- __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
+ __ BranchShort(&no_match, ne, actual_marker, Operand(expected_marker));
  GenerateTailCallToReturnedCode(masm, function_id);
  __ bind(&no_match);
  }
@@ -883,7 +883,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
  __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
  __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg));
+ __ BranchShort(&heal_optimized_code_slot, ne, a5, Operand(zero_reg));
  // Optimized code is good, get it into the closure and link the closure into
  // the optimized functions list, then tail call the optimized code.
@@ -966,9 +966,9 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
  STATIC_ASSERT(3 ==
      static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
- __ Branch(&process_bytecode, Ugreater, bytecode, Operand(3));
+ __ BranchShort(&process_bytecode, Ugreater, bytecode, Operand(3));
  __ And(scratch2, bytecode, Operand(1));
- __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
+ __ BranchShort(&extra_wide, ne, scratch2, Operand(zero_reg));
  // Load the next bytecode and update table to the wide scaled table.
  __ Add64(bytecode_offset, bytecode_offset, Operand(1));
@@ -976,7 +976,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
  __ Lbu(bytecode, MemOperand(scratch2));
  __ Add64(bytecode_size_table, bytecode_size_table,
      Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
- __ Branch(&process_bytecode);
+ __ BranchShort(&process_bytecode);
  __ bind(&extra_wide);
  // Load the next bytecode and update table to the extra wide scaled table.
@@ -998,12 +998,12 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
  // of the loop.
  Label end, not_jump_loop;
- __ Branch(&not_jump_loop, ne, bytecode,
-     Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ __ BranchShort(&not_jump_loop, ne, bytecode,
+     Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
  // We need to restore the original bytecode_offset since we might have
  // increased it to skip the wide / extra-wide prefix bytecode.
  __ Move(bytecode_offset, original_bytecode_offset);
- __ Branch(&end);
+ __ BranchShort(&end);
  __ bind(&not_jump_loop);
  // Otherwise, load the size of the current bytecode and advance the offset.
@@ -1044,7 +1044,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
  __ And(
      scratch, optimization_state,
      Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
- __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
+ __ BranchShort(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
  }
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
@@ -1168,7 +1168,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  Register interrupt_limit = temps.Acquire();
  __ LoadStackLimit(interrupt_limit,
      MacroAssembler::StackLimitKind::kInterruptStackLimit);
- __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
-     Operand(interrupt_limit));
+ __ BranchShort(&call_stack_guard, Uless, sp_minus_frame_size,
+     Operand(interrupt_limit));
  __ RecordComment("]");
  }
@@ -1258,7 +1258,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
- __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
+ __ BranchShort(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
  // Read off the optimization state in the feedback vector, and if there
  // is optimized code or an optimization marker, call that instead.
@@ -1323,14 +1323,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  Label loop_header;
  Label loop_check;
  __ LoadRoot(a5, RootIndex::kUndefinedValue);
- __ Branch(&loop_check);
+ __ BranchShort(&loop_check);
  __ bind(&loop_header);
  // TODO(rmcilroy): Consider doing more than one push per loop iteration.
  __ push(a5);
  // Continue loop if not done.
  __ bind(&loop_check);
  __ Sub64(a4, a4, Operand(kSystemPointerSize));
- __ Branch(&loop_header, ge, a4, Operand(zero_reg));
+ __ BranchShort(&loop_header, ge, a4, Operand(zero_reg));
  }
  // If the bytecode array has a valid incoming new target or generator object
@@ -1339,7 +1339,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Lw(a5, FieldMemOperand(
      kInterpreterBytecodeArrayRegister,
      BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
- __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
-     Operand(zero_reg));
+ __ BranchShort(&no_incoming_new_target_or_generator_register, eq, a5,
+     Operand(zero_reg));
  __ CalcScaledAddress(a5, fp, a5, kSystemPointerSizeLog2);
  __ Sd(a3, MemOperand(a5));
@@ -1349,7 +1349,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  // TODO(solanes): Merge with the real stack limit check above.
  Label stack_check_interrupt, after_stack_check_interrupt;
  __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
- __ Branch(&stack_check_interrupt, Uless, sp, Operand(a5));
+ __ BranchShort(&stack_check_interrupt, Uless, sp, Operand(a5));
  __ bind(&after_stack_check_interrupt);
  // Load accumulator as undefined.
@@ -1424,7 +1424,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  // Check if optimized code marker is available
  __ And(scratch, optimization_state,
      FeedbackVector::OptimizationTierBits::kMask);
- __ Branch(&maybe_has_optimized_code, ne, scratch, Operand(zero_reg));
+ __ BranchShort(&maybe_has_optimized_code, ne, scratch, Operand(zero_reg));
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
@@ -1642,13 +1642,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
      t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
  __ GetObjectType(t0, kInterpreterDispatchTableRegister,
      kInterpreterDispatchTableRegister);
- __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
-     Operand(INTERPRETER_DATA_TYPE));
+ __ BranchShort(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+     Operand(INTERPRETER_DATA_TYPE));
  __ LoadTaggedPointerField(
      t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
  __ Add64(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Branch(&trampoline_loaded);
+ __ BranchShort(&trampoline_loaded);
  __ bind(&builtin_trampoline);
  __ li(t0, ExternalReference::
@@ -1685,7 +1685,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  if (FLAG_debug_code) {
  Label okay;
- __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
-     Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ BranchShort(&okay, ge, kInterpreterBytecodeOffsetRegister,
+     Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
  // Unreachable code.
  __ break_(0xCC);
@@ -1923,12 +1923,12 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
  __ Ld(arg_array, MemOperand(sp, 2 * kSystemPointerSize));
  Label done0, done1;
- __ Branch(&done0, ne, argc, Operand(zero_reg));
+ __ BranchShort(&done0, ne, argc, Operand(zero_reg));
  __ Move(arg_array, undefined_value);  // if argc == 0
  __ Move(this_arg, undefined_value);  // if argc == 0
  __ bind(&done0);  // else (i.e., argc > 0)
- __ Branch(&done1, ne, argc, Operand(1));
+ __ BranchShort(&done1, ne, argc, Operand(1));
  __ Move(arg_array, undefined_value);  // if argc == 1
  __ bind(&done1);  // else (i.e., argc > 1)
@@ -1951,7 +1951,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
  // 3. Tail call with no arguments if argArray is null or undefined.
  Label no_arguments;
  __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
- __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
+ __ BranchShort(&no_arguments, eq, arg_array, Operand(undefined_value));
  // 4a. Apply the receiver to the given argArray.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
@@ -1976,7 +1976,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
  // a0: actual number of arguments
  {
  Label done;
- __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ BranchShort(&done, ne, a0, Operand(zero_reg));
  __ PushRoot(RootIndex::kUndefinedValue);
  __ Add64(a0, a0, Operand(1));
  __ bind(&done);
@@ -2018,18 +2018,18 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
  __ Ld(arguments_list, MemOperand(sp, 3 * kSystemPointerSize));
  Label done0, done1, done2;
- __ Branch(&done0, ne, argc, Operand(zero_reg));
+ __ BranchShort(&done0, ne, argc, Operand(zero_reg));
  __ Move(arguments_list, undefined_value);  // if argc == 0
  __ Move(this_argument, undefined_value);  // if argc == 0
  __ Move(target, undefined_value);  // if argc == 0
  __ bind(&done0);  // argc != 0
- __ Branch(&done1, ne, argc, Operand(1));
+ __ BranchShort(&done1, ne, argc, Operand(1));
  __ Move(arguments_list, undefined_value);  // if argc == 1
  __ Move(this_argument, undefined_value);  // if argc == 1
  __ bind(&done1);  // argc > 1
- __ Branch(&done2, ne, argc, Operand(2));
+ __ BranchShort(&done2, ne, argc, Operand(2));
  __ Move(arguments_list, undefined_value);  // if argc == 2
  __ bind(&done2);  // argc > 2
@@ -2081,18 +2081,18 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
  __ Ld(new_target, MemOperand(sp, 3 * kSystemPointerSize));
  Label done0, done1, done2;
- __ Branch(&done0, ne, argc, Operand(zero_reg));
+ __ BranchShort(&done0, ne, argc, Operand(zero_reg));
  __ Move(arguments_list, undefined_value);  // if argc == 0
  __ Move(new_target, undefined_value);  // if argc == 0
  __ Move(target, undefined_value);  // if argc == 0
  __ bind(&done0);
- __ Branch(&done1, ne, argc, Operand(1));
+ __ BranchShort(&done1, ne, argc, Operand(1));
  __ Move(arguments_list, undefined_value);  // if argc == 1
  __ Move(new_target, target);  // if argc == 1
  __ bind(&done1);
- __ Branch(&done2, ne, argc, Operand(2));
+ __ BranchShort(&done2, ne, argc, Operand(2));
  __ Move(new_target, target);  // if argc == 2
  __ bind(&done2);
@@ -2137,9 +2137,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  Label ok, fail;
  __ AssertNotSmi(a2);
  __ GetObjectType(a2, kScratchReg, kScratchReg);
- __ Branch(&ok, eq, kScratchReg, Operand(FIXED_ARRAY_TYPE));
- __ Branch(&fail, ne, kScratchReg, Operand(FIXED_DOUBLE_ARRAY_TYPE));
- __ Branch(&ok, eq, a4, Operand(zero_reg));
+ __ BranchShort(&ok, eq, kScratchReg, Operand(FIXED_ARRAY_TYPE));
+ __ BranchShort(&fail, ne, kScratchReg, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ BranchShort(&ok, eq, a4, Operand(zero_reg));
  // Fall through.
  __ bind(&fail);
  __ Abort(AbortReason::kOperandIsNotAFixedArray);
@@ -2187,14 +2187,14 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  Register hole_value = temps.Acquire();
  __ Add64(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add64(a0, a0, len);  // The 'len' argument for Call() or Construct().
- __ Branch(&done, eq, len, Operand(zero_reg));
+ __ BranchShort(&done, eq, len, Operand(zero_reg));
  __ Sll64(scratch, len, kTaggedSizeLog2);
  __ Sub64(scratch, sp, Operand(scratch));
  __ LoadRoot(hole_value, RootIndex::kTheHoleValue);
  __ bind(&loop);
  __ LoadTaggedPointerField(a5, MemOperand(src));
  __ Add64(src, src, kTaggedSize);
- __ Branch(&push, ne, a5, Operand(hole_value));
+ __ BranchShort(&push, ne, a5, Operand(hole_value));
  __ LoadRoot(a5, RootIndex::kUndefinedValue);
  __ bind(&push);
  __ Sd(a5, MemOperand(a7, 0));
@@ -2234,7 +2234,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
      FieldMemOperand(a3, HeapObject::kMapOffset));
  __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
- __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg));
+ __ BranchShort(&new_target_constructor, ne, scratch, Operand(zero_reg));
  __ bind(&new_target_not_constructor);
  {
  FrameScope scope(masm, StackFrame::MANUAL);
@@ -2307,7 +2307,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
  }
  }
  }
- __ Branch(&stack_done);
+ __ BranchShort(&stack_done);
  __ bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
  __ bind(&stack_done);
@@ -2478,7 +2478,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
  __ Add64(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ bind(&loop);
  __ Sub64(a4, a4, Operand(1));
- __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ BranchShort(&done_loop, lt, a4, Operand(zero_reg));
  __ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
  __ LoadAnyTaggedField(kScratchReg, MemOperand(a5));
  __ Push(kScratchReg);
@@ -2547,7 +2547,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
  // Check if target has a [[Call]] internal method.
  __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(Map::Bits1::IsCallableBit::kMask));
- __ Branch(&non_callable, eq, scratch, Operand(zero_reg));
+ __ BranchShort(&non_callable, eq, scratch, Operand(zero_reg));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
      type, Operand(JS_PROXY_TYPE));
@@ -2591,7 +2591,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
      a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ Lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
  __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
- __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
+ __ BranchShort(&call_generic_stub, eq, a4, Operand(zero_reg));
  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
      RelocInfo::CODE_TARGET);
@@ -2619,7 +2619,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  __ CmpTagged(scratch, a1, a3);
- __ Branch(&skip, ne, scratch, Operand(zero_reg));
+ __ BranchShort(&skip, ne, scratch, Operand(zero_reg));
  }
  __ LoadTaggedPointerField(
      a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
@@ -2666,7 +2666,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
      RelocInfo::CODE_TARGET, eq, scratch, Operand(JS_BOUND_FUNCTION_TYPE));
  // Only dispatch to proxies after checking whether they are constructors.
- __ Branch(&non_proxy, ne, scratch, Operand(JS_PROXY_TYPE));
+ __ BranchShort(&non_proxy, ne, scratch, Operand(JS_PROXY_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
      RelocInfo::CODE_TARGET);
@@ -2827,7 +2827,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
  __ Ld(a2, MemOperand(a2));
  __ LoadRoot(a4, RootIndex::kTheHoleValue);
  // Cannot use check here as it attempts to generate call into runtime.
- __ Branch(&okay, eq, a4, Operand(a2));
+ __ BranchShort(&okay, eq, a4, Operand(a2));
  __ stop();
  __ bind(&okay);
  }
@@ -2880,7 +2880,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label zero;
- __ Branch(&zero, eq, cp, Operand(zero_reg));
+ __ BranchShort(&zero, eq, cp, Operand(zero_reg));
  __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&zero);
@@ -2922,7 +2922,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  // If we had no exceptions then set result_reg and we are done.
  Label error;
- __ Branch(&error, eq, scratch, Operand(zero_reg));
+ __ BranchShort(&error, eq, scratch, Operand(zero_reg));
  __ Move(result_reg, scratch3);
  __ Branch(&done);
  __ bind(&error);
@@ -2953,7 +2953,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
- __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ BranchShort(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ Move(result_reg, zero_reg);
  __ Branch(&done);
@@ -2970,9 +2970,9 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  // We must specially handle shifts greater than 31.
  Label high_shift_needed, high_shift_done;
- __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ BranchShort(&high_shift_needed, lt, scratch, Operand(32));
  __ Move(input_high, zero_reg);
- __ Branch(&high_shift_done);
+ __ BranchShort(&high_shift_done);
  __ bind(&high_shift_needed);
  // Set the implicit 1 before the mantissa part in input_high.
@@ -2989,12 +2989,12 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  Label pos_shift, shift_done, sign_negative;
  __ li(kScratchReg, 32);
  __ subw(scratch, kScratchReg, scratch);
- __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+ __ BranchShort(&pos_shift, ge, scratch, Operand(zero_reg));
  // Negate scratch.
  __ Sub32(scratch, zero_reg, scratch);
  __ Sll32(input_low, input_low, scratch);
- __ Branch(&shift_done);
+ __ BranchShort(&shift_done);
  __ bind(&pos_shift);
  __ srlw(input_low, input_low, scratch);
@@ -3006,7 +3006,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  result_reg = sign;
  sign = no_reg;
  __ Sub32(result_reg, zero_reg, input_high);
- __ Branch(&sign_negative, ne, scratch, Operand(zero_reg));
+ __ BranchShort(&sign_negative, ne, scratch, Operand(zero_reg));
  __ Move(result_reg, input_high);
  __ bind(&sign_negative);
@@ -3060,14 +3060,14 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
  Register scratch = temp.Acquire();
  __ li(scratch, ExternalReference::is_profiling_address(isolate));
  __ Lb(scratch, MemOperand(scratch, 0));
- __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg));
+ __ BranchShort(&profiler_enabled, ne, scratch, Operand(zero_reg));
  __ li(scratch, ExternalReference::address_of_runtime_stats_flag());
  __ Lw(scratch, MemOperand(scratch, 0));
- __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg));
+ __ BranchShort(&profiler_enabled, ne, scratch, Operand(zero_reg));
  {
  // Call the api function directly.
  __ Move(scratch, function_address);
- __ Branch(&end_profiler_check);
+ __ BranchShort(&end_profiler_check);
  }
  __ bind(&profiler_enabled);
@@ -3131,7 +3131,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
  __ LoadRoot(a4, RootIndex::kTheHoleValue);
  __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
  __ Ld(a5, MemOperand(kScratchReg));
- __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+ __ BranchShort(&promote_scheduled_exception, ne, a4, Operand(a5));
  __ Ret();
@@ -3772,7 +3772,7 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
      RelocInfo::CODE_TARGET);
  Label deopt, bailout;
- __ Branch(&deopt, ne, a0,
-     Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
+ __ BranchShort(&deopt, ne, a0,
+     Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
  __ MaybeRestoreRegisters(registers);
...
@@ -130,7 +130,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
      Condition cond, Register src1,
      const Operand& src2) {
  Label skip;
- Branch(&skip, NegateCondition(cond), src1, src2);
+ BranchShort(&skip, NegateCondition(cond), src1, src2);
  Ld(destination,
      MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
  bind(&skip);
@@ -194,7 +194,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
  Label ok;
  DCHECK(!AreAliased(value, dst, scratch, object));
  And(scratch, dst, Operand(kTaggedSize - 1));
- Branch(&ok, eq, scratch, Operand(zero_reg));
+ BranchShort(&ok, eq, scratch, Operand(zero_reg));
  ebreak();
  bind(&ok);
  }
@@ -2300,28 +2300,28 @@ void TurboAssembler::Clz32(Register rd, Register xx) {
  Move(x, xx);
  li(n, Operand(32));
  srliw(y, x, 16);
- Branch(&L0, eq, y, Operand(zero_reg));
+ BranchShort(&L0, eq, y, Operand(zero_reg));
  Move(x, y);
  addiw(n, n, -16);
  bind(&L0);
  srliw(y, x, 8);
- Branch(&L1, eq, y, Operand(zero_reg));
+ BranchShort(&L1, eq, y, Operand(zero_reg));
  addiw(n, n, -8);
  Move(x, y);
  bind(&L1);
  srliw(y, x, 4);
- Branch(&L2, eq, y, Operand(zero_reg));
+ BranchShort(&L2, eq, y, Operand(zero_reg));
  addiw(n, n, -4);
  Move(x, y);
  bind(&L2);
  srliw(y, x, 2);
- Branch(&L3, eq, y, Operand(zero_reg));
+ BranchShort(&L3, eq, y, Operand(zero_reg));
  addiw(n, n, -2);
  Move(x, y);
  bind(&L3);
  srliw(y, x, 1);
  subw(rd, n, x);
- Branch(&L4, eq, y, Operand(zero_reg));
+ BranchShort(&L4, eq, y, Operand(zero_reg));
  addiw(rd, n, -2);
  bind(&L4);
  }
@@ -2349,33 +2349,33 @@ void TurboAssembler::Clz64(Register rd, Register xx) {
  Move(x, xx);
  li(n, Operand(64));
  srli(y, x, 32);
- Branch(&L0, eq, y, Operand(zero_reg));
+ BranchShort(&L0, eq, y, Operand(zero_reg));
  addiw(n, n, -32);
  Move(x, y);
  bind(&L0);
  srli(y, x, 16);
- Branch(&L1, eq, y, Operand(zero_reg));
+ BranchShort(&L1, eq, y, Operand(zero_reg));
  addiw(n, n, -16);
  Move(x, y);
  bind(&L1);
  srli(y, x, 8);
- Branch(&L2, eq, y, Operand(zero_reg));
+ BranchShort(&L2, eq, y, Operand(zero_reg));
  addiw(n, n, -8);
  Move(x, y);
  bind(&L2);
  srli(y, x, 4);
- Branch(&L3, eq, y, Operand(zero_reg));
+ BranchShort(&L3, eq, y, Operand(zero_reg));
  addiw(n, n, -4);
  Move(x, y);
  bind(&L3);
  srli(y, x, 2);
- Branch(&L4, eq, y, Operand(zero_reg));
+ BranchShort(&L4, eq, y, Operand(zero_reg));
  addiw(n, n, -2);
  Move(x, y);
  bind(&L4);
  srli(y, x, 1);
  subw(rd, n, x);
- Branch(&L5, eq, y, Operand(zero_reg));
+ BranchShort(&L5, eq, y, Operand(zero_reg));
  addiw(rd, n, -2);
  bind(&L5);
  }
...
@@ -1457,7 +1457,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
  // output (i.e., kScratchReg < output)
  if (set_overflow_to_min_i32) {
  __ Add32(kScratchReg, i.OutputRegister(), 1);
- __ Branch(&done, lt, i.OutputRegister(), Operand(kScratchReg));
+ __ BranchShort(&done, lt, i.OutputRegister(), Operand(kScratchReg));
  __ Move(i.OutputRegister(), kScratchReg);
  __ bind(&done);
  }
@@ -1475,7 +1475,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
  __ Trunc_l_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
  if (set_overflow_to_min_i64) {
  __ Add64(kScratchReg, i.OutputRegister(), 1);
- __ Branch(&done, lt, i.OutputRegister(), Operand(kScratchReg));
+ __ BranchShort(&done, lt, i.OutputRegister(), Operand(kScratchReg));
  __ Move(i.OutputRegister(), kScratchReg);
  __ bind(&done);
  }
@@ -2516,7 +2516,7 @@ void CodeGenerator::AssembleConstructFrame() {
  __ Ld(kScratchReg, MemOperand(kScratchReg));
  __ Add64(kScratchReg, kScratchReg,
      Operand(required_slots * kSystemPointerSize));
- __ Branch(&done, uge, sp, Operand(kScratchReg));
+ __ BranchShort(&done, uge, sp, Operand(kScratchReg));
  }
  __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
@@ -2631,7 +2631,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
  if (parameter_slots > 1) {
  Label done;
  __ li(kScratchReg, parameter_slots);
- __ Branch(&done, ge, t0, Operand(kScratchReg));
+ __ BranchShort(&done, ge, t0, Operand(kScratchReg));
  __ Move(t0, kScratchReg);
  __ bind(&done);
  }
...
@@ -161,7 +161,7 @@ void RegExpMacroAssemblerRISCV::Backtrack() {
  __ Ld(a0, MemOperand(frame_pointer(), kBacktrackCount));
  __ Add64(a0, a0, Operand(1));
  __ Sd(a0, MemOperand(frame_pointer(), kBacktrackCount));
- __ Branch(&next, ne, a0, Operand(backtrack_limit()));
+ __ BranchShort(&next, ne, a0, Operand(backtrack_limit()));
  // Backtrack limit exceeded.
  if (can_fallback()) {
@@ -213,7 +213,7 @@ void RegExpMacroAssemblerRISCV::CheckCharacterLT(uc16 limit, Label* on_less) {
  void RegExpMacroAssemblerRISCV::CheckGreedyLoop(Label* on_equal) {
  Label backtrack_non_equal;
  __ Lw(a0, MemOperand(backtrack_stackpointer(), 0));
- __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+ __ BranchShort(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
  __ Add64(backtrack_stackpointer(), backtrack_stackpointer(),
      Operand(kIntSize));
  __ bind(&backtrack_non_equal);
@@ -230,7 +230,7 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReferenceIgnoreCase(
  // At this point, the capture registers are either both set or both cleared.
  // If the capture length is zero, then the capture is either empty or cleared.
  // Fall through in both cases.
- __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+ __ BranchShort(&fallthrough, eq, a1, Operand(zero_reg));
  if (read_backward) {
  __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
@@ -267,20 +267,20 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReferenceIgnoreCase(
  __ Lbu(a4, MemOperand(a2, 0));
  __ addi(a2, a2, char_size());
- __ Branch(&loop_check, eq, a4, Operand(a3));
+ __ BranchShort(&loop_check, eq, a4, Operand(a3));
  // Mismatch, try case-insensitive match (converting letters to lower-case).
  __ Or(a3, a3, Operand(0x20));  // Convert capture character to lower-case.
  __ Or(a4, a4, Operand(0x20));  // Also convert input character.
- __ Branch(&fail, ne, a4, Operand(a3));
+ __ BranchShort(&fail, ne, a4, Operand(a3));
  __ Sub64(a3, a3, Operand('a'));
- __ Branch(&loop_check, Uless_equal, a3, Operand('z' - 'a'));
+ __ BranchShort(&loop_check, Uless_equal, a3, Operand('z' - 'a'));
  // Latin-1: Check for values in range [224,254] but not 247.
  __ Sub64(a3, a3, Operand(224 - 'a'));
  // Weren't Latin-1 letters.
- __ Branch(&fail, Ugreater, a3, Operand(254 - 224));
+ __ BranchShort(&fail, Ugreater, a3, Operand(254 - 224));
  // Check for 247.
- __ Branch(&fail, eq, a3, Operand(247 - 224));
+ __ BranchShort(&fail, eq, a3, Operand(247 - 224));
  __ bind(&loop_check);
  __ Branch(&loop, lt, a0, Operand(a1));
@@ -374,7 +374,7 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReference(int start_reg,
  // At this point, the capture registers are either both set or both cleared.
  // If the capture length is zero, then the capture is either empty or cleared.
  // Fall through in both cases.
- __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+ __ BranchShort(&fallthrough, eq, a1, Operand(zero_reg));
  if (read_backward) {
  __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
@@ -489,10 +489,10 @@ bool RegExpMacroAssemblerRISCV::CheckSpecialCharacterClass(uc16 type,
  if (mode_ == LATIN1) {
  // One byte space characters are '\t'..'\r', ' ' and \u00a0.
  Label success;
- __ Branch(&success, eq, current_character(), Operand(' '));
+ __ BranchShort(&success, eq, current_character(), Operand(' '));
  // Check range 0x09..0x0D.
  __ Sub64(a0, current_character(), Operand('\t'));
- __ Branch(&success, Uless_equal, a0, Operand('\r' - '\t'));
+ __ BranchShort(&success, Uless_equal, a0, Operand('\r' - '\t'));
  // \u00a0 (NBSP).
  BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
  __ bind(&success);
@@ -564,7 +564,7 @@ bool RegExpMacroAssemblerRISCV::CheckSpecialCharacterClass(uc16 type,
  Label done;
  if (mode_ != LATIN1) {
  // Table is 256 entries, so all Latin1 characters can be tested.
- __ Branch(&done, Ugreater, current_character(), Operand('z'));
+ __ BranchShort(&done, Ugreater, current_character(), Operand('z'));
  }
  ExternalReference map =
      ExternalReference::re_word_character_map(isolate());
@@ -665,11 +665,11 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
  __ Ld(a0, MemOperand(a0));
  __ Sub64(a0, sp, a0);
  // Handle it if the stack pointer is already below the stack limit.
- __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ __ BranchShort(&stack_limit_hit, le, a0, Operand(zero_reg));
  // Check if there is room for the variable number of registers above
  // the stack limit.
- __ Branch(&stack_ok, Ugreater_equal, a0,
-     Operand(num_registers_ * kSystemPointerSize));
+ __ BranchShort(&stack_ok, Ugreater_equal, a0,
+     Operand(num_registers_ * kSystemPointerSize));
  // Exit with OutOfMemory exception. There is not enough space on the stack
  // for our working registers.
  __ li(a0, Operand(EXCEPTION));
@@ -704,7 +704,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
  Label load_char_start_regexp, start_regexp;
  // Load newline if index is at start, previous character otherwise.
- __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ BranchShort(&load_char_start_regexp, ne, a1, Operand(zero_reg));
  __ li(current_character(), Operand('\n'));
  __ jmp(&start_regexp);
@@ -797,7 +797,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
  // output registers is reduced by the number of stored captures.
  __ Sub64(a1, a1, num_saved_registers_);
  // Check whether we have enough room for another set of capture results.
- __ Branch(&return_a0, lt, a1, Operand(num_saved_registers_));
+ __ BranchShort(&return_a0, lt, a1, Operand(num_saved_registers_));
  __ Sd(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
  // Advance the location for output.
@@ -814,8 +814,8 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
  __ Branch(&load_char_start_regexp, ne, current_input_offset(),
      Operand(s3));
  // Offset from the end is zero if we already reached the end.
- __ Branch(&exit_label_, eq, current_input_offset(),
-     Operand(zero_reg));
+ __ BranchShort(&exit_label_, eq, current_input_offset(),
+     Operand(zero_reg));
  // Advance current position after a zero-length match.
  Label advance;
  __ bind(&advance);
@@ -894,7 +894,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
  __ MultiPop(regexp_registers);
  // If return nullptr, we have failed to grow the stack, and
  // must exit with a stack-overflow exception.
- __ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
+ __ BranchShort(&exit_with_exception, eq, a0, Operand(zero_reg));
  // Otherwise use return value as new stack pointer.
  __ mv(backtrack_stackpointer(), a0);
  // Restore saved registers and continue.
@@ -976,7 +976,7 @@ void RegExpMacroAssemblerRISCV::PushBacktrack(Label* label) {
  } else {
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label after_constant;
- __ Branch(&after_constant);
+ __ BranchShort(&after_constant);
  int offset = masm_->pc_offset();
  int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
  __ emit(0);
@@ -1016,8 +1016,8 @@ void RegExpMacroAssemblerRISCV::ReadStackPointerFromRegister(int reg) {
  void RegExpMacroAssemblerRISCV::SetCurrentPositionFromEnd(int by) {
  Label after_position;
- __ Branch(&after_position, ge, current_input_offset(),
-     Operand(-by * char_size()));
+ __ BranchShort(&after_position, ge, current_input_offset(),
+     Operand(-by * char_size()));
  __ li(current_input_offset(), -by * char_size());
  // On RegExp code entry (where this operation is used), the character before
  // the current position is expected to be already loaded.
...