Commit 95f8ac49 authored by Junliang Yan, committed by Commit Bot

s390x: unify constant load and LoadRR as mov

Change-Id: I6d7e263b84d6871cb13cb01b2b51299b9249d961
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2586994
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#71720}
parent e677c91f
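Summary for reviewers: the s390-specific helpers LoadRR (register-register copy) and LoadImmP, together with raw lghi/lgfi immediate loads, are folded into a single overloaded TurboAssembler::mov that picks the encoding itself. A minimal sketch of what call sites look like before and after, based on the hunks below:

```cpp
// Before: callers chose the helper, and thus the encoding, by hand.
__ LoadRR(r4, r2);             // register-register copy (lgr / lr)
__ LoadImmP(r9, Operand(-1));  // pointer-sized immediate load (lghi / lhi)
__ lgfi(r2, Operand::Zero());  // raw 32-bit immediate instruction

// After: one mov; the overloads select lgr, lghi, lgfi, llilf, etc.
__ mov(r4, r2);
__ mov(r9, Operand(-1));
__ mov(r2, Operand::Zero());
```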
@@ -52,7 +52,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
           kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
   __ CallRuntime(function_id, 1);
-  __ LoadRR(r4, r2);
+  __ mov(r4, r2);
   // Restore target function, new target and actual argument count.
   __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
@@ -204,7 +204,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // receiver. The second copy is pushed after the arguments, which we saved
   // in r6, since r0 needs to store the number of arguments before invoking
   // the function.
-  __ LoadRR(r8, r2);
+  __ mov(r8, r2);
   // Set up pointer to first argument (skip receiver).
   __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
@@ -391,7 +391,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
       FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
   {
     Label done_loop, loop;
-    __ LoadRR(r8, r5);
+    __ mov(r8, r5);
     __ bind(&loop);
     __ SubP(r8, r8, Operand(1));
@@ -432,8 +432,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // We abuse new.target both to indicate that this is a resume call and to
   // pass in the generator object. In ordinary calls, new.target is always
   // undefined because generator functions are non-constructable.
-  __ LoadRR(r5, r3);
-  __ LoadRR(r3, r6);
+  __ mov(r5, r3);
+  __ mov(r3, r6);
   static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
   __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
   __ JumpCodeObject(r4);
@@ -543,11 +543,11 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
     // Initialize the root register.
     // C calling convention. The first argument is passed in r2.
-    __ LoadRR(kRootRegister, r2);
+    __ mov(kRootRegister, r2);
   }
   // Save r6 in r1.
-  __ LoadRR(r1, r6);
+  __ mov(r1, r6);
   // Push a frame with special values setup to mark it as an entry frame.
   // Bad FP (-1)
@@ -559,7 +559,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   pushed_stack_space += 5 * kSystemPointerSize;
   // Push a bad frame pointer to fail if it is used.
-  __ LoadImmP(r9, Operand(-1));
+  __ mov(r9, Operand(-1));
   __ mov(r8, Operand(StackFrame::TypeToMarker(type)));
   __ mov(r7, Operand(StackFrame::TypeToMarker(type)));
@@ -580,7 +580,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
       EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
   // Restore r6.
-  __ LoadRR(r6, r1);
+  __ mov(r6, r1);
   // If this is the outermost JS call, set js_entry_sp value.
   Label non_outermost_js;
@@ -719,7 +719,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   //   [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv
   // r0,r2,r7-r9, cp may be clobbered
-  __ LoadRR(r2, r6);
+  __ mov(r2, r6);
   // Load argv from the stack.
   __ LoadP(
       r6, MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
@@ -789,8 +789,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ Push(r5);
   // Setup new.target, argc and function.
-  __ LoadRR(r5, r3);
-  __ LoadRR(r3, r4);
+  __ mov(r5, r3);
+  __ mov(r3, r4);
   // r2: argc
   // r3: function
   // r5: new.target
@@ -798,10 +798,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // Initialize all JavaScript callee-saved registers, since they will be seen
   // by the garbage collector as part of handlers.
   __ LoadRoot(r4, RootIndex::kUndefinedValue);
-  __ LoadRR(r6, r4);
-  __ LoadRR(r7, r6);
-  __ LoadRR(r8, r6);
-  __ LoadRR(r9, r6);
+  __ mov(r6, r4);
+  __ mov(r7, r6);
+  __ mov(r8, r6);
+  __ mov(r9, r6);
   // Invoke the code.
   Handle<Code> builtin = is_construct
@@ -831,7 +831,7 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
   // r2: root_register_value
   // r3: microtask_queue
-  __ LoadRR(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r3);
+  __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r3);
   __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
 }
@@ -843,7 +843,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
   // Store code entry in the closure.
   __ StoreTaggedField(optimized_code,
                       FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
-  __ LoadRR(scratch1,
+  __ mov(scratch1,
            optimized_code);  // Write barrier clobbers scratch1 below.
   __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
                       kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
@@ -873,7 +873,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   Label corrected_args_count;
   __ CmpP(params_size, actual_params_size);
   __ bge(&corrected_args_count);
-  __ LoadRR(params_size, actual_params_size);
+  __ mov(params_size, actual_params_size);
   __ bind(&corrected_args_count);
 #endif
@@ -1141,7 +1141,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
                 BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ lghi(r1, Operand(0));
+  __ mov(r1, Operand(0));
   __ StoreU16(r1,
               FieldMemOperand(kInterpreterBytecodeArrayRegister,
                               BytecodeArray::kOsrNestingLevelOffset),
@@ -1175,7 +1175,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ ShiftRightP(r4, r4, Operand(kSystemPointerSizeLog2));
   __ LoadAndTestP(r4, r4);
   __ beq(&no_args);
-  __ LoadRR(r1, r4);
+  __ mov(r1, r4);
   __ bind(&loop);
   __ push(kInterpreterAccumulatorRegister);
   __ SubP(r1, Operand(1));
@@ -1338,7 +1338,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
     // Don't copy receiver. Argument count is correct.
-    __ LoadRR(r5, r2);
+    __ mov(r5, r2);
   }
   // Push the arguments.
@@ -1573,7 +1573,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
   Register scratch = ip;
   if (with_result) {
     if (java_script_builtin) {
-      __ LoadRR(scratch, r2);
+      __ mov(scratch, r2);
     } else {
       // Overwrite the hole inserted by the deoptimizer with the return value
       // from the LAZY deopt point.
@@ -1614,7 +1614,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
   __ AddP(sp, sp,
           Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
   __ Pop(r0);
-  __ LoadRR(r14, r0);
+  __ mov(r14, r0);
   __ LoadEntryFromBuiltinIndex(builtin);
   __ Jump(builtin);
 }
@@ -1682,7 +1682,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
   __ AddP(r2, r3);
   __ AddP(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ LoadRR(r14, r0);
+  __ mov(r14, r0);
   // And "return" to the OSR entry point of the function.
   __ Ret();
@@ -1702,7 +1702,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // present) instead.
   {
     __ LoadRoot(r7, RootIndex::kUndefinedValue);
-    __ LoadRR(r4, r7);
+    __ mov(r4, r7);
     Label done;
     __ LoadP(r3, MemOperand(sp));  // receiver
@@ -1742,7 +1742,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // arguments to the receiver.
   __ bind(&no_arguments);
   {
-    __ LoadImmP(r2, Operand::Zero());
+    __ mov(r2, Operand::Zero());
     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   }
 }
@@ -1784,8 +1784,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   // thisArgument (if present) instead.
   {
     __ LoadRoot(r3, RootIndex::kUndefinedValue);
-    __ LoadRR(r7, r3);
-    __ LoadRR(r4, r3);
+    __ mov(r7, r3);
+    __ mov(r4, r3);
     Label done;
@@ -1835,15 +1835,15 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   // (if present) instead.
   {
     __ LoadRoot(r3, RootIndex::kUndefinedValue);
-    __ LoadRR(r4, r3);
+    __ mov(r4, r3);
     Label done;
-    __ LoadRR(r6, r3);
+    __ mov(r6, r3);
     __ cghi(r2, Operand(1));
     __ blt(&done);
     __ LoadP(r3, MemOperand(sp, kSystemPointerSize));  // thisArg
-    __ LoadRR(r5, r3);
+    __ mov(r5, r3);
     __ cghi(r2, Operand(2));
     __ blt(&done);
     __ LoadP(r4, MemOperand(sp, 2 * kSystemPointerSize));  // argArray
@@ -1957,11 +1957,11 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   {
     Label copy, check;
     Register num = ip, src = r8, dest = r7;
-    __ LoadRR(src, sp);
+    __ mov(src, sp);
     __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
     __ SubP(sp, sp, r1);
     // Update stack pointer.
-    __ LoadRR(dest, sp);
+    __ mov(dest, sp);
     __ ltgr(num, r2);
     __ b(&check);
     __ bind(&copy);
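The copy/check loop in this hunk shifts the existing stack slots down to make room for the spread arguments before they are pushed. In C terms this is roughly (my sketch; num/src/dest mirror the registers named above, and the loop body itself sits outside the hunk):

```cpp
// Pseudo-C for the stack shift around the copy/check labels (sketch).
uintptr_t* src = sp;                        // mov(src, sp)
sp -= count;                                // ShiftLeftP + SubP make room
uintptr_t* dest = sp;                       // mov(dest, sp)
for (intptr_t num = argc; num > 0; --num) {
  *dest++ = *src++;                         // slide existing slots downward
}
```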
@@ -1981,7 +1981,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   __ beq(&no_args);
   __ AddP(r4, r4,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
-  __ LoadRR(r1, r6);
+  __ mov(r1, r6);
   __ bind(&loop);
   __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
   __ la(r4, MemOperand(r4, kTaggedSize));
@@ -2037,7 +2037,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
 #ifdef V8_NO_ARGUMENTS_ADAPTOR
   // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
   // code is erased.
-  __ LoadRR(r6, fp);
+  __ mov(r6, fp);
   __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
 #else
   // Check if we have an arguments adaptor frame below the function frame.
@@ -2055,7 +2055,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
     __ LoadU16(
         r7,
         FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ LoadRR(r6, fp);
+    __ mov(r6, fp);
   }
   __ b(&arguments_done);
   __ bind(&arguments_adaptor);
@@ -2085,7 +2085,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
     __ StackOverflowCheck(r7, scratch, &stack_overflow);
     // Forward the arguments from the caller frame.
-    __ LoadRR(r5, r5);
+    __ mov(r5, r5);
     // Point to the first argument to copy (skipping the receiver).
     __ AddP(r6, r6,
             Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2099,11 +2099,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
       Label copy, check;
       Register num = r1, src = ip,
                dest = r4;  // r7 and r10 are context and root.
-      __ LoadRR(src, sp);
+      __ mov(src, sp);
       // Update stack pointer.
       __ ShiftLeftP(scratch, r7, Operand(kSystemPointerSizeLog2));
       __ SubP(sp, sp, scratch);
-      __ LoadRR(dest, sp);
+      __ mov(dest, sp);
       __ ltgr(num, r2);
       __ b(&check);
       __ bind(&copy);
@@ -2208,12 +2208,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
         FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
         __ SmiTag(r2);
         __ Push(r2, r3);
-        __ LoadRR(r2, r5);
+        __ mov(r2, r5);
         __ Push(cp);
         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
                 RelocInfo::CODE_TARGET);
         __ Pop(cp);
-        __ LoadRR(r5, r2);
+        __ mov(r5, r2);
         __ Pop(r2, r3);
         __ SmiUntag(r2);
       }
@@ -2611,7 +2611,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Call the entry point.
   __ bind(&invoke);
-  __ LoadRR(r2, r4);
+  __ mov(r2, r4);
   // r2 : expected number of arguments
   // r3 : function (passed through to callee)
   // r5 : new target (passed through to callee)
@@ -2674,7 +2674,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   __ LoadSmiLiteral(cp, Smi::zero());
   __ CallRuntime(Runtime::kWasmCompileLazy, 2);
   // The entrypoint address is the return value.
-  __ LoadRR(ip, r2);
+  __ mov(ip, r2);
   // Restore registers.
   __ MultiPopDoubles(fp_regs);
@@ -2719,11 +2719,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   // If argv_mode == kArgvInRegister:
   // r4: pointer to the first argument
-  __ LoadRR(r7, r3);
+  __ mov(r7, r3);
   if (argv_mode == kArgvInRegister) {
     // Move argv into the correct register.
-    __ LoadRR(r3, r4);
+    __ mov(r3, r4);
   } else {
     // Compute the argv pointer.
     __ ShiftLeftP(r3, r2, Operand(kSystemPointerSizeLog2));
@@ -2753,8 +2753,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
   // Store a copy of argc, argv in callee-saved registers for later.
-  __ LoadRR(r6, r2);
-  __ LoadRR(r8, r3);
+  __ mov(r6, r2);
+  __ mov(r8, r3);
   // r2, r6: number of arguments including receiver  (C callee-saved)
   // r3, r8: pointer to the first argument
   // r7: pointer to builtin function  (C callee-saved)
@@ -2767,14 +2767,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
     // Use frame storage reserved by calling function to pass return
     // buffer as implicit first argument in R2. Shift original parameters
     // by one register each.
-    __ LoadRR(r4, r3);
-    __ LoadRR(r3, r2);
+    __ mov(r4, r3);
+    __ mov(r3, r2);
     __ la(r2,
           MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
     isolate_reg = r5;
     // Clang doesn't preserve r2 (result buffer);
     // write to r8 (preserved) before the call.
-    __ LoadRR(r8, r2);
+    __ mov(r8, r2);
   }
   // Call C built-in.
   __ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
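Background for the needs_return_buffer path: on zSeries, a struct returned by value travels through a hidden buffer pointer in r2, which shifts every visible argument up one register; the buffer address is also mirrored into callee-saved r8 because the callee may clobber r2. Schematically (my sketch, not code from this patch):

```cpp
// C-level view of the lowering handled above (sketch):
//   struct Pair { intptr_t first, second; };
//   Pair BuiltinFn(intptr_t arg0, ...);
// is called as if it were:
//   void BuiltinFn(Pair* ret /* hidden, r2 */, intptr_t arg0 /* r3 */, ...);
// After the call, the two result words are reloaded from *ret.
```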
@@ -2783,7 +2783,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   // If return value is on the stack, pop it to registers.
   if (needs_return_buffer) {
-    __ LoadRR(r2, r8);
+    __ mov(r2, r8);
     __ LoadP(r3, MemOperand(r2, kSystemPointerSize));
     __ LoadP(r2, MemOperand(r2));
   }
@@ -2840,8 +2840,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   {
     FrameScope scope(masm, StackFrame::MANUAL);
     __ PrepareCallCFunction(3, 0, r2);
-    __ LoadImmP(r2, Operand::Zero());
-    __ LoadImmP(r3, Operand::Zero());
+    __ mov(r2, Operand::Zero());
+    __ mov(r3, Operand::Zero());
     __ Move(r4, ExternalReference::isolate_address(masm->isolate()));
     __ CallCFunction(find_handler, 3);
   }
@@ -3032,7 +3032,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
   __ bne(&profiler_enabled, Label::kNear);
   {
     // Call the api function directly.
-    __ LoadRR(scratch, function_address);
+    __ mov(scratch, function_address);
     __ b(&end_profiler_check, Label::kNear);
   }
   __ bind(&profiler_enabled);
@@ -3104,11 +3104,11 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
   // HandleScope limit has changed. Delete allocated extensions.
   __ bind(&delete_allocated_handles);
   __ StoreU64(r7, MemOperand(r9, kLimitOffset));
-  __ LoadRR(r6, r2);
+  __ mov(r6, r2);
   __ PrepareCallCFunction(1, r7);
   __ Move(r2, ExternalReference::isolate_address(isolate));
   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
-  __ LoadRR(r2, r6);
+  __ mov(r2, r6);
   __ b(&leave_exit_frame, Label::kNear);
 }
@@ -3177,7 +3177,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
   // We use it below to set up the FunctionCallbackInfo object.
-  __ LoadRR(scratch, sp);
+  __ mov(scratch, sp);
   // Allocate the v8::Arguments structure in the arguments' space since
   // it's not controlled by GC.
@@ -3281,7 +3281,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
   // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
-  __ LoadRR(r2, sp);                                 // r2 = Handle<Name>
+  __ mov(r2, sp);                                    // r2 = Handle<Name>
   __ AddP(r3, r2, Operand(1 * kSystemPointerSize));  // r3 = v8::PCI::args_
   // If ABI passes Handles (pointer-sized struct) in a register:
@@ -3379,26 +3379,26 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
   static constexpr int kSavedRegistersAreaSize =
       (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
-  __ lgfi(r4, Operand(Deoptimizer::kFixedExitSizeMarker));
+  __ mov(r4, Operand(Deoptimizer::kFixedExitSizeMarker));
   // Cleanse the Return address for 31-bit
   __ CleanseP(r14);
   // Get the address of the location in the code object (r5)(return
   // address for lazy deoptimization) and compute the fp-to-sp delta in
   // register r6.
-  __ LoadRR(r5, r14);
+  __ mov(r5, r14);
   __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
   __ SubP(r6, fp, r6);
   // Allocate a new deoptimizer object.
   // Pass six arguments in r2 to r7.
   __ PrepareCallCFunction(6, r7);
-  __ LoadImmP(r2, Operand::Zero());
+  __ mov(r2, Operand::Zero());
   Label context_check;
   __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ JumpIfSmi(r3, &context_check);
   __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
   __ bind(&context_check);
-  __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
+  __ mov(r3, Operand(static_cast<int>(deopt_kind)));
   // r4: bailout id already loaded.
   // r5: code address or 0 already loaded.
   // r6: Fp-to-sp delta.
@@ -3537,7 +3537,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
   __ push(r8);
   // Restore the registers from the last output frame.
-  __ LoadRR(r1, r4);
+  __ mov(r1, r4);
   for (int i = kNumberOfRegisters - 1; i > 0; i--) {
     int offset =
         (i * kSystemPointerSize) + FrameDescription::registers_offset();
......
@@ -307,7 +307,7 @@ void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
 void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                             intptr_t offset) {
   if (offset == 0) {
-    LoadRR(destination, kRootRegister);
+    mov(destination, kRootRegister);
   } else if (is_uint12(offset)) {
     la(destination, MemOperand(kRootRegister, offset));
   } else {
@@ -485,7 +485,7 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) {
 void TurboAssembler::Move(Register dst, Register src, Condition cond) {
   if (dst != src) {
     if (cond == al) {
-      LoadRR(dst, src);
+      mov(dst, src);
     } else {
       LoadOnConditionP(cond, dst, src);
     }
@@ -560,7 +560,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
     DCHECK_NE(scratch2, r0);
     ShiftLeftP(scratch, size, Operand(kSystemPointerSizeLog2));
     lay(scratch, MemOperand(array, scratch));
-    LoadRR(scratch2, array);
+    mov(scratch2, array);
     bind(&loop);
     CmpP(scratch2, scratch);
     bge(&done);
@@ -1270,7 +1270,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
         Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment));
   }
   LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  LoadRR(sp, r1);
+  mov(sp, r1);
   int frame_ends = pc_offset();
   return frame_ends;
 }
@@ -1457,7 +1457,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
   Register tmp_reg = scratch1;
   Label loop;
   AddP(tmp_reg, callee_args_count, Operand(1));  // +1 for receiver
-  LoadRR(r1, tmp_reg);
+  mov(r1, tmp_reg);
   bind(&loop);
   LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
   StoreU64(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
@@ -1466,7 +1466,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
   BranchOnCount(r1, &loop);
   // Leave current frame.
-  LoadRR(sp, dst_reg);
+  mov(sp, dst_reg);
 }
 MemOperand MacroAssembler::StackLimitAsMemOperand(StackLimitKind kind) {
@@ -1532,12 +1532,12 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
   {
     Label copy, check;
     Register num = r7, src = r8, dest = ip;  // r7 and r8 are context and root.
-    LoadRR(src, sp);
+    mov(src, sp);
     // Update stack pointer.
     ShiftLeftP(scratch, expected_parameter_count,
                Operand(kSystemPointerSizeLog2));
     SubP(sp, sp, scratch);
-    LoadRR(dest, sp);
+    mov(dest, sp);
     ltgr(num, actual_parameter_count);
     b(&check);
     bind(&copy);
@@ -1781,7 +1781,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
                                      Label* on_in_range) {
   if (lower_limit != 0) {
     Register scratch = r0;
-    LoadRR(scratch, value);
+    mov(scratch, value);
     slgfi(scratch, Operand(lower_limit));
     CmpLogicalP(scratch, Operand(higher_limit - lower_limit));
   } else {
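JumpIfIsInRange relies on the classic unsigned-subtract trick, so one compare covers both bounds; as a plain C check (my illustration):

```cpp
// value in [lower_limit, higher_limit] as a single unsigned comparison:
bool in_range = static_cast<uint64_t>(value - lower_limit) <=
                static_cast<uint64_t>(higher_limit - lower_limit);
```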
@@ -2093,7 +2093,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
   if (frame_alignment > kSystemPointerSize) {
     // Make stack end at alignment and make room for stack arguments
     // -- preserving original value of sp.
-    LoadRR(scratch, sp);
+    mov(scratch, sp);
     lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kSystemPointerSize));
     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
     ClearRightImm(sp, sp,
@@ -2279,15 +2279,13 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
   UNREACHABLE();
 }
+void TurboAssembler::mov(Register dst, Register src) { lgr(dst, src); }
 void TurboAssembler::mov(Register dst, const Operand& src) {
-#if V8_TARGET_ARCH_S390X
-  int64_t value;
-#else
-  int value;
-#endif
+  int64_t value = 0;
   if (src.is_heap_object_request()) {
     RequestHeapObject(src.heap_object_request());
-    value = 0;
   } else {
     value = src.immediate();
   }
@@ -2297,15 +2295,35 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
     RecordRelocInfo(src.rmode(), value);
   }
 #if V8_TARGET_ARCH_S390X
-  int32_t hi_32 = static_cast<int64_t>(value) >> 32;
+  int32_t hi_32 = static_cast<int32_t>(value >> 32);
   int32_t lo_32 = static_cast<int32_t>(value);
+  if (src.rmode() == RelocInfo::NONE) {
+    if (hi_32 == 0) {
+      if (is_uint16(lo_32)) {
+        llill(dst, Operand(lo_32));
+        return;
+      }
+      llilf(dst, Operand(lo_32));
+      return;
+    } else if (lo_32 == 0) {
+      if (is_uint16(hi_32)) {
+        llihl(dst, Operand(hi_32));
+        return;
+      }
+      llihf(dst, Operand(hi_32));
+      return;
+    } else if (is_int16(value)) {
+      lghi(dst, Operand(value));
+      return;
+    } else if (is_int32(value)) {
+      lgfi(dst, Operand(value));
+      return;
+    }
+  }
   iihf(dst, Operand(hi_32));
   iilf(dst, Operand(lo_32));
 #else
   iilf(dst, Operand(value));
 #endif
 }
 void TurboAssembler::Mul32(Register dst, const MemOperand& src1) {
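The new RelocInfo::NONE fast path picks the shortest instruction that can materialize the 64-bit immediate; relocatable values still take the fixed-length iihf/iilf pair so the patcher has a known layout. Summarizing the ladder (hi_32/lo_32 are the high and low words of value):

```cpp
// Selection order in mov(Register, const Operand&) above:
//   hi_32 == 0 && is_uint16(lo_32) -> llill dst, lo_32  // e.g. mov(r1, Operand(0))
//   hi_32 == 0                     -> llilf dst, lo_32
//   lo_32 == 0 && is_uint16(hi_32) -> llihl dst, hi_32
//   lo_32 == 0                     -> llihf dst, hi_32
//   is_int16(value)                -> lghi  dst, value  // e.g. mov(r9, Operand(-1))
//   is_int32(value)                -> lgfi  dst, value
//   otherwise, or relocatable      -> iihf + iilf       // two 32-bit halves
```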
@@ -2691,7 +2709,7 @@ void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
       AddPImm_RRI(dst, src, opnd);
       return;
     }
-    LoadRR(dst, src);
+    mov(dst, src);
   }
   AddP(dst, opnd);
 }
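The same shape recurs through the AddP/SubP/AndP/OrP/XorP hunks that follow: when the three-operand distinct-operands form cannot be used, the helper copies first and then applies the destructive two-operand form, and after this patch the copy is spelled mov. Generic sketch (OpP is a placeholder name, not a real helper):

```cpp
// Copy-then-destructive-op fallback used by the arithmetic helpers (sketch).
void TurboAssembler::OpP(Register dst, Register src, const Operand& opnd) {
  if (dst != src) mov(dst, src);  // get the source operand into dst first
  OpP(dst, opnd);                 // then the destructive two-operand form
}
```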
@@ -2740,7 +2758,7 @@ void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
       AddP_RRR(dst, src1, src2);
       return;
     } else {
-      LoadRR(dst, src1);
+      mov(dst, src1);
     }
   } else if (dst == src2) {
     src2 = src1;
@@ -2761,7 +2779,7 @@ void TurboAssembler::AddP_ExtendSrc(Register dst, Register src1,
     lgfr(dst, src2);
     agr(dst, src1);
   } else {
-    if (dst != src1) LoadRR(dst, src1);
+    if (dst != src1) mov(dst, src1);
     agfr(dst, src2);
   }
 #else
@@ -2960,7 +2978,7 @@ void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
     SubP_RRR(dst, src1, src2);
     return;
   }
-  if (dst != src1 && dst != src2) LoadRR(dst, src1);
+  if (dst != src1 && dst != src2) mov(dst, src1);
   // In scenario where we have dst = src - dst, we need to swap and negate
   if (dst != src1 && dst == src2) {
     Label done;
@@ -2980,7 +2998,7 @@ void TurboAssembler::SubP_ExtendSrc(Register dst, Register src1,
                                     Register src2) {
 #if V8_TARGET_ARCH_S390X
-  if (dst != src1 && dst != src2) LoadRR(dst, src1);
+  if (dst != src1 && dst != src2) mov(dst, src1);
   // In scenario where we have dst = src - dst, we need to swap and negate
   if (dst != src1 && dst == src2) {
@@ -3118,7 +3136,7 @@ void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
       AndP_RRR(dst, src1, src2);
       return;
     } else {
-      LoadRR(dst, src1);
+      mov(dst, src1);
     }
   } else if (dst == src2) {
     src2 = src1;
@@ -3205,7 +3223,7 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
   }
   // If we are &'ing zero, we can just whack the dst register and skip copy
-  if (dst != src && (0 != value)) LoadRR(dst, src);
+  if (dst != src && (0 != value)) mov(dst, src);
   AndP(dst, opnd);
 }
@@ -3241,7 +3259,7 @@ void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
       OrP_RRR(dst, src1, src2);
       return;
     } else {
-      LoadRR(dst, src1);
+      mov(dst, src1);
     }
   } else if (dst == src2) {
     src2 = src1;
@@ -3293,7 +3311,7 @@ void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) {
 // OR Pointer Size - dst = src | imm
 void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
-  if (dst != src) LoadRR(dst, src);
+  if (dst != src) mov(dst, src);
   OrP(dst, opnd);
 }
@@ -3329,7 +3347,7 @@ void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
       XorP_RRR(dst, src1, src2);
       return;
     } else {
-      LoadRR(dst, src1);
+      mov(dst, src1);
     }
   } else if (dst == src2) {
     src2 = src1;
@@ -3378,7 +3396,7 @@ void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) {
 // XOR Pointer Size - dst = src ^ imm
 void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) {
-  if (dst != src) LoadRR(dst, src);
+  if (dst != src) mov(dst, src);
   XorP(dst, opnd);
 }
@@ -3673,7 +3691,7 @@ void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
     mvhi(mem, opnd);
 #endif
   } else {
-    LoadImmP(scratch, opnd);
+    mov(scratch, opnd);
     StoreU64(scratch, mem);
   }
 }
@@ -4299,7 +4317,7 @@ void TurboAssembler::ClearRightImm(Register dst, Register src,
   uint64_t hexMask = ~((1L << numBitsToClear) - 1);
   // S390 AND instr clobbers source. Make a copy if necessary.
-  if (dst != src) LoadRR(dst, src);
+  if (dst != src) mov(dst, src);
   if (numBitsToClear <= 16) {
     nill(dst, Operand(static_cast<uint16_t>(hexMask)));
@@ -4342,16 +4360,16 @@ void TurboAssembler::Popcnt64(Register dst, Register src) {
 void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
   if (src == dst) return;
   DCHECK(!AreAliased(src, dst, scratch));
-  LoadRR(scratch, src);
-  LoadRR(src, dst);
-  LoadRR(dst, scratch);
+  mov(scratch, src);
+  mov(src, dst);
+  mov(dst, scratch);
 }
 void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
   if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch));
   if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
   DCHECK(!AreAliased(src, scratch));
-  LoadRR(scratch, src);
+  mov(scratch, src);
   LoadP(src, dst);
   StoreU64(scratch, dst);
 }
......
@@ -73,9 +73,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
 #define XorP_RRR xgrk
 // Load / Store
-#define LoadRR lgr
 #define LoadAndTestRR ltgr
-#define LoadImmP lghi
 // Compare
 #define CmpPH cghi
@@ -110,9 +108,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
 #define XorP_RRR xrk
 // Load / Store
-#define LoadRR lr
 #define LoadAndTestRR ltr
-#define LoadImmP lhi
 // Compare
 #define CmpPH chi
@@ -504,6 +500,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 #endif
   void mov(Register dst, const Operand& src);
+  void mov(Register dst, Register src);
   void CleanUInt32(Register x) {
 #ifdef V8_TARGET_ARCH_S390X
@@ -899,7 +896,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
     if (rangeEnd > 0)  // Don't need to shift if rangeEnd is zero.
       ShiftRightP(dst, src, Operand(rangeEnd));
     else if (dst != src)  // If we didn't shift, we might need to copy
-      LoadRR(dst, src);
+      mov(dst, src);
     int width = rangeStart - rangeEnd + 1;
 #if V8_TARGET_ARCH_S390X
     uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
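For context on this header hunk: the surrounding helper extracts the bit field [rangeStart..rangeEnd] of src into dst, shifting right to drop the low bits and masking to the field width. My reading of it as plain C:

```cpp
// Equivalent of the bit-field extraction around the hunk (sketch):
uint64_t ExtractBitRange(uint64_t src, int rangeStart, int rangeEnd) {
  int width = rangeStart - rangeEnd + 1;                    // field width in bits
  uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
  return (src >> rangeEnd) & mask;                          // shift, then mask
}
```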
......
@@ -602,7 +602,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
 // Copy remainder to output reg
 #define ASSEMBLE_MODULO(div_instr, shift_instr) \
   do {                                          \
-    __ LoadRR(r0, i.InputRegister(0));          \
+    __ mov(r0, i.InputRegister(0));             \
     __ shift_instr(r0, Operand(32));            \
     __ div_instr(r0, i.InputRegister(1));       \
     __ LoadU32(i.OutputRegister(), r0);         \
@@ -1169,8 +1169,8 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
   // Calculate a mask which has all bits set in the normal case, but has all
   // bits cleared if we are speculatively executing the wrong PC.
-  __ LoadImmP(kSpeculationPoisonRegister, Operand::Zero());
-  __ LoadImmP(r0, Operand(-1));
+  __ mov(kSpeculationPoisonRegister, Operand::Zero());
+  __ mov(r0, Operand(-1));
   __ CmpP(kJavaScriptCallCodeStartRegister, scratch);
   __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
 }
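Mask generation here is branchless: the poison register starts at zero (the poisoned, wrong-path value) and flips to all ones only when the code-start register matches the expected value. Equivalent logic (my sketch):

```cpp
// poison = (code_start == expected) ? ~0 : 0, with no branch:
// mov(kSpeculationPoisonRegister, Operand::Zero());     // assume misspeculation
// mov(r0, Operand(-1));                                 // all-ones candidate
// CmpP(kJavaScriptCallCodeStartRegister, scratch);
// LoadOnConditionP(eq, kSpeculationPoisonRegister, r0); // keep bits if equal
```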
@@ -1417,13 +1417,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       AssembleReturn(instr->InputAt(0));
       break;
     case kArchFramePointer:
-      __ LoadRR(i.OutputRegister(), fp);
+      __ mov(i.OutputRegister(), fp);
       break;
     case kArchParentFramePointer:
       if (frame_access_state()->has_frame()) {
         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
       } else {
-        __ LoadRR(i.OutputRegister(), fp);
+        __ mov(i.OutputRegister(), fp);
       }
       break;
     case kArchStackPointerGreaterThan: {
@@ -1930,7 +1930,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 #if V8_TARGET_ARCH_S390X
     case kS390_Cntlz64: {
       __ flogr(r0, i.InputRegister(0));
-      __ LoadRR(i.OutputRegister(), r0);
+      __ mov(i.OutputRegister(), r0);
       break;
     }
 #endif
@@ -2122,7 +2122,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
                               kRoundToNearest);
       __ b(Condition(0xE), &done, Label::kNear);  // normal case
-      __ lghi(i.OutputRegister(0), Operand::Zero());
+      __ mov(i.OutputRegister(0), Operand::Zero());
       __ bind(&done);
       break;
     }
@@ -2131,21 +2131,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
                                       i.InputDoubleRegister(0));
       __ b(Condition(0xE), &done, Label::kNear);  // normal case
-      __ lghi(i.OutputRegister(0), Operand::Zero());
+      __ mov(i.OutputRegister(0), Operand::Zero());
       __ bind(&done);
       break;
     }
     case kS390_DoubleToInt64: {
       Label done;
       if (i.OutputCount() > 1) {
-        __ lghi(i.OutputRegister(1), Operand(1));
+        __ mov(i.OutputRegister(1), Operand(1));
       }
       __ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
       __ b(Condition(0xE), &done, Label::kNear);  // normal case
       if (i.OutputCount() > 1) {
-        __ lghi(i.OutputRegister(1), Operand::Zero());
+        __ mov(i.OutputRegister(1), Operand::Zero());
       } else {
-        __ lghi(i.OutputRegister(0), Operand::Zero());
+        __ mov(i.OutputRegister(0), Operand::Zero());
       }
       __ bind(&done);
       break;
@@ -2153,15 +2153,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kS390_DoubleToUint64: {
       Label done;
       if (i.OutputCount() > 1) {
-        __ lghi(i.OutputRegister(1), Operand(1));
+        __ mov(i.OutputRegister(1), Operand(1));
       }
       __ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
                                       i.InputDoubleRegister(0));
       __ b(Condition(0xE), &done, Label::kNear);  // normal case
       if (i.OutputCount() > 1) {
-        __ lghi(i.OutputRegister(1), Operand::Zero());
+        __ mov(i.OutputRegister(1), Operand::Zero());
       } else {
-        __ lghi(i.OutputRegister(0), Operand::Zero());
+        __ mov(i.OutputRegister(0), Operand::Zero());
       }
       __ bind(&done);
       break;
@@ -2189,7 +2189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
         // because 0 allows easier out-of-bounds detection.
         __ b(Condition(0xE), &done, Label::kNear);  // normal case
-        __ lghi(i.OutputRegister(0), Operand::Zero());
+        __ mov(i.OutputRegister(0), Operand::Zero());
       }
       __ bind(&done);
       break;
@@ -2197,15 +2197,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kS390_Float32ToUint64: {
       Label done;
       if (i.OutputCount() > 1) {
-        __ lghi(i.OutputRegister(1), Operand(1));
+        __ mov(i.OutputRegister(1), Operand(1));
       }
       __ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
                                        i.InputDoubleRegister(0));
       __ b(Condition(0xE), &done, Label::kNear);  // normal case
       if (i.OutputCount() > 1) {
-        __ lghi(i.OutputRegister(1), Operand::Zero());
+        __ mov(i.OutputRegister(1), Operand::Zero());
       } else {
-        __ lghi(i.OutputRegister(0), Operand::Zero());
+        __ mov(i.OutputRegister(0), Operand::Zero());
       }
       __ bind(&done);
       break;
@@ -2213,14 +2213,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kS390_Float32ToInt64: {
       Label done;
       if (i.OutputCount() > 1) {
-        __ lghi(i.OutputRegister(1), Operand(1));
+        __ mov(i.OutputRegister(1), Operand(1));
       }
       __ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
       __ b(Condition(0xE), &done, Label::kNear);  // normal case
       if (i.OutputCount() > 1) {
-        __ lghi(i.OutputRegister(1), Operand::Zero());
+        __ mov(i.OutputRegister(1), Operand::Zero());
       } else {
-        __ lghi(i.OutputRegister(0), Operand::Zero());
+        __ mov(i.OutputRegister(0), Operand::Zero());
       }
       __ bind(&done);
       break;
@@ -3500,7 +3500,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kS390_F32x4RecipApprox: {
-      __ lgfi(kScratchReg, Operand(1));
+      __ mov(kScratchReg, Operand(1));
       __ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
 #ifdef V8_TARGET_BIG_ENDIAN
       __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
@@ -3516,7 +3516,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DoubleRegister tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
       __ vfsq(tempFPReg1, i.InputSimd128Register(0), Condition(0), Condition(0),
               Condition(2));
-      __ lgfi(kScratchReg, Operand(1));
+      __ mov(kScratchReg, Operand(1));
       __ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
 #ifdef V8_TARGET_BIG_ENDIAN
       __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
@@ -3560,7 +3560,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Simd128Register src = i.InputSimd128Register(0);
       Register dst = i.OutputRegister();
       Register temp = i.TempRegister(0);
-      __ lgfi(dst, Operand(1));
+      __ mov(dst, Operand(1));
       __ xgr(temp, temp);
       __ vtm(src, src, Condition(0), Condition(0), Condition(0));
       __ locgr(Condition(8), dst, temp);
@@ -3570,7 +3570,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Simd128Register src = i.InputSimd128Register(0);                          \
     Register dst = i.OutputRegister();                                        \
     Register temp = i.TempRegister(0);                                        \
-    __ lgfi(temp, Operand(1));                                                \
+    __ mov(temp, Operand(1));                                                 \
     __ xgr(dst, dst);                                                         \
     __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,            \
           Condition(0), Condition(0), Condition(2));                          \
@@ -3617,12 +3617,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kS390_S128Const: {
 #ifdef V8_TARGET_BIG_ENDIAN
       for (int index = 0, j = 0; index < 2; index++, j = +2) {
-        __ lgfi(index < 1 ? ip : r0, Operand(i.InputInt32(j)));
+        __ mov(index < 1 ? ip : r0, Operand(i.InputInt32(j)));
         __ iihf(index < 1 ? ip : r0, Operand(i.InputInt32(j + 1)));
       }
 #else
       for (int index = 0, j = 0; index < 2; index++, j = +2) {
-        __ lgfi(index < 1 ? r0 : ip, Operand(i.InputInt32(j)));
+        __ mov(index < 1 ? r0 : ip, Operand(i.InputInt32(j)));
         __ iihf(index < 1 ? r0 : ip, Operand(i.InputInt32(j + 1)));
       }
 #endif
@@ -3932,10 +3932,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // create 2 * 8 byte inputs indicating new indices
       for (int i = 0, j = 0; i < 2; i++, j = +2) {
 #ifdef V8_TARGET_BIG_ENDIAN
-        __ lgfi(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
+        __ mov(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
         __ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
 #else
-        __ lgfi(i < 1 ? r0 : ip, Operand(k8x16_indices[j]));
+        __ mov(i < 1 ? r0 : ip, Operand(k8x16_indices[j]));
         __ iihf(i < 1 ? r0 : ip, Operand(k8x16_indices[j + 1]));
 #endif
       }
@@ -3975,10 +3975,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kS390_I64x2BitMask: {
 #ifdef V8_TARGET_BIG_ENDIAN
-      __ lgfi(kScratchReg, Operand(0x80800040));
+      __ mov(kScratchReg, Operand(0x80800040));
       __ iihf(kScratchReg, Operand(0x80808080));  // Zeroing the high bits.
 #else
-      __ lgfi(kScratchReg, Operand(0x80808080));
+      __ mov(kScratchReg, Operand(0x80808080));
       __ iihf(kScratchReg, Operand(0x40008080));
 #endif
       __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
@@ -3990,10 +3990,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kS390_I32x4BitMask: {
 #ifdef V8_TARGET_BIG_ENDIAN
-      __ lgfi(kScratchReg, Operand(0x204060));
+      __ mov(kScratchReg, Operand(0x204060));
       __ iihf(kScratchReg, Operand(0x80808080));  // Zeroing the high bits.
 #else
-      __ lgfi(kScratchReg, Operand(0x80808080));
+      __ mov(kScratchReg, Operand(0x80808080));
       __ iihf(kScratchReg, Operand(0x60402000));
 #endif
       __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
@@ -4005,10 +4005,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kS390_I16x8BitMask: {
 #ifdef V8_TARGET_BIG_ENDIAN
-      __ lgfi(kScratchReg, Operand(0x40506070));
+      __ mov(kScratchReg, Operand(0x40506070));
       __ iihf(kScratchReg, Operand(0x102030));
 #else
-      __ lgfi(kScratchReg, Operand(0x30201000));
+      __ mov(kScratchReg, Operand(0x30201000));
       __ iihf(kScratchReg, Operand(0x70605040));
 #endif
       __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
@@ -4020,14 +4020,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kS390_I8x16BitMask: {
 #ifdef V8_TARGET_BIG_ENDIAN
-      __ lgfi(r0, Operand(0x60687078));
+      __ mov(r0, Operand(0x60687078));
       __ iihf(r0, Operand(0x40485058));
-      __ lgfi(ip, Operand(0x20283038));
+      __ mov(ip, Operand(0x20283038));
       __ iihf(ip, Operand(0x81018));
 #else
-      __ lgfi(ip, Operand(0x58504840));
+      __ mov(ip, Operand(0x58504840));
       __ iihf(ip, Operand(0x78706860));
-      __ lgfi(r0, Operand(0x18100800));
+      __ mov(r0, Operand(0x18100800));
       __ iihf(r0, Operand(0x38302820));
 #endif
       __ vlvgp(kScratchDoubleReg, ip, r0);
@@ -4173,7 +4173,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
   }
   condition = NegateFlagsCondition(condition);
-  __ LoadImmP(r0, Operand::Zero());
+  __ mov(r0, Operand::Zero());
   __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
                       kSpeculationPoisonRegister, r0);
 }
@@ -4272,14 +4272,14 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   Condition cond = FlagsConditionToCondition(condition, op);
   Label done;
   if (check_unordered) {
-    __ LoadImmP(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
-                                                              : Operand(1));
+    __ mov(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
+                                                         : Operand(1));
     __ bunordered(&done);
   }
   // TODO(john.yan): use load imm high on condition here
-  __ LoadImmP(reg, Operand::Zero());
-  __ LoadImmP(kScratchReg, Operand(1));
+  __ mov(reg, Operand::Zero());
+  __ mov(kScratchReg, Operand(1));
   // locr is sufficient since reg's upper 32 bits are guaranteed to be 0
   __ locr(cond, reg, kScratchReg);
   __ bind(&done);
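The boolean materialization here is branchless: preload 0, then conditionally overwrite with 1 via the 32-bit load-on-condition. As the comment notes, locr is only safe because reg's upper 32 bits are already zero. The logic, spelled out (my sketch):

```cpp
// reg = condition_holds ? 1 : 0, without a branch:
// mov(reg, Operand::Zero());     // reg     = 0  (assume false)
// mov(kScratchReg, Operand(1));  // scratch = 1
// locr(cond, reg, kScratchReg);  // if (cond) reg = scratch
```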
@@ -4346,7 +4346,7 @@ void CodeGenerator::AssembleConstructFrame() {
         __ lay(sp, MemOperand(sp, -kSystemPointerSize));
       } else {
         __ Push(r14, fp);
-        __ LoadRR(fp, sp);
+        __ mov(fp, sp);
       }
     } else if (call_descriptor->IsJSFunctionCall()) {
       __ Prologue(ip);
......
@@ -35,14 +35,14 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
   //  - Leave the frame.
   //  - Restart the frame by calling the function.
-  __ LoadRR(fp, r3);
+  __ mov(fp, r3);
   __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
   __ LeaveFrame(StackFrame::INTERNAL);
   __ LoadTaggedPointerField(
       r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   __ LoadU16(
       r2, FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ LoadRR(r4, r2);
+  __ mov(r4, r2);
   __ InvokeFunction(r3, r4, r2, JUMP_FUNCTION);
 }
......
@@ -5892,29 +5892,35 @@ EVALUATE(OILL) {
 }
 EVALUATE(LLIHH) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(LLIHH);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  uint64_t imm = static_cast<uint64_t>(i2) & 0xffff;
+  set_register(r1, imm << 48);
+  return length;
 }
 EVALUATE(LLIHL) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(LLIHL);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  uint64_t imm = static_cast<uint64_t>(i2) & 0xffff;
+  set_register(r1, imm << 32);
+  return length;
 }
 EVALUATE(LLILH) {
   DCHECK_OPCODE(LLILH);
   DECODE_RI_A_INSTRUCTION(instr, r1, i2);
-  uint64_t imm = static_cast<uint64_t>(i2);
-  set_register(r1, (imm << 48) >> 32);
+  uint64_t imm = static_cast<uint64_t>(i2) & 0xffff;
+  set_register(r1, imm << 16);
   return length;
 }
 EVALUATE(LLILL) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(LLILL);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  uint64_t imm = static_cast<uint64_t>(i2) & 0xffff;
+  set_register(r1, imm);
+  return length;
 }
 inline static int TestUnderMask(uint16_t val, uint16_t mask,
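These four handlers implement the z/Architecture load-logical-immediate family: each places a 16-bit immediate into one halfword of the 64-bit register and zeroes the remaining 48 bits; the shift selects the halfword. (Note the DCHECK_OPCODE in the LLIHH handler must name LLIHH, matching the pattern of the other three.)

```cpp
// LLIHH: r1 = imm << 48   (bits 63..48)
// LLIHL: r1 = imm << 32   (bits 47..32)
// LLILH: r1 = imm << 16   (bits 31..16)
// LLILL: r1 = imm         (bits 15..0)
// e.g. LLIHL r1, 0x1234  =>  r1 = 0x0000123400000000
```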
......
@@ -121,7 +121,7 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
   // If the code gets too big or corrupted, an internal exception will be
   // raised, and we will exit right away.
   __ bind(&internal_failure_label_);
-  __ LoadImmP(r2, Operand(FAILURE));
+  __ mov(r2, Operand(FAILURE));
   __ Ret();
   __ bind(&start_label_);  // And then continue from here.
 }
@@ -338,9 +338,9 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
     // Address of start of capture.
     __ AddP(r2, end_of_input_address());
     // Length of capture.
-    __ LoadRR(r4, r3);
+    __ mov(r4, r3);
     // Save length in callee-save register for use on return.
-    __ LoadRR(r6, r3);
+    __ mov(r6, r3);
     // Address of current input position.
     __ AddP(r3, current_input_offset(), end_of_input_address());
     if (read_backward) {
@@ -620,7 +620,7 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
 }
 void RegExpMacroAssemblerS390::Fail() {
-  __ LoadImmP(r2, Operand(FAILURE));
+  __ mov(r2, Operand(FAILURE));
   __ b(&exit_label_);
 }
@@ -674,13 +674,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
   //
   // Set frame pointer in space for it if this is not a direct call
   // from generated code.
-  __ LoadRR(frame_pointer(), sp);
+  __ mov(frame_pointer(), sp);
   __ lay(sp, MemOperand(sp, -10 * kSystemPointerSize));
   STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
   __ mov(r1, Operand::Zero());  // success counter
   STATIC_ASSERT(kStringStartMinusOne ==
                 kSuccessfulCaptures - kSystemPointerSize);
-  __ LoadRR(r0, r1);  // offset of location
+  __ mov(r0, r1);  // offset of location
   __ StoreMultipleP(r0, r9, MemOperand(sp, 0));
   STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
   __ Push(r1);  // The backtrack counter.
@@ -723,7 +723,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
   __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
   // Set r1 to address of char before start of the input string
   // (effectively string position -1).
-  __ LoadRR(r1, r4);
+  __ mov(r1, r4);
   __ SubP(r1, current_input_offset(), Operand(char_size()));
   if (mode_ == UC16) {
     __ ShiftLeftP(r0, r3, Operand(1));
@@ -885,7 +885,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
       __ b(&load_char_start_regexp);
     } else {
-      __ LoadImmP(r2, Operand(SUCCESS));
+      __ mov(r2, Operand(SUCCESS));
     }
   }
@@ -897,7 +897,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
   __ bind(&return_r2);
   // Skip sp past regexp registers and local variables.
-  __ LoadRR(sp, frame_pointer());
+  __ mov(sp, frame_pointer());
   // Restore registers r6..r15.
   __ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kSystemPointerSize));
@@ -934,7 +934,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
     // Call GrowStack(backtrack_stackpointer(), &stack_base)
     static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments, r2);
-    __ LoadRR(r2, backtrack_stackpointer());
+    __ mov(r2, backtrack_stackpointer());
     __ AddP(r3, frame_pointer(), Operand(kStackHighEnd));
     __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
     ExternalReference grow_stack = ExternalReference::re_grow_stack(isolate());
@@ -944,7 +944,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
     __ CmpP(r2, Operand::Zero());
     __ beq(&exit_with_exception);
     // Otherwise use return value as new stack pointer.
-    __ LoadRR(backtrack_stackpointer(), r2);
+    __ mov(backtrack_stackpointer(), r2);
     // Restore saved registers and continue.
     SafeReturn();
   }
@@ -953,13 +953,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
     // If any of the code above needed to exit with an exception.
     __ bind(&exit_with_exception);
     // Exit with Result EXCEPTION(-1) to signal thrown exception.
-    __ LoadImmP(r2, Operand(EXCEPTION));
+    __ mov(r2, Operand(EXCEPTION));
     __ b(&return_r2);
   }
   if (fallback_label_.is_linked()) {
     __ bind(&fallback_label_);
-    __ LoadImmP(r2, Operand(FALLBACK_TO_EXPERIMENTAL));
+    __ mov(r2, Operand(FALLBACK_TO_EXPERIMENTAL));
     __ b(&return_r2);
   }
@@ -1098,7 +1098,7 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
   static constexpr int num_arguments = 3;
   __ PrepareCallCFunction(num_arguments, scratch);
   // RegExp code frame pointer.
-  __ LoadRR(r4, frame_pointer());
+  __ mov(r4, frame_pointer());
   // Code of self.
   __ mov(r3, Operand(masm_->CodeObject()));
   // r2 becomes return address pointer.
@@ -1208,7 +1208,7 @@ void RegExpMacroAssemblerS390::SafeReturn() {
 void RegExpMacroAssemblerS390::SafeCallTarget(Label* name) {
   __ bind(name);
   __ CleanseP(r14);
-  __ LoadRR(r0, r14);
+  __ mov(r0, r14);
   __ mov(ip, Operand(masm_->CodeObject()));
   __ SubP(r0, r0, ip);
   __ push(r0);
......