Commit 84f96903 authored by Junliang Yan, committed by Commit Bot

s390x: cleanup StoreXXX as std

Drive-By: Also clean up LoadSimd128 as LoadV128 and remove
a few unused functions

Change-Id: I4cdee0fcb1e153309492026b4334af27afba7ec1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2584442
Commit-Queue: Junliang Yan <junyan@redhat.com>
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/master@{#71701}
parent 72249208
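
In summary, the diff renames the TurboAssembler store helpers to size-explicit names: StoreP becomes StoreU64 (pointer-sized; stg on 64-bit targets), StoreW becomes StoreU32, StoreHalfWord becomes StoreU16, StoreByte becomes StoreU8, StoreDouble becomes StoreF64, StoreFloat32 becomes StoreF32, and the drive-by renames LoadSimd128/StoreSimd128 to LoadV128/StoreV128 and LoadFloat32ToDouble to LoadF32AsF64. A minimal before/after sketch, taken from the hunks below (same emitted instructions, only the names change):

```cpp
// Before (old names):
__ StoreP(fp, MemOperand(r7));        // pointer-sized store
__ StoreW(r1, FieldMemOperand(feedback_vector,
                              FeedbackVector::kInvocationCountOffset));

// After (size-explicit names):
__ StoreU64(fp, MemOperand(r7));
__ StoreU32(r1, FieldMemOperand(feedback_vector,
                                FeedbackVector::kInvocationCountOffset));
```
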
......@@ -590,7 +590,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Move(r7, js_entry_sp);
__ LoadAndTestP(scrach, MemOperand(r7));
__ bne(&non_outermost_js, Label::kNear);
__ StoreP(fp, MemOperand(r7));
__ StoreU64(fp, MemOperand(r7));
__ Load(scrach, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont, Label::kNear);
......@@ -598,7 +598,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Load(scrach, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ StoreP(scrach, MemOperand(sp)); // frame-type
__ StoreU64(scrach, MemOperand(sp)); // frame-type
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
......@@ -618,7 +618,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
masm->isolate()));
__ StoreP(r2, MemOperand(scrach));
__ StoreU64(r2, MemOperand(scrach));
__ LoadRoot(r2, RootIndex::kException);
__ b(&exit, Label::kNear);
......@@ -653,14 +653,14 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ bne(&non_outermost_js_2, Label::kNear);
__ mov(scrach, Operand::Zero());
__ Move(r7, js_entry_sp);
__ StoreP(scrach, MemOperand(r7));
__ StoreU64(scrach, MemOperand(r7));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(r5);
__ Move(scrach, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
__ StoreP(r5, MemOperand(scrach));
__ StoreU64(r5, MemOperand(scrach));
// Reset the stack to the callee saved registers.
__ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
......@@ -1125,7 +1125,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadS32(r1, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
__ AddP(r1, r1, Operand(1));
__ StoreW(r1, FieldMemOperand(feedback_vector,
__ StoreU32(r1, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
// Open a frame scope to indicate that there is a frame on the stack. The
......@@ -1142,7 +1142,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ lghi(r1, Operand(0));
__ StoreHalfWord(r1,
__ StoreU16(r1,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrNestingLevelOffset),
r0);
......@@ -1192,7 +1192,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpP(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
__ ShiftLeftP(r8, r8, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(fp, r8));
__ StoreU64(r5, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
// Perform interrupt stack check.
......@@ -1252,7 +1252,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
kFunctionEntryBytecodeOffset)));
__ StoreP(kInterpreterBytecodeOffsetRegister,
__ StoreU64(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ CallRuntime(Runtime::kStackGuard);
......@@ -1266,7 +1266,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
__ StoreP(r0,
__ StoreU64(r0,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ jmp(&after_stack_check_interrupt);
......@@ -1541,7 +1541,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
__ StoreP(r4,
__ StoreU64(r4,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
......@@ -1577,7 +1577,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
} else {
// Overwrite the hole inserted by the deoptimizer with the return value
// from the LAZY deopt point.
__ StoreP(
__ StoreU64(
r2, MemOperand(
sp, config->num_allocatable_general_registers() *
kSystemPointerSize +
......@@ -1598,7 +1598,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ AddP(r2, r2,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(scratch, MemOperand(sp, r1));
__ StoreU64(scratch, MemOperand(sp, r1));
// Recover arguments count.
__ SubP(r2, r2,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
......@@ -1716,7 +1716,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&done);
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r7, MemOperand(sp));
__ StoreU64(r7, MemOperand(sp));
}
// ----------- S t a t e -------------
......@@ -1802,7 +1802,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ bind(&done);
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r7, MemOperand(sp));
__ StoreU64(r7, MemOperand(sp));
}
// ----------- S t a t e -------------
......@@ -1853,7 +1853,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&done);
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r6, MemOperand(sp));
__ StoreU64(r6, MemOperand(sp));
}
// ----------- S t a t e -------------
......@@ -1891,11 +1891,11 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Cleanse the top nibble of 31-bit pointers.
__ CleanseP(r14);
__ StoreP(r14, MemOperand(sp, 4 * kSystemPointerSize));
__ StoreP(fp, MemOperand(sp, 3 * kSystemPointerSize));
__ StoreP(r6, MemOperand(sp, 2 * kSystemPointerSize));
__ StoreP(r3, MemOperand(sp, 1 * kSystemPointerSize));
__ StoreP(r2, MemOperand(sp, 0 * kSystemPointerSize));
__ StoreU64(r14, MemOperand(sp, 4 * kSystemPointerSize));
__ StoreU64(fp, MemOperand(sp, 3 * kSystemPointerSize));
__ StoreU64(r6, MemOperand(sp, 2 * kSystemPointerSize));
__ StoreU64(r3, MemOperand(sp, 1 * kSystemPointerSize));
__ StoreU64(r2, MemOperand(sp, 0 * kSystemPointerSize));
__ Push(Smi::zero()); // Padding.
__ la(fp,
MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
......@@ -1967,7 +1967,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&copy);
__ LoadP(r0, MemOperand(src));
__ lay(src, MemOperand(src, kSystemPointerSize));
__ StoreP(r0, MemOperand(dest));
__ StoreU64(r0, MemOperand(dest));
__ lay(dest, MemOperand(dest, kSystemPointerSize));
__ SubP(num, num, Operand(1));
__ bind(&check);
......@@ -1989,7 +1989,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bne(&skip, Label::kNear);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
__ StoreP(scratch, MemOperand(r7));
__ StoreU64(scratch, MemOperand(r7));
__ lay(r7, MemOperand(r7, kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
......@@ -2109,7 +2109,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&copy);
__ LoadP(r0, MemOperand(src));
__ lay(src, MemOperand(src, kSystemPointerSize));
__ StoreP(r0, MemOperand(dest));
__ StoreU64(r0, MemOperand(dest));
__ lay(dest, MemOperand(dest, kSystemPointerSize));
__ SubP(num, num, Operand(1));
__ bind(&check);
......@@ -2127,7 +2127,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SubP(r7, r7, Operand(1));
__ ShiftLeftP(r1, r7, Operand(kSystemPointerSizeLog2));
__ LoadP(scratch, MemOperand(r6, r1));
__ StoreP(scratch, MemOperand(r4, r1));
__ StoreU64(scratch, MemOperand(r4, r1));
__ CmpP(r7, Operand::Zero());
__ bne(&loop);
}
......@@ -2859,7 +2859,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
Label skip;
__ CmpP(cp, Operand::Zero());
__ beq(&skip, Label::kNear);
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
// Reset the masking register. This is done independent of the underlying
......@@ -2980,7 +2980,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
argument_offset -= 2 * kSystemPointerSize;
__ bind(&fastpath_done);
__ StoreP(result_reg, MemOperand(sp, argument_offset));
__ StoreU64(result_reg, MemOperand(sp, argument_offset));
__ Pop(result_reg, scratch);
__ Ret();
......@@ -3052,7 +3052,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ LoadP(r7, MemOperand(r9, kLimitOffset));
__ LoadU32(r8, MemOperand(r9, kLevelOffset));
__ AddP(r8, Operand(1));
__ StoreW(r8, MemOperand(r9, kLevelOffset));
__ StoreU32(r8, MemOperand(r9, kLevelOffset));
__ StoreReturnAddressAndCall(scratch);
......@@ -3066,14 +3066,14 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ StoreP(r6, MemOperand(r9, kNextOffset));
__ StoreU64(r6, MemOperand(r9, kNextOffset));
if (__ emit_debug_code()) {
__ LoadU32(r3, MemOperand(r9, kLevelOffset));
__ CmpP(r3, r8);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ SubP(r8, Operand(1));
__ StoreW(r8, MemOperand(r9, kLevelOffset));
__ StoreU32(r8, MemOperand(r9, kLevelOffset));
__ CmpP(r7, MemOperand(r9, kLimitOffset));
__ bne(&delete_allocated_handles, Label::kNear);
......@@ -3103,7 +3103,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
__ StoreP(r7, MemOperand(r9, kLimitOffset));
__ StoreU64(r7, MemOperand(r9, kLimitOffset));
__ LoadRR(r6, r2);
__ PrepareCallCFunction(1, r7);
__ Move(r2, ExternalReference::isolate_address(isolate));
......@@ -3158,22 +3158,22 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
__ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kSystemPointerSize)));
// kHolder.
__ StoreP(holder, MemOperand(sp, 0 * kSystemPointerSize));
__ StoreU64(holder, MemOperand(sp, 0 * kSystemPointerSize));
// kIsolate.
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ StoreP(scratch, MemOperand(sp, 1 * kSystemPointerSize));
__ StoreU64(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ StoreP(scratch, MemOperand(sp, 2 * kSystemPointerSize));
__ StoreP(scratch, MemOperand(sp, 3 * kSystemPointerSize));
__ StoreU64(scratch, MemOperand(sp, 2 * kSystemPointerSize));
__ StoreU64(scratch, MemOperand(sp, 3 * kSystemPointerSize));
// kData.
__ StoreP(call_data, MemOperand(sp, 4 * kSystemPointerSize));
__ StoreU64(call_data, MemOperand(sp, 4 * kSystemPointerSize));
// kNewTarget.
__ StoreP(scratch, MemOperand(sp, 5 * kSystemPointerSize));
__ StoreU64(scratch, MemOperand(sp, 5 * kSystemPointerSize));
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
......@@ -3195,18 +3195,18 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
__ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
kSystemPointerSize));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ AddP(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
__ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
kSystemPointerSize));
// FunctionCallbackInfo::length_.
__ StoreW(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) *
__ StoreU32(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) *
kSystemPointerSize));
// We also store the number of bytes to drop from the stack after returning
......@@ -3215,7 +3215,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2));
__ AddP(scratch, r1);
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
__ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
kSystemPointerSize));
// v8::InvocationCallback's argument.
......@@ -3310,13 +3310,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
if (!ABI_PASSES_HANDLES_IN_REGS) {
// pass 1st arg by reference
__ StoreP(r2, MemOperand(sp, arg0Slot * kSystemPointerSize));
__ StoreU64(r2, MemOperand(sp, arg0Slot * kSystemPointerSize));
__ AddP(r2, sp, Operand(arg0Slot * kSystemPointerSize));
}
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
__ StoreP(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
__ StoreU64(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
__ AddP(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize));
// r3 = v8::PropertyCallbackInfo&
......@@ -3365,7 +3365,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ StoreDouble(dreg, MemOperand(sp, offset));
__ StoreF64(dreg, MemOperand(sp, offset));
}
// Push all GPRs onto the stack
......@@ -3374,7 +3374,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate));
__ StoreP(fp, MemOperand(r1));
__ StoreU64(fp, MemOperand(r1));
static constexpr int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
......@@ -3404,7 +3404,8 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
__ Move(r7, ExternalReference::isolate_address(isolate));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
__ StoreU64(r7,
MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// Call Deoptimizer::New().
{
......@@ -3427,7 +3428,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r4, MemOperand(r3, offset));
__ StoreU64(r4, MemOperand(r3, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
......@@ -3440,7 +3441,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
// TODO(joransiu): MVC opportunity
__ LoadF64(d0, MemOperand(sp, src_offset));
__ StoreDouble(d0, MemOperand(r3, dst_offset));
__ StoreF64(d0, MemOperand(r3, dst_offset));
}
// Mark the stack as not iterable for the CPU profiler which won't be able to
......@@ -3451,7 +3452,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
Register zero = r6;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ lhi(zero, Operand(0));
__ StoreByte(zero, MemOperand(is_iterable));
__ StoreU8(zero, MemOperand(is_iterable));
}
// Remove the saved registers from the stack.
......@@ -3471,7 +3472,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ b(&pop_loop_header, Label::kNear);
__ bind(&pop_loop);
__ pop(r6);
__ StoreP(r6, MemOperand(r5, 0));
__ StoreU64(r6, MemOperand(r5, 0));
__ la(r5, MemOperand(r5, kSystemPointerSize));
__ bind(&pop_loop_header);
__ CmpP(r4, sp);
......@@ -3551,7 +3552,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
Register one = r6;
__ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
__ lhi(one, Operand(1));
__ StoreByte(one, MemOperand(is_iterable));
__ StoreU8(one, MemOperand(is_iterable));
}
__ pop(ip); // get continuation, leave pc on stack
......
......@@ -580,7 +580,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kSystemPointerSize;
StoreP(ToRegister(i), MemOperand(location, stack_offset));
StoreU64(ToRegister(i), MemOperand(location, stack_offset));
}
}
}
......@@ -606,7 +606,7 @@ void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
if ((dregs & (1 << i)) != 0) {
DoubleRegister dreg = DoubleRegister::from_code(i);
stack_offset -= kDoubleSize;
StoreDouble(dreg, MemOperand(location, stack_offset));
StoreF64(dreg, MemOperand(location, stack_offset));
}
}
}
......@@ -668,10 +668,10 @@ void TurboAssembler::StoreTaggedField(const Register& value,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
RecordComment("[ StoreTagged");
StoreW(value, dst_field_operand);
StoreU32(value, dst_field_operand);
RecordComment("]");
} else {
StoreP(value, dst_field_operand, scratch);
StoreU64(value, dst_field_operand, scratch);
}
}
......@@ -1317,16 +1317,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
if (emit_debug_code()) {
StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
StoreU64(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(),
r1);
}
// Save the frame pointer and the context in top.
Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
StoreP(fp, MemOperand(r1));
StoreU64(fp, MemOperand(r1));
Move(r1,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
StoreP(cp, MemOperand(r1));
StoreU64(cp, MemOperand(r1));
// Optionally save all volatile double registers.
if (save_doubles) {
......@@ -1348,11 +1349,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
}
lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
StoreP(MemOperand(sp), Operand::Zero(), r0);
StoreU64(MemOperand(sp), Operand::Zero(), r0);
// Set the exit frame sp value to point just before the return address
// location.
lay(r1, MemOperand(sp, kStackFrameSPSlot * kSystemPointerSize));
StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
StoreU64(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
int TurboAssembler::ActivationFrameAlignment() {
......@@ -1385,7 +1386,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Clear top frame.
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
StoreP(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);
StoreU64(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);
// Restore current context from top and clear it in debug mode.
Move(ip,
......@@ -1396,7 +1397,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
mov(r1, Operand(Context::kInvalidContext));
Move(ip,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
StoreP(r1, MemOperand(ip));
StoreU64(r1, MemOperand(ip));
#endif
// Tear down the exit frame, pop the arguments, and return.
......@@ -1459,7 +1460,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
LoadRR(r1, tmp_reg);
bind(&loop);
LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
StoreP(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
StoreU64(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
lay(src_reg, MemOperand(src_reg, -kSystemPointerSize));
lay(dst_reg, MemOperand(dst_reg, -kSystemPointerSize));
BranchOnCount(r1, &loop);
......@@ -1542,7 +1543,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&copy);
LoadP(r0, MemOperand(src));
lay(src, MemOperand(src, kSystemPointerSize));
StoreP(r0, MemOperand(dest));
StoreU64(r0, MemOperand(dest));
lay(dest, MemOperand(dest, kSystemPointerSize));
SubP(num, num, Operand(1));
bind(&check);
......@@ -1554,7 +1555,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
{
Label loop;
bind(&loop);
StoreP(scratch, MemOperand(ip));
StoreU64(scratch, MemOperand(ip));
lay(ip, MemOperand(ip, kSystemPointerSize));
SubP(expected_parameter_count, expected_parameter_count, Operand(1));
bgt(&loop);
......@@ -1729,13 +1730,13 @@ void MacroAssembler::PushStackHandler() {
// Store padding.
lghi(r0, Operand::Zero());
StoreP(r0, MemOperand(sp)); // Padding.
StoreU64(r0, MemOperand(sp)); // Padding.
// Copy the old handler into the next handler slot.
MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
Operand(kSystemPointerSize));
// Set this new handler as the current one.
StoreP(sp, MemOperand(r7));
StoreU64(sp, MemOperand(r7));
}
void MacroAssembler::PopStackHandler() {
......@@ -1746,7 +1747,7 @@ void MacroAssembler::PopStackHandler() {
Pop(r3);
Move(ip,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
StoreP(r3, MemOperand(ip));
StoreU64(r3, MemOperand(ip));
Drop(1); // Drop padding.
}
......@@ -1801,7 +1802,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
push(r14);
// Put input on stack.
lay(sp, MemOperand(sp, -kDoubleSize));
StoreDouble(double_input, MemOperand(sp));
StoreF64(double_input, MemOperand(sp));
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
......@@ -1889,7 +1890,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
// @TODO(john.yan): can be optimized by asi()
LoadS32(scratch1, MemOperand(scratch2));
AddP(scratch1, Operand(value));
StoreW(scratch1, MemOperand(scratch2));
StoreU32(scratch1, MemOperand(scratch2));
}
}
......@@ -1901,7 +1902,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// @TODO(john.yan): can be optimized by asi()
LoadS32(scratch1, MemOperand(scratch2));
AddP(scratch1, Operand(-value));
StoreW(scratch1, MemOperand(scratch2));
StoreU32(scratch1, MemOperand(scratch2));
}
}
......@@ -2097,7 +2098,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp,
Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
StoreP(scratch,
StoreU64(scratch,
MemOperand(sp, (stack_passed_arguments)*kSystemPointerSize));
} else {
stack_space += stack_passed_arguments;
......@@ -2159,9 +2160,9 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// See x64 code for reasoning about how to address the isolate data fields.
if (root_array_available()) {
LoadPC(r0);
StoreP(r0, MemOperand(kRootRegister,
StoreU64(r0, MemOperand(kRootRegister,
IsolateData::fast_c_call_caller_pc_offset()));
StoreP(fp, MemOperand(kRootRegister,
StoreU64(fp, MemOperand(kRootRegister,
IsolateData::fast_c_call_caller_fp_offset()));
} else {
DCHECK_NOT_NULL(isolate());
......@@ -2169,10 +2170,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Move(addr_scratch,
ExternalReference::fast_c_call_caller_pc_address(isolate()));
LoadPC(r0);
StoreP(r0, MemOperand(addr_scratch));
StoreU64(r0, MemOperand(addr_scratch));
Move(addr_scratch,
ExternalReference::fast_c_call_caller_fp_address(isolate()));
StoreP(fp, MemOperand(addr_scratch));
StoreU64(fp, MemOperand(addr_scratch));
}
// Just call directly. The function called cannot cause a GC, or
......@@ -2191,14 +2192,14 @@ void TurboAssembler::CallCFunctionHelper(Register function,
lghi(zero_scratch, Operand::Zero());
if (root_array_available()) {
StoreP(
StoreU64(
zero_scratch,
MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
} else {
DCHECK_NOT_NULL(isolate());
Move(addr_scratch,
ExternalReference::fast_c_call_caller_fp_address(isolate()));
StoreP(zero_scratch, MemOperand(addr_scratch));
StoreU64(zero_scratch, MemOperand(addr_scratch));
}
int stack_passed_arguments =
......@@ -2877,25 +2878,6 @@ void TurboAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
// Subtract Instructions
//----------------------------------------------------------------------------
// Subtract Logical With Carry 32-bit (Register dst = Register src1 - Register
// src2)
void TurboAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
Register src2) {
if (dst != src2 && dst != src1) {
lr(dst, src1);
slbr(dst, src2);
} else if (dst != src2) {
// dst == src1
DCHECK(dst == src1);
slbr(dst, src2);
} else {
// dst == src2
DCHECK(dst == src2);
lr(r0, dst);
SubLogicalWithBorrow32(dst, src1, r0);
}
}
// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
if (dst != src2 && dst != src1) {
......@@ -3732,7 +3714,7 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
}
// Store a "pointer" sized value to the memory location
void TurboAssembler::StoreP(Register src, const MemOperand& mem,
void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(scratch != no_reg);
......@@ -3747,15 +3729,15 @@ void TurboAssembler::StoreP(Register src, const MemOperand& mem,
#if V8_TARGET_ARCH_S390X
stg(src, mem);
#else
// StoreW will try to generate ST if offset fits, otherwise
// StoreU32 will try to generate ST if offset fits, otherwise
// it'll generate STY.
StoreW(src, mem);
StoreU32(src, mem);
#endif
}
}
// Store a "pointer" sized constant to the memory location
void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
DCHECK_EQ(opnd.rmode(), RelocInfo::NONE);
......@@ -3770,7 +3752,7 @@ void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
#endif
} else {
LoadImmP(scratch, opnd);
StoreP(scratch, mem);
StoreU64(scratch, mem);
}
}
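
A hedged usage sketch of the two StoreU64 overloads defined above; the offsets and register choices here are illustrative, not taken from the diff:

```cpp
// Register overload: emits stg directly on 64-bit targets; when the
// 20-bit signed displacement does not fit, the offset must first be
// materialized in the scratch register (hence the DCHECK above).
__ StoreU64(r2, MemOperand(fp, 16));            // displacement fits
__ StoreU64(r2, MemOperand(fp, 1 << 20), r1);   // assumed large offset: needs scratch

// Immediate overload: loads the constant into scratch, then stores it,
// as in the EnterExitFrame hunk above.
__ StoreU64(MemOperand(sp), Operand::Zero(), r0);
```
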
......@@ -4041,15 +4023,7 @@ void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
}
}
// Load Single Precision (32-bit) Floating Point number from memory,
// and convert to Double Precision (64-bit)
void TurboAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
const MemOperand& mem) {
LoadF32(dst, mem);
ldebr(dst, dst);
}
void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
Register scratch) {
if (is_uint12(mem.offset())) {
vl(dst, mem, Condition(0));
......@@ -4061,7 +4035,7 @@ void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
}
// Store Double Precision (64-bit) Floating Point number to memory
void TurboAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
std(dst, mem);
} else {
......@@ -4070,7 +4044,7 @@ void TurboAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
}
// Store Single Precision (32-bit) Floating Point number to memory
void TurboAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
ste(src, mem);
} else {
......@@ -4078,16 +4052,7 @@ void TurboAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
}
}
// Convert Double precision (64-bit) to Single Precision (32-bit)
// and store resulting Float32 to memory
void TurboAssembler::StoreDoubleAsFloat32(DoubleRegister src,
const MemOperand& mem,
DoubleRegister scratch) {
ledbr(scratch, src);
StoreFloat32(scratch, mem);
}
void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem,
void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
Register scratch) {
if (is_uint12(mem.offset())) {
vst(src, mem, Condition(0));
......@@ -4178,8 +4143,7 @@ void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
void TurboAssembler::LoadFloat32ToDouble(DoubleRegister dst,
const MemOperand& opnd,
void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
ldeb(dst, opnd);
......@@ -4191,7 +4155,7 @@ void TurboAssembler::LoadFloat32ToDouble(DoubleRegister dst,
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
void TurboAssembler::StoreW(Register src, const MemOperand& mem,
void TurboAssembler::StoreU32(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
......@@ -4218,7 +4182,7 @@ void TurboAssembler::StoreW(Register src, const MemOperand& mem,
} else if (use_RXYform) {
sty(src, mem);
} else {
StoreW(src, MemOperand(base, scratch));
StoreU32(src, MemOperand(base, scratch));
}
}
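
The form selection above can be read as the following decision sketch. The first two predicates are inferred from the s390x RX/RXY encodings and the elided lines of the hunk, so treat them as assumptions; the use_RXYform branch and the index-register fallback are visible in the diff:

```cpp
// Assumed shape of StoreU32's encoding choice:
if (is_uint12(offset)) {
  st(src, mem);                              // RX form, 12-bit unsigned disp
} else if (is_int20(offset)) {               // use_RXYform in the diff
  sty(src, mem);                             // RXY form, 20-bit signed disp
} else {
  // Offset too large for either form: put it in the scratch register
  // (helper elided in the diff) and store via an index register instead.
  StoreU32(src, MemOperand(base, scratch));
}
```
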
......@@ -4260,7 +4224,7 @@ void TurboAssembler::LoadS16(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void TurboAssembler::StoreHalfWord(Register src, const MemOperand& mem,
void TurboAssembler::StoreU16(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
......@@ -4278,7 +4242,7 @@ void TurboAssembler::StoreHalfWord(Register src, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void TurboAssembler::StoreByte(Register src, const MemOperand& mem,
void TurboAssembler::StoreU8(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
......@@ -4467,7 +4431,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
DCHECK(!AreAliased(src, scratch));
LoadRR(scratch, src);
LoadP(src, dst);
StoreP(scratch, dst);
StoreU64(scratch, dst);
}
void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
......@@ -4479,8 +4443,8 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
DCHECK(!AreAliased(scratch_0, scratch_1));
LoadP(scratch_0, src);
LoadP(scratch_1, dst);
StoreP(scratch_0, dst);
StoreP(scratch_1, src);
StoreU64(scratch_0, dst);
StoreU64(scratch_1, src);
}
void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
......@@ -4497,18 +4461,18 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
DCHECK(!AreAliased(src, scratch));
ldr(scratch, src);
LoadF32(src, dst);
StoreFloat32(scratch, dst);
StoreF32(scratch, dst);
}
void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
DoubleRegister scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kDoubleSize));
StoreDouble(d0, MemOperand(sp));
StoreF64(d0, MemOperand(sp));
LoadF32(scratch, src);
LoadF32(d0, dst);
StoreFloat32(scratch, dst);
StoreFloat32(d0, src);
StoreF32(scratch, dst);
StoreF32(d0, src);
// restore d0
LoadF64(d0, MemOperand(sp));
lay(sp, MemOperand(sp, kDoubleSize));
......@@ -4528,18 +4492,18 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
DCHECK(!AreAliased(src, scratch));
ldr(scratch, src);
LoadF64(src, dst);
StoreDouble(scratch, dst);
StoreF64(scratch, dst);
}
void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
DoubleRegister scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kDoubleSize));
StoreDouble(d0, MemOperand(sp));
StoreF64(d0, MemOperand(sp));
LoadF64(scratch, src);
LoadF64(d0, dst);
StoreDouble(scratch, dst);
StoreDouble(d0, src);
StoreF64(scratch, dst);
StoreF64(d0, src);
// restore d0
LoadF64(d0, MemOperand(sp));
lay(sp, MemOperand(sp, kDoubleSize));
......@@ -4557,21 +4521,21 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
Simd128Register scratch) {
DCHECK(!AreAliased(src, scratch));
vlr(scratch, src, Condition(0), Condition(0), Condition(0));
LoadSimd128(src, dst, ip);
StoreSimd128(scratch, dst, ip);
LoadV128(src, dst, ip);
StoreV128(scratch, dst, ip);
}
void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
Simd128Register scratch) {
// push d0, to be used as scratch
lay(sp, MemOperand(sp, -kSimd128Size));
StoreSimd128(d0, MemOperand(sp), ip);
LoadSimd128(scratch, src, ip);
LoadSimd128(d0, dst, ip);
StoreSimd128(scratch, dst, ip);
StoreSimd128(d0, src, ip);
StoreV128(d0, MemOperand(sp), ip);
LoadV128(scratch, src, ip);
LoadV128(d0, dst, ip);
StoreV128(scratch, dst, ip);
StoreV128(d0, src, ip);
// restore d0
LoadSimd128(d0, MemOperand(sp), ip);
LoadV128(d0, MemOperand(sp), ip);
lay(sp, MemOperand(sp, kSimd128Size));
}
......@@ -4688,7 +4652,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Label return_label;
larl(r14, &return_label); // Generate the return addr of call later.
StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
StoreU64(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
// zLinux ABI requires caller's frame to have sufficient space for callee
// preserved register save area.
......
......@@ -284,9 +284,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Add Logical (Register - Register)
void AddLogical32(Register dst, Register src1, Register src2);
// Add Logical With Carry (Register - Register)
void AddLogicalWithCarry32(Register dst, Register src1, Register src2);
// Add Logical (Register - Immediate)
void AddLogical(Register dst, const Operand& imm);
void AddLogicalP(Register dst, const Operand& imm);
......@@ -297,12 +294,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Subtract (Register - Immediate)
void Sub32(Register dst, const Operand& imm);
void Sub32_RI(Register dst, const Operand& imm) { Sub32(dst, imm); }
void SubP(Register dst, const Operand& imm);
void Sub32(Register dst, Register src, const Operand& imm);
void Sub32_RRI(Register dst, Register src, const Operand& imm) {
Sub32(dst, src, imm);
}
void SubP(Register dst, Register src, const Operand& imm);
// Subtract (Register - Register)
......@@ -326,8 +319,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
// Subtract Logical 32-bit
void SubLogical32(Register dst, Register src1, Register src2);
// Subtract Logical With Borrow 32-bit
void SubLogicalWithBorrow32(Register dst, Register src1, Register src2);
// Multiply
void MulP(Register dst, const Operand& opnd);
......@@ -427,9 +418,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load Floating Point
void LoadF64(DoubleRegister dst, const MemOperand& opnd);
void LoadF32(DoubleRegister dst, const MemOperand& opnd);
void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
void LoadSimd128(Simd128Register dst, const MemOperand& mem,
Register scratch);
void LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch);
void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch);
......@@ -447,7 +436,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DoubleRegister scratch);
void DivFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch);
void LoadFloat32ToDouble(DoubleRegister dst, const MemOperand& opnd,
void LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch);
// Load On Condition
......@@ -457,12 +446,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadPositive32(Register result, Register input);
// Store Floating Point
void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
DoubleRegister scratch);
void StoreSimd128(Simd128Register src, const MemOperand& mem,
Register scratch);
void StoreF64(DoubleRegister dst, const MemOperand& opnd);
void StoreF32(DoubleRegister dst, const MemOperand& opnd);
void StoreV128(Simd128Register src, const MemOperand& mem, Register scratch);
void Branch(Condition c, const Operand& opnd);
void BranchOnCount(Register r1, Label* l);
......@@ -529,12 +515,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void push(DoubleRegister src) {
lay(sp, MemOperand(sp, -kSystemPointerSize));
StoreDouble(src, MemOperand(sp));
StoreF64(src, MemOperand(sp));
}
void push(Register src) {
lay(sp, MemOperand(sp, -kSystemPointerSize));
StoreP(src, MemOperand(sp));
StoreU64(src, MemOperand(sp));
}
void pop(DoubleRegister dst) {
......@@ -558,25 +544,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
lay(sp, MemOperand(sp, -kSystemPointerSize * 2));
StoreP(src1, MemOperand(sp, kSystemPointerSize));
StoreP(src2, MemOperand(sp, 0));
StoreU64(src1, MemOperand(sp, kSystemPointerSize));
StoreU64(src2, MemOperand(sp, 0));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
lay(sp, MemOperand(sp, -kSystemPointerSize * 3));
StoreP(src1, MemOperand(sp, kSystemPointerSize * 2));
StoreP(src2, MemOperand(sp, kSystemPointerSize));
StoreP(src3, MemOperand(sp, 0));
StoreU64(src1, MemOperand(sp, kSystemPointerSize * 2));
StoreU64(src2, MemOperand(sp, kSystemPointerSize));
StoreU64(src3, MemOperand(sp, 0));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
lay(sp, MemOperand(sp, -kSystemPointerSize * 4));
StoreP(src1, MemOperand(sp, kSystemPointerSize * 3));
StoreP(src2, MemOperand(sp, kSystemPointerSize * 2));
StoreP(src3, MemOperand(sp, kSystemPointerSize));
StoreP(src4, MemOperand(sp, 0));
StoreU64(src1, MemOperand(sp, kSystemPointerSize * 3));
StoreU64(src2, MemOperand(sp, kSystemPointerSize * 2));
StoreU64(src3, MemOperand(sp, kSystemPointerSize));
StoreU64(src4, MemOperand(sp, 0));
}
// Push five registers. Pushes leftmost register first (to highest address).
......@@ -594,11 +580,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DCHECK(src4 != src5);
lay(sp, MemOperand(sp, -kSystemPointerSize * 5));
StoreP(src1, MemOperand(sp, kSystemPointerSize * 4));
StoreP(src2, MemOperand(sp, kSystemPointerSize * 3));
StoreP(src3, MemOperand(sp, kSystemPointerSize * 2));
StoreP(src4, MemOperand(sp, kSystemPointerSize));
StoreP(src5, MemOperand(sp, 0));
StoreU64(src1, MemOperand(sp, kSystemPointerSize * 4));
StoreU64(src2, MemOperand(sp, kSystemPointerSize * 3));
StoreU64(src3, MemOperand(sp, kSystemPointerSize * 2));
StoreU64(src4, MemOperand(sp, kSystemPointerSize));
StoreU64(src5, MemOperand(sp, 0));
}
enum PushArrayOrder { kNormal, kReverse };
......@@ -748,16 +734,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);
void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
void StoreU32(Register src, const MemOperand& mem, Register scratch = no_reg);
void LoadS16(Register dst, Register src);
void LoadS16(Register dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreHalfWord(Register src, const MemOperand& mem,
Register scratch = r0);
void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
void StoreU16(Register src, const MemOperand& mem, Register scratch = r0);
void StoreU8(Register src, const MemOperand& mem, Register scratch = r0);
void CmpSmiLiteral(Register src1, Smi smi, Register scratch);
// Set new rounding mode RN to FPSCR
......@@ -768,8 +753,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// These exist to provide portability between 32 and 64bit
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
void StoreP(const MemOperand& mem, const Operand& opnd,
void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
void StoreU64(const MemOperand& mem, const Operand& opnd,
Register scratch = no_reg);
void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
......@@ -1069,7 +1054,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
StoreP(rec, MemOperand(sp, 0));
StoreU64(rec, MemOperand(sp, 0));
}
void CallRuntime(const Runtime::Function* f, int num_arguments,
......
......@@ -699,7 +699,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
__ StoreFloat32(value, operand); \
__ StoreF32(value, operand); \
} while (0)
#define ASSEMBLE_STORE_DOUBLE() \
......@@ -708,7 +708,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
__ StoreDouble(value, operand); \
__ StoreF64(value, operand); \
} while (0)
#define ASSEMBLE_STORE_INTEGER(asm_instr) \
......@@ -1346,7 +1346,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
// Put the return address in a stack slot.
__ larl(r0, &return_location);
__ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
__ StoreU64(r0,
MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
}
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
......@@ -1505,7 +1506,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadF32(i.OutputFloatRegister(), MemOperand(fp, offset));
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
__ LoadSimd128(i.OutputSimd128Register(), MemOperand(fp, offset),
__ LoadV128(i.OutputSimd128Register(), MemOperand(fp, offset),
kScratchReg);
}
} else {
......@@ -2001,18 +2002,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
switch (op->representation()) {
case MachineRepresentation::kFloat32:
__ lay(sp, MemOperand(sp, -kSystemPointerSize));
__ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
__ StoreF32(i.InputDoubleRegister(0), MemOperand(sp));
break;
case MachineRepresentation::kFloat64:
__ lay(sp, MemOperand(sp, -kDoubleSize));
__ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
__ StoreF64(i.InputDoubleRegister(0), MemOperand(sp));
frame_access_state()->IncreaseSPDelta(kDoubleSize /
kSystemPointerSize);
break;
case MachineRepresentation::kSimd128: {
__ lay(sp, MemOperand(sp, -kSimd128Size));
__ StoreSimd128(i.InputDoubleRegister(0), MemOperand(sp),
kScratchReg);
__ StoreV128(i.InputDoubleRegister(0), MemOperand(sp), kScratchReg);
frame_access_state()->IncreaseSPDelta(kSimd128Size /
kSystemPointerSize);
break;
......@@ -2032,13 +2032,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
__ StoreF64(i.InputDoubleRegister(0), MemOperand(sp));
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
__ StoreF32(i.InputDoubleRegister(0), MemOperand(sp));
}
} else {
__ StoreP(i.InputRegister(0), MemOperand(sp));
__ StoreU64(i.InputRegister(0), MemOperand(sp));
}
break;
}
......@@ -2047,19 +2047,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDouble(i.InputDoubleRegister(0),
__ StoreF64(i.InputDoubleRegister(0),
MemOperand(sp, slot * kSystemPointerSize));
} else if (op->representation() == MachineRepresentation::kFloat32) {
__ StoreFloat32(i.InputDoubleRegister(0),
__ StoreF32(i.InputDoubleRegister(0),
MemOperand(sp, slot * kSystemPointerSize));
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
__ StoreSimd128(i.InputDoubleRegister(0),
MemOperand(sp, slot * kSystemPointerSize),
kScratchReg);
__ StoreV128(i.InputDoubleRegister(0),
MemOperand(sp, slot * kSystemPointerSize), kScratchReg);
}
} else {
__ StoreP(i.InputRegister(0),
__ StoreU64(i.InputRegister(0),
MemOperand(sp, slot * kSystemPointerSize));
}
break;
......@@ -2230,8 +2229,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_UNARY_OP(D_DInstr(ledbr), nullInstr, nullInstr);
break;
case kS390_Float32ToDouble:
ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MTInstr(LoadFloat32ToDouble),
nullInstr);
ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MTInstr(LoadF32AsF64), nullInstr);
break;
case kS390_DoubleExtractLowWord32:
__ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
......@@ -2367,17 +2365,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_StoreWord8:
ASSEMBLE_STORE_INTEGER(StoreByte);
ASSEMBLE_STORE_INTEGER(StoreU8);
break;
case kS390_StoreWord16:
ASSEMBLE_STORE_INTEGER(StoreHalfWord);
ASSEMBLE_STORE_INTEGER(StoreU16);
break;
case kS390_StoreWord32:
ASSEMBLE_STORE_INTEGER(StoreW);
ASSEMBLE_STORE_INTEGER(StoreU32);
break;
#if V8_TARGET_ARCH_S390X
case kS390_StoreWord64:
ASSEMBLE_STORE_INTEGER(StoreP);
ASSEMBLE_STORE_INTEGER(StoreU64);
break;
#endif
case kS390_StoreReverse16:
......@@ -4575,7 +4573,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) {
__ Move(g.ToRegister(destination), src);
} else {
__ StoreP(src, g.ToMemOperand(destination));
__ StoreU64(src, g.ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
......@@ -4585,7 +4583,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
Register temp = kScratchReg;
__ LoadP(temp, src, r0);
__ StoreP(temp, g.ToMemOperand(destination));
__ StoreU64(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
......@@ -4653,7 +4651,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
}
if (destination->IsStackSlot()) {
__ StoreP(dst, g.ToMemOperand(destination), r0);
__ StoreU64(dst, g.ToMemOperand(destination), r0);
}
} else {
DoubleRegister dst = destination->IsFPRegister()
......@@ -4669,9 +4667,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
if (destination->IsFloatStackSlot()) {
__ StoreFloat32(dst, g.ToMemOperand(destination));
__ StoreF32(dst, g.ToMemOperand(destination));
} else if (destination->IsDoubleStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination));
__ StoreF64(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsFPRegister()) {
......@@ -4682,8 +4680,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Condition(0), Condition(0), Condition(0));
} else {
DCHECK(destination->IsSimd128StackSlot());
__ StoreSimd128(g.ToSimd128Register(source),
g.ToMemOperand(destination), kScratchReg);
__ StoreV128(g.ToSimd128Register(source), g.ToMemOperand(destination),
kScratchReg);
}
} else {
DoubleRegister src = g.ToDoubleRegister(source);
......@@ -4694,9 +4692,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsFPStackSlot());
LocationOperand* op = LocationOperand::cast(source);
if (op->representation() == MachineRepresentation::kFloat64) {
__ StoreDouble(src, g.ToMemOperand(destination));
__ StoreF64(src, g.ToMemOperand(destination));
} else {
__ StoreFloat32(src, g.ToMemOperand(destination));
__ StoreF32(src, g.ToMemOperand(destination));
}
}
}
......@@ -4711,7 +4709,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ LoadF32(g.ToDoubleRegister(destination), src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
__ LoadSimd128(g.ToSimd128Register(destination), g.ToMemOperand(source),
__ LoadV128(g.ToSimd128Register(destination), g.ToMemOperand(source),
kScratchReg);
}
} else {
......@@ -4719,14 +4717,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister temp = kScratchDoubleReg;
if (op->representation() == MachineRepresentation::kFloat64) {
__ LoadF64(temp, src);
__ StoreDouble(temp, g.ToMemOperand(destination));
__ StoreF64(temp, g.ToMemOperand(destination));
} else if (op->representation() == MachineRepresentation::kFloat32) {
__ LoadF32(temp, src);
__ StoreFloat32(temp, g.ToMemOperand(destination));
__ StoreF32(temp, g.ToMemOperand(destination));
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
__ LoadSimd128(kScratchDoubleReg, g.ToMemOperand(source), kScratchReg);
__ StoreSimd128(kScratchDoubleReg, g.ToMemOperand(destination),
__ LoadV128(kScratchDoubleReg, g.ToMemOperand(source), kScratchReg);
__ StoreV128(kScratchDoubleReg, g.ToMemOperand(destination),
kScratchReg);
}
}
......
......@@ -160,7 +160,7 @@ void RegExpMacroAssemblerS390::AdvanceRegister(int reg, int by) {
__ LoadP(r2, register_location(reg), r0);
__ mov(r0, Operand(by));
__ AddRR(r2, r0);
__ StoreP(r2, register_location(reg));
__ StoreU64(r2, register_location(reg));
}
}
}
......@@ -171,7 +171,7 @@ void RegExpMacroAssemblerS390::Backtrack() {
Label next;
__ LoadP(r2, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ AddP(r2, r2, Operand(1));
__ StoreP(r2, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ StoreU64(r2, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ CmpLogicalP(r2, Operand(backtrack_limit()));
__ bne(&next);
......@@ -733,7 +733,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
__ StoreP(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
__ StoreU64(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
......@@ -761,12 +761,12 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ Load(r4, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
__ StoreP(r1, MemOperand(r3, -kSystemPointerSize));
__ StoreU64(r1, MemOperand(r3, -kSystemPointerSize));
__ lay(r3, MemOperand(r3, -kSystemPointerSize));
__ BranchOnCount(r4, &init_loop);
} else {
for (int i = 0; i < num_saved_registers_; i++) {
__ StoreP(r1, register_location(i));
__ StoreU64(r1, register_location(i));
}
}
}
......@@ -814,13 +814,14 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ AddP(r4, r0);
__ AddP(r5, r0);
__ AddP(r6, r0);
__ StoreW(r3,
MemOperand(r2, -(num_saved_registers_ - i - 3) * kIntSize));
__ StoreW(r4,
MemOperand(r2, -(num_saved_registers_ - i - 2) * kIntSize));
__ StoreW(r5,
MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
__ StoreW(r6, MemOperand(r2, -(num_saved_registers_ - i) * kIntSize));
__ StoreU32(
r3, MemOperand(r2, -(num_saved_registers_ - i - 3) * kIntSize));
__ StoreU32(
r4, MemOperand(r2, -(num_saved_registers_ - i - 2) * kIntSize));
__ StoreU32(
r5, MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
__ StoreU32(r6,
MemOperand(r2, -(num_saved_registers_ - i) * kIntSize));
i += 4;
} else {
__ LoadMultipleP(r3, r4, register_location(i + 1));
......@@ -830,9 +831,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
}
__ AddP(r3, r0);
__ AddP(r4, r0);
__ StoreW(r3,
MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
__ StoreW(r4, MemOperand(r2, -(num_saved_registers_ - i) * kIntSize));
__ StoreU32(
r3, MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
__ StoreU32(r4,
MemOperand(r2, -(num_saved_registers_ - i) * kIntSize));
i += 2;
}
}
......@@ -849,7 +851,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ LoadP(r4, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ AddP(r2, Operand(1));
__ StoreP(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
__ StoreU64(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ SubP(r3, Operand(num_saved_registers_));
......@@ -857,10 +859,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ CmpP(r3, Operand(num_saved_registers_));
__ blt(&return_r2);
__ StoreP(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
__ StoreU64(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
// Advance the location for output.
__ AddP(r4, Operand(num_saved_registers_ * kIntSize));
__ StoreP(r4, MemOperand(frame_pointer(), kRegisterOutput));
__ StoreU64(r4, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r2 to initialize registers with its value in the next run.
__ LoadP(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
......@@ -1005,7 +1007,7 @@ void RegExpMacroAssemblerS390::PopCurrentPosition() {
void RegExpMacroAssemblerS390::PopRegister(int register_index) {
Pop(r2);
__ StoreP(r2, register_location(register_index));
__ StoreU64(r2, register_location(register_index));
}
void RegExpMacroAssemblerS390::PushBacktrack(Label* label) {
......@@ -1055,7 +1057,7 @@ void RegExpMacroAssemblerS390::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerS390::SetRegister(int register_index, int to) {
DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
__ mov(r2, Operand(to));
__ StoreP(r2, register_location(register_index));
__ StoreU64(r2, register_location(register_index));
}
bool RegExpMacroAssemblerS390::Succeed() {
......@@ -1066,10 +1068,10 @@ bool RegExpMacroAssemblerS390::Succeed() {
void RegExpMacroAssemblerS390::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
if (cp_offset == 0) {
__ StoreP(current_input_offset(), register_location(reg));
__ StoreU64(current_input_offset(), register_location(reg));
} else {
__ AddP(r2, current_input_offset(), Operand(cp_offset * char_size()));
__ StoreP(r2, register_location(reg));
__ StoreU64(r2, register_location(reg));
}
}
......@@ -1077,14 +1079,14 @@ void RegExpMacroAssemblerS390::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
__ LoadP(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ StoreP(r2, register_location(reg));
__ StoreU64(r2, register_location(reg));
}
}
void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
__ LoadP(r3, MemOperand(frame_pointer(), kStackHighEnd));
__ SubP(r2, backtrack_stackpointer(), r3);
__ StoreP(r2, register_location(reg));
__ StoreU64(r2, register_location(reg));
}
// Private methods:
......@@ -1216,7 +1218,7 @@ void RegExpMacroAssemblerS390::Push(Register source) {
DCHECK(source != backtrack_stackpointer());
__ lay(backtrack_stackpointer(),
MemOperand(backtrack_stackpointer(), -kSystemPointerSize));
__ StoreP(source, MemOperand(backtrack_stackpointer()));
__ StoreU64(source, MemOperand(backtrack_stackpointer()));
}
void RegExpMacroAssemblerS390::Pop(Register target) {
......@@ -1250,7 +1252,7 @@ void RegExpMacroAssemblerS390::CallCFunctionUsingStub(
__ mov(code_pointer(), Operand(function));
Label ret;
__ larl(r14, &ret);
__ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
__ StoreU64(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
__ b(code_pointer());
__ bind(&ret);
if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
......
......@@ -295,11 +295,11 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
// instructions per slot.
uint32_t remainder = size;
for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
StoreP(r0, liftoff::GetStackSlot(start + remainder));
StoreU64(r0, liftoff::GetStackSlot(start + remainder));
}
DCHECK(remainder == 4 || remainder == 0);
if (remainder) {
StoreW(r0, liftoff::GetStackSlot(start + remainder));
StoreU32(r0, liftoff::GetStackSlot(start + remainder));
}
} else {
// General case for bigger counts (9 instructions).
......@@ -311,7 +311,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
Label loop;
bind(&loop);
StoreP(r0, MemOperand(r0));
StoreU64(r0, MemOperand(r0));
la(r0, MemOperand(r0, kSystemPointerSize));
CmpLogicalP(r3, r4);
bne(&loop);
......