Commit 3745599a authored by Junliang Yan, committed by Commit Bot

s390x: cleanup 32/64 portability macros

Change-Id: I59c905182294dc4e8fb8caf03f10ea66d332e034
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2586153
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#71724}
parent ddbda0ee
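This change drops the pointer-size portability macros from macro-assembler-s390.h (ShiftLeftP, AddRR, LoadComplementRR, and friends, which the preprocessor mapped to 32- or 64-bit mnemonics depending on V8_TARGET_ARCH_S390X) and names the operand width explicitly at every call site (ShiftLeftU64, agr, lcgr). A minimal before/after sketch, using the macro definitions removed in the diff below; the single-overload signature is simplified for illustration:

// Before: one name, width chosen at preprocessing time.
#if V8_TARGET_ARCH_S390X
#define ShiftLeftP sllg        // 64-bit shift left logical
#else
#define ShiftLeftP ShiftLeft   // 32-bit helper
#endif
__ ShiftLeftP(r1, r8, Operand(kTaggedSizeLog2));

// After: the width is part of the helper's name.
__ ShiftLeftU64(r1, r8, Operand(kTaggedSizeLog2));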
@@ -396,7 +396,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ SubP(r8, r8, Operand(1));
__ blt(&done_loop);
__ ShiftLeftP(r1, r8, Operand(kTaggedSizeLog2));
__ ShiftLeftU64(r1, r8, Operand(kTaggedSizeLog2));
__ la(scratch, MemOperand(r4, r1));
__ LoadAnyTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize));
@@ -768,7 +768,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r9: scratch reg to hold index into argv
Label argLoop, argExit;
__ ShiftLeftP(r9, r2, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r9, r2, Operand(kSystemPointerSizeLog2));
__ lay(r9, MemOperand(r6, r9, -kSystemPointerSize)); // point to last arg
__ ltgr(r7, r2);
@@ -864,8 +864,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Compute the size of the actual parameters + receiver (in bytes).
__ LoadP(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftP(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
__ AddP(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
// If actual is bigger than formal, then we should use it to free up the stack
@@ -1045,7 +1045,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftP(scratch3, bytecode, Operand(2));
__ ShiftLeftU64(scratch3, bytecode, Operand(2));
__ LoadU32(scratch3, MemOperand(bytecode_size_table, scratch3));
__ AddP(bytecode_offset, bytecode_offset, scratch3);
@@ -1172,7 +1172,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ ShiftRightP(r4, r4, Operand(kSystemPointerSizeLog2));
__ ShiftRightU64(r4, r4, Operand(kSystemPointerSizeLog2));
__ LoadAndTestP(r4, r4);
__ beq(&no_args);
__ mov(r1, r4);
@@ -1191,7 +1191,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ CmpP(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
__ ShiftLeftP(r8, r8, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r8, r8, Operand(kSystemPointerSizeLog2));
__ StoreU64(r5, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
@@ -1215,7 +1215,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadU8(r5, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r5, r5, Operand(kSystemPointerSizeLog2));
__ LoadP(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, r5));
__ Call(kJavaScriptCallCodeStartRegister);
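This is the interpreter's dispatch idiom: the current bytecode value indexes a table of handler entry points, so it is scaled by the pointer size before the load. A schematic C++ equivalent of the three instructions above (hypothetical helper, not the V8 API):

#include <cstdint>

// Each dispatch-table slot is one pointer wide, so indexing by the bytecode
// is the same as loading from base + (bytecode << kSystemPointerSizeLog2),
// which is what ShiftLeftU64 + LoadP compute before the Call.
uintptr_t LookupHandler(const uintptr_t* dispatch_table, uint8_t bytecode) {
  return dispatch_table[bytecode];
}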
@@ -1307,7 +1307,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register start_address,
Register scratch) {
__ SubP(scratch, num_args, Operand(1));
__ ShiftLeftP(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ SubP(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, r1, scratch,
@@ -1508,7 +1508,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Register scratch = temps.Acquire();
__ LoadU8(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftP(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ LoadP(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
@@ -1597,7 +1597,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// from LAZY is always the last argument.
__ AddP(r2, r2,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
__ StoreU64(scratch, MemOperand(sp, r1));
// Recover arguments count.
__ SubP(r2, r2,
@@ -1714,7 +1714,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreU64(r7, MemOperand(sp));
}
@@ -1800,7 +1800,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreU64(r7, MemOperand(sp));
}
@@ -1851,7 +1851,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadP(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreU64(r6, MemOperand(sp));
}
@@ -1958,7 +1958,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label copy, check;
Register num = ip, src = r8, dest = r7;
__ mov(src, sp);
__ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, r6, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, r1);
// Update stack pointer.
__ mov(dest, sp);
@@ -2090,7 +2090,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ AddP(r6, r6,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
kSystemPointerSize));
__ ShiftLeftP(scratch, r4, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(scratch, r4, Operand(kSystemPointerSizeLog2));
__ AddP(r6, r6, scratch);
// Move the arguments already in the stack,
@@ -2101,7 +2101,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
dest = r4; // r7 and r10 are context and root.
__ mov(src, sp);
// Update stack pointer.
__ ShiftLeftP(scratch, r7, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(scratch, r7, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, scratch);
__ mov(dest, sp);
__ ltgr(num, r2);
@@ -2125,7 +2125,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&loop);
{
__ SubP(r7, r7, Operand(1));
__ ShiftLeftP(r1, r7, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, r7, Operand(kSystemPointerSizeLog2));
__ LoadP(scratch, MemOperand(r6, r1));
__ StoreU64(scratch, MemOperand(r4, r1));
__ CmpP(r7, Operand::Zero());
@@ -2274,7 +2274,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
__ ShiftLeftP(r9, r6, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r9, r6, Operand(kSystemPointerSizeLog2));
__ SubP(r1, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
@@ -2302,7 +2302,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ SubP(r1, r6, Operand(1));
__ ShiftLeftP(r1, r1, Operand(kTaggedSizeLog2));
__ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
__ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
__ Push(scratch);
__ SubP(r6, r6, Operand(1));
@@ -2532,11 +2532,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
__ ShiftLeftP(r2, r4, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r2, r4, Operand(kSystemPointerSizeLog2));
__ AddP(r2, fp);
// adjust for return address and receiver
__ AddP(r2, r2, Operand(2 * kSystemPointerSize));
__ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r6, r4, Operand(kSystemPointerSizeLog2));
__ SubP(r6, r2, r6);
// Copy the arguments (including the receiver) to the new stack frame.
@@ -2571,7 +2571,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ LoadRoot(r7, RootIndex::kUndefinedValue);
__ SmiUntag(r1, r2);
__ SubP(r8, r4, r1);
__ ShiftLeftP(r1, r8, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, r8, Operand(kSystemPointerSizeLog2));
__ SubP(r6, fp, r1);
// Adjust for frame.
__ SubP(r6, r6,
@@ -2726,7 +2726,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(r3, r4);
} else {
// Compute the argv pointer.
__ ShiftLeftP(r3, r2, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r3, r2, Operand(kSystemPointerSizeLog2));
__ lay(r3, MemOperand(r3, sp, -kSystemPointerSize));
}
@@ -2933,7 +2933,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ ble(&only_low, Label::kNear);
// 21 <= exponent <= 51, shift scratch_low and scratch_high
// to generate the result.
__ ShiftRight(scratch_low, scratch_low, scratch);
__ ShiftRightU32(scratch_low, scratch_low, scratch);
// Scratch contains: 52 - exponent.
// We need: exponent - 20.
// So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
@@ -2943,9 +2943,9 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// Set the implicit 1 before the mantissa part in scratch_high.
STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
__ mov(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
__ ShiftLeftP(r0, r0, Operand(16));
__ ShiftLeftU64(r0, r0, Operand(16));
__ OrP(result_reg, result_reg, r0);
__ ShiftLeft(r0, result_reg, scratch);
__ ShiftLeftU32(r0, result_reg, scratch);
__ OrP(result_reg, scratch_low, r0);
__ b(&negate, Label::kNear);
@@ -2956,8 +2956,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ bind(&only_low);
// 52 <= exponent <= 83, shift only scratch_low.
// On entry, scratch contains: 52 - exponent.
__ LoadComplementRR(scratch, scratch);
__ ShiftLeft(result_reg, scratch_low, scratch);
__ lcgr(scratch, scratch);
__ ShiftLeftU32(result_reg, scratch_low, scratch);
__ bind(&negate);
// If input was positive, scratch_high ASR 31 equals 0 and
@@ -2966,13 +2966,13 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// If the input was negative, we have to negate the result.
// Input_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
// New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ ShiftRightArith(r0, scratch_high, Operand(31));
__ ShiftRightS32(r0, scratch_high, Operand(31));
#if V8_TARGET_ARCH_S390X
__ lgfr(r0, r0);
__ ShiftRightP(r0, r0, Operand(32));
__ ShiftRightU64(r0, r0, Operand(32));
#endif
__ XorP(result_reg, r0);
__ ShiftRight(r0, scratch_high, Operand(31));
__ ShiftRightU32(r0, scratch_high, Operand(31));
__ AddP(result_reg, r0);
__ bind(&done);
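The negate sequence above is the branchless two's-complement identity: XOR with the sign-extended high word flips every bit exactly when the input was negative, and adding the logically shifted sign bit supplies the +1, giving (result ^ 0xFFFFFFFF) + 1 = -result. A minimal C++ model of the same trick (hypothetical helper name; relies on arithmetic right shift of signed values):

#include <cstdint>

int32_t NegateIfInputWasNegative(int32_t result, int32_t scratch_high) {
  int32_t mask = scratch_high >> 31;  // 0, or 0xFFFFFFFF if negative (ASR)
  uint32_t one = static_cast<uint32_t>(scratch_high) >> 31;  // 0 or 1 (LSR)
  return (result ^ mask) + static_cast<int32_t>(one);
}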
@@ -3213,7 +3213,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// from the API function here.
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, argc, Operand(kSystemPointerSizeLog2));
__ AddP(scratch, r1);
__ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
kSystemPointerSize));
@@ -3497,7 +3497,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// r3 = one past the last FrameDescription**.
__ LoadU32(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
__ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r3, r3, Operand(kSystemPointerSizeLog2));
__ AddP(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
@@ -533,7 +533,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#define DECLARE_S390_RS_SHIFT_FORMAT(name, opcode) \
void name(Register r1, Register r2, const Operand& opnd = Operand::Zero()) { \
DCHECK(r2 != r0); \
rs_format(opcode, r1.code(), r0.code(), r2.code(), opnd.immediate()); \
} \
void name(Register r1, const Operand& opnd) { \
@@ -437,7 +437,7 @@ void TurboAssembler::Drop(int count) {
}
void TurboAssembler::Drop(Register count, Register scratch) {
ShiftLeftP(scratch, count, Operand(kSystemPointerSizeLog2));
ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
AddP(sp, sp, scratch);
}
@@ -546,7 +546,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
Label loop, done;
if (order == kNormal) {
ShiftLeftP(scratch, size, Operand(kSystemPointerSizeLog2));
ShiftLeftU64(scratch, size, Operand(kSystemPointerSizeLog2));
lay(scratch, MemOperand(array, scratch));
bind(&loop);
CmpP(array, scratch);
@@ -558,7 +558,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
bind(&done);
} else {
DCHECK_NE(scratch2, r0);
ShiftLeftP(scratch, size, Operand(kSystemPointerSizeLog2));
ShiftLeftU64(scratch, size, Operand(kSystemPointerSizeLog2));
lay(scratch, MemOperand(array, scratch));
mov(scratch2, array);
bind(&loop);
@@ -1405,8 +1405,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count.is_valid()) {
if (!argument_count_is_length) {
ShiftLeftP(argument_count, argument_count,
Operand(kSystemPointerSizeLog2));
ShiftLeftU64(argument_count, argument_count,
Operand(kSystemPointerSizeLog2));
}
la(sp, MemOperand(sp, argument_count));
}
@@ -1429,14 +1429,14 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
// after we drop current frame. We AddP kSystemPointerSize to count the
// receiver argument which is not included into formal parameters count.
Register dst_reg = scratch0;
ShiftLeftP(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
ShiftLeftU64(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
AddP(dst_reg, fp, dst_reg);
AddP(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
Register src_reg = caller_args_count;
// Calculate the end of source area. +kSystemPointerSize is for the receiver.
ShiftLeftP(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
ShiftLeftU64(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
AddP(src_reg, sp, src_reg);
AddP(src_reg, src_reg, Operand(kSystemPointerSize));
@@ -1494,7 +1494,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
// here which will cause scratch to become negative.
SubP(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
ShiftLeftP(r0, num_args, Operand(kSystemPointerSizeLog2));
ShiftLeftU64(r0, num_args, Operand(kSystemPointerSizeLog2));
CmpP(scratch, r0);
ble(stack_overflow); // Signed comparison.
}
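The comparison is signed on purpose: when the real stack limit is already above sp, the subtraction wraps negative and the ble still takes the overflow path. A rough C++ sketch of the predicate (hypothetical helper; 8-byte stack slots assumed):

#include <cstdint>

constexpr int kSystemPointerSizeLog2 = 3;  // 8-byte slots on 64-bit s390x

bool WouldOverflowStack(uintptr_t sp, uintptr_t real_limit, int64_t num_args) {
  int64_t headroom = static_cast<int64_t>(sp - real_limit);  // may go negative
  return headroom <= (num_args << kSystemPointerSizeLog2);
}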
@@ -1534,8 +1534,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register num = r7, src = r8, dest = ip; // r7 and r8 are context and root.
mov(src, sp);
// Update stack pointer.
ShiftLeftP(scratch, expected_parameter_count,
Operand(kSystemPointerSizeLog2));
ShiftLeftU64(scratch, expected_parameter_count,
Operand(kSystemPointerSizeLog2));
SubP(sp, sp, scratch);
mov(dest, sp);
ltgr(num, actual_parameter_count);
@@ -2706,7 +2706,7 @@ void TurboAssembler::Add32(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
if (dst != src) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
AddPImm_RRI(dst, src, opnd);
aghik(dst, src, opnd);
return;
}
mov(dst, src);
@@ -2718,7 +2718,7 @@ void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::Add32(Register dst, Register src) { ar(dst, src); }
// Add Pointer Size (Register dst = Register dst + Register src)
void TurboAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
void TurboAssembler::AddP(Register dst, Register src) { agr(dst, src); }
// Add Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
@@ -2755,7 +2755,7 @@ void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
// We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
// as AR is a smaller instruction
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
AddP_RRR(dst, src1, src2);
agrk(dst, src1, src2);
return;
} else {
mov(dst, src1);
@@ -2763,7 +2763,7 @@ void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
} else if (dst == src2) {
src2 = src1;
}
AddRR(dst, src2);
agr(dst, src2);
}
// Add Pointer Size with src extension
@@ -2937,7 +2937,7 @@ void TurboAssembler::SubP(Register dst, Register src, const Operand& imm) {
void TurboAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
// Subtract Pointer Size (Register dst = Register dst - Register src)
void TurboAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
void TurboAssembler::SubP(Register dst, Register src) { sgr(dst, src); }
// Subtract Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
@@ -2975,14 +2975,14 @@ void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
SubP_RRR(dst, src1, src2);
sgrk(dst, src1, src2);
return;
}
if (dst != src1 && dst != src2) mov(dst, src1);
// In scenario where we have dst = src - dst, we need to swap and negate
if (dst != src1 && dst == src2) {
Label done;
LoadComplementRR(dst, dst); // dst = -dst
lcgr(dst, dst); // dst = -dst
b(overflow, &done);
AddP(dst, src1); // dst = dst + src
bind(&done);
@@ -3003,7 +3003,7 @@ void TurboAssembler::SubP_ExtendSrc(Register dst, Register src1,
// In scenario where we have dst = src - dst, we need to swap and negate
if (dst != src1 && dst == src2) {
lgfr(dst, dst); // Sign extend this operand first.
LoadComplementRR(dst, dst); // dst = -dst
lcgr(dst, dst); // dst = -dst
AddP(dst, src1); // dst = -dst + src
} else {
sgfr(dst, src2);
@@ -3108,7 +3108,7 @@ void TurboAssembler::SubLogicalP_ExtendSrc(Register dst,
void TurboAssembler::And(Register dst, Register src) { nr(dst, src); }
// AND Pointer Size - dst = dst & src
void TurboAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
void TurboAssembler::AndP(Register dst, Register src) { ngr(dst, src); }
// Non-clobbering AND 32-bit - dst = src1 & src2
void TurboAssembler::And(Register dst, Register src1, Register src2) {
@@ -3133,7 +3133,7 @@ void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
// We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
// as NR is a smaller instruction
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
AndP_RRR(dst, src1, src2);
ngrk(dst, src1, src2);
return;
} else {
mov(dst, src1);
@@ -3231,7 +3231,7 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); }
// OR Pointer Size - dst = dst | src
void TurboAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
void TurboAssembler::OrP(Register dst, Register src) { ogr(dst, src); }
// Non-clobbering OR 32-bit - dst = src1 | src2
void TurboAssembler::Or(Register dst, Register src1, Register src2) {
@@ -3256,7 +3256,7 @@ void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
// We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
// as OR is a smaller instruction
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
OrP_RRR(dst, src1, src2);
ogrk(dst, src1, src2);
return;
} else {
mov(dst, src1);
@@ -3319,7 +3319,7 @@ void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); }
// XOR Pointer Size - dst = dst ^ src
void TurboAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
void TurboAssembler::XorP(Register dst, Register src) { xgr(dst, src); }
// Non-clobbering XOR 32-bit - dst = src1 ^ src2
void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
@@ -3344,7 +3344,7 @@ void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
// We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
// as XR is a smaller instruction
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
XorP_RRR(dst, src1, src2);
xgrk(dst, src1, src2);
return;
} else {
mov(dst, src1);
@@ -4199,108 +4199,100 @@ void TurboAssembler::StoreU8(Register src, const MemOperand& mem,
}
// Shift left logical for 32-bit integer types.
void TurboAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
if (dst == src) {
sll(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
sllk(dst, src, val);
} else {
lr(dst, src);
sll(dst, val);
}
void TurboAssembler::ShiftLeftU32(Register dst, Register src,
const Operand& val) {
ShiftLeftU32(dst, src, r0, val);
}
// Shift left logical for 32-bit integer types.
void TurboAssembler::ShiftLeft(Register dst, Register src, Register val) {
void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
sll(dst, val);
sll(dst, val, val2);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
sllk(dst, src, val);
sllk(dst, src, val, val2);
} else {
DCHECK(dst != val); // The lr/sll path clobbers val.
DCHECK(dst != val || val == r0); // The lr/sll path clobbers val.
lr(dst, src);
sll(dst, val);
sll(dst, val, val2);
}
}
// Shift left logical for 64-bit integer types.
void TurboAssembler::ShiftLeftU64(Register dst, Register src,
const Operand& val) {
ShiftLeftU64(dst, src, r0, val);
}
// Shift left logical for 64-bit integer types.
void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register val,
const Operand& val2) {
sllg(dst, src, val, val2);
}
// Shift right logical for 32-bit integer types.
void TurboAssembler::ShiftRight(Register dst, Register src,
const Operand& val) {
if (dst == src) {
srl(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srlk(dst, src, val);
} else {
lr(dst, src);
srl(dst, val);
}
void TurboAssembler::ShiftRightU32(Register dst, Register src,
const Operand& val) {
ShiftRightU32(dst, src, r0, val);
}
// Shift right logical for 32-bit integer types.
void TurboAssembler::ShiftRight(Register dst, Register src, Register val) {
void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
srl(dst, val);
srl(dst, val, val2);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srlk(dst, src, val);
srlk(dst, src, val, val2);
} else {
DCHECK(dst != val); // The lr/srl path clobbers val.
DCHECK(dst != val || val == r0); // The lr/srl path clobbers val.
lr(dst, src);
srl(dst, val);
srl(dst, val, val2);
}
}
// Shift left arithmetic for 32-bit integer types.
void TurboAssembler::ShiftLeftArith(Register dst, Register src,
const Operand& val) {
if (dst == src) {
sla(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
slak(dst, src, val);
} else {
lr(dst, src);
sla(dst, val);
}
void TurboAssembler::ShiftRightU64(Register dst, Register src, Register val,
const Operand& val2) {
srlg(dst, src, val, val2);
}
// Shift left arithmetic for 32-bit integer types.
void TurboAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
if (dst == src) {
sla(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
slak(dst, src, val);
} else {
DCHECK(dst != val); // The lr/sla path clobbers val.
lr(dst, src);
sla(dst, val);
}
// Shift right logical for 64-bit integer types.
void TurboAssembler::ShiftRightU64(Register dst, Register src,
const Operand& val) {
ShiftRightU64(dst, src, r0, val);
}
// Shift right arithmetic for 32-bit integer types.
void TurboAssembler::ShiftRightArith(Register dst, Register src,
const Operand& val) {
if (dst == src) {
sra(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srak(dst, src, val);
} else {
lr(dst, src);
sra(dst, val);
}
void TurboAssembler::ShiftRightS32(Register dst, Register src,
const Operand& val) {
ShiftRightS32(dst, src, r0, val);
}
// Shift right arithmetic for 32-bit integer types.
void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val,
const Operand& val2) {
if (dst == src) {
sra(dst, val);
sra(dst, val, val2);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srak(dst, src, val);
srak(dst, src, val, val2);
} else {
DCHECK(dst != val); // The lr/sra path clobbers val.
DCHECK(dst != val || val == r0); // The lr/sra path clobbers val.
lr(dst, src);
sra(dst, val);
sra(dst, val, val2);
}
}
// Shift right arithmetic for 64-bit integer types.
void TurboAssembler::ShiftRightS64(Register dst, Register src,
const Operand& val) {
ShiftRightS64(dst, src, r0, val);
}
// Shift right arithmetic for 64-bit integer types.
void TurboAssembler::ShiftRightS64(Register dst, Register src, Register val,
const Operand& val2) {
srag(dst, src, val, val2);
}
// Clear rightmost # of bits
void TurboAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
@@ -4334,9 +4326,9 @@ void TurboAssembler::Popcnt32(Register dst, Register src) {
DCHECK(dst != r0);
popcnt(dst, src);
ShiftRight(r0, dst, Operand(16));
ShiftRightU32(r0, dst, Operand(16));
ar(dst, r0);
ShiftRight(r0, dst, Operand(8));
ShiftRightU32(r0, dst, Operand(8));
ar(dst, r0);
llgcr(dst, dst);
}
@@ -4347,11 +4339,11 @@ void TurboAssembler::Popcnt64(Register dst, Register src) {
DCHECK(dst != r0);
popcnt(dst, src);
ShiftRightP(r0, dst, Operand(32));
ShiftRightU64(r0, dst, Operand(32));
AddP(dst, r0);
ShiftRightP(r0, dst, Operand(16));
ShiftRightU64(r0, dst, Operand(16));
AddP(dst, r0);
ShiftRightP(r0, dst, Operand(8));
ShiftRightU64(r0, dst, Operand(8));
AddP(dst, r0);
LoadU8(dst, dst);
}
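The POPCNT used here produces a population count per byte rather than a single total, so the shift-and-add ladder folds the eight byte counts into the low byte, which the final LoadU8 zero-extends. A minimal C++ model of the 64-bit fold, assuming that per-byte semantics:

#include <cstdint>

uint64_t FoldPerByteCounts(uint64_t per_byte) {  // the raw popcnt result
  per_byte += per_byte >> 32;  // fold high word into low
  per_byte += per_byte >> 16;
  per_byte += per_byte >> 8;
  return per_byte & 0xFF;      // low byte now holds the total bit count
}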
@@ -4509,12 +4501,12 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSmiTag == 0);
// The builtin_index register contains the builtin index as a Smi.
if (SmiValuesAre32Bits()) {
ShiftRightArithP(builtin_index, builtin_index,
Operand(kSmiShift - kSystemPointerSizeLog2));
ShiftRightS64(builtin_index, builtin_index,
Operand(kSmiShift - kSystemPointerSizeLog2));
} else {
DCHECK(SmiValuesAre31Bits());
ShiftLeftP(builtin_index, builtin_index,
Operand(kSystemPointerSizeLog2 - kSmiShift));
ShiftLeftU64(builtin_index, builtin_index,
Operand(kSystemPointerSizeLog2 - kSmiShift));
}
LoadP(builtin_index, MemOperand(kRootRegister, builtin_index,
IsolateData::builtin_entry_table_offset()));
@@ -4561,7 +4553,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// table.
bind(&if_code_is_off_heap);
LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
ShiftLeftP(destination, scratch, Operand(kSystemPointerSizeLog2));
ShiftLeftU64(destination, scratch, Operand(kSystemPointerSizeLog2));
AddP(destination, destination, kRootRegister);
LoadP(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
@@ -42,86 +42,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
// These exist to provide portability between 32- and 64-bit builds
#if V8_TARGET_ARCH_S390X
// The length of the arithmetic operation is the length
// of the register.
// Length:
// H = halfword
// W = word
// arithmetics and bitwise
#define AddMI agsi
#define AddRR agr
#define SubRR sgr
#define AndRR ngr
#define OrRR ogr
#define XorRR xgr
#define LoadComplementRR lcgr
#define LoadNegativeRR lngr
// Distinct Operands
#define AddP_RRR agrk
#define AddPImm_RRI aghik
#define AddLogicalP_RRR algrk
#define SubP_RRR sgrk
#define SubLogicalP_RRR slgrk
#define AndP_RRR ngrk
#define OrP_RRR ogrk
#define XorP_RRR xgrk
// Load / Store
#define LoadAndTestRR ltgr
// Compare
#define CmpPH cghi
#define CmpLogicalPW clgfi
// Shifts
#define ShiftLeftP sllg
#define ShiftRightP srlg
#define ShiftLeftArithP slag
#define ShiftRightArithP srag
#else
// arithmetics and bitwise
// Reg2Reg
#define AddMI asi
#define AddRR ar
#define SubRR sr
#define AndRR nr
#define OrRR or_z
#define XorRR xr
#define LoadComplementRR lcr
#define LoadNegativeRR lnr
// Distinct Operands
#define AddP_RRR ark
#define AddPImm_RRI ahik
#define AddLogicalP_RRR alrk
#define SubP_RRR srk
#define SubLogicalP_RRR slrk
#define AndP_RRR nrk
#define OrP_RRR ork
#define XorP_RRR xrk
// Load / Store
#define LoadAndTestRR ltr
// Compare
#define CmpPH chi
#define CmpLogicalPW clfi
// Shifts
#define ShiftLeftP ShiftLeft
#define ShiftRightP ShiftRight
#define ShiftLeftArithP ShiftLeftArith
#define ShiftRightArithP ShiftRightArith
#endif
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
@@ -448,14 +368,24 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchOnCount(Register r1, Label* l);
// Shifts
void ShiftLeft(Register dst, Register src, Register val);
void ShiftLeft(Register dst, Register src, const Operand& val);
void ShiftRight(Register dst, Register src, Register val);
void ShiftRight(Register dst, Register src, const Operand& val);
void ShiftLeftArith(Register dst, Register src, Register shift);
void ShiftLeftArith(Register dst, Register src, const Operand& val);
void ShiftRightArith(Register dst, Register src, Register shift);
void ShiftRightArith(Register dst, Register src, const Operand& val);
void ShiftLeftU32(Register dst, Register src, Register val,
const Operand& val2 = Operand::Zero());
void ShiftLeftU32(Register dst, Register src, const Operand& val);
void ShiftLeftU64(Register dst, Register src, Register val,
const Operand& val2 = Operand::Zero());
void ShiftLeftU64(Register dst, Register src, const Operand& val);
void ShiftRightU32(Register dst, Register src, Register val,
const Operand& val2 = Operand::Zero());
void ShiftRightU32(Register dst, Register src, const Operand& val);
void ShiftRightU64(Register dst, Register src, Register val,
const Operand& val2 = Operand::Zero());
void ShiftRightU64(Register dst, Register src, const Operand& val);
void ShiftRightS32(Register dst, Register src, Register shift,
const Operand& val2 = Operand::Zero());
void ShiftRightS32(Register dst, Register src, const Operand& val);
void ShiftRightS64(Register dst, Register src, Register shift,
const Operand& val2 = Operand::Zero());
void ShiftRightS64(Register dst, Register src, const Operand& val);
void ClearRightImm(Register dst, Register src, const Operand& val);
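Every renamed shift helper now comes in two overloads: an immediate form, and a register form whose trailing Operand is an optional displacement added to the shift amount, matching the RSY-format instructions (sllg, srlg, srag) they wrap. A hedged usage sketch with hypothetical registers:

// Immediate shift amount: dst = src << 3.
ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2));
// Register shift amount plus displacement: dst = src << (shift_reg + 1).
ShiftLeftU64(dst, src, shift_reg, Operand(1));

As the definitions above show, the immediate overloads simply forward to the register form with r0, which in a z/Architecture address computation means "no register", leaving only the displacement.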
@@ -894,7 +824,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Operand(shiftAmount), true);
} else {
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
ShiftRightU64(dst, src, Operand(rangeEnd));
else if (dst != src) // If we didn't shift, we might need to copy
mov(dst, src);
int width = rangeStart - rangeEnd + 1;
@@ -979,9 +909,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre31Bits()) {
ShiftRightArith(dst, src, Operand(kSmiShift));
ShiftRightS32(dst, src, Operand(kSmiShift));
} else {
ShiftRightArithP(dst, src, Operand(kSmiShift));
ShiftRightS64(dst, src, Operand(kSmiShift));
}
lgfr(dst, dst);
}
@@ -1249,16 +1179,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Shift left by kSmiShift
void SmiTag(Register reg) { SmiTag(reg, reg); }
void SmiTag(Register dst, Register src) {
ShiftLeftP(dst, src, Operand(kSmiShift));
ShiftLeftU64(dst, src, Operand(kSmiShift));
}
void SmiToPtrArrayOffset(Register dst, Register src) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
ShiftLeftP(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
#else
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
ShiftRightArithP(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
#endif
}
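Worked numbers behind those Smi shifts, assuming the usual V8 constants: with compressed pointers (31-bit Smis), kSmiShift = 1, so a Smi holds value << 1 and one more left shift by kSystemPointerSizeLog2 - kSmiShift = 2 yields the byte offset value * 8; with 32-bit Smis, kSmiShift = 32, and an arithmetic right shift by 32 - 3 = 29 turns value << 32 into the same value * 8. For example:

// 31-bit Smis: value 5 -> smi 10 -> ShiftLeftU64 by 2  -> offset 40
// 32-bit Smis: value 5 -> smi 5 << 32 -> ShiftRightS64 by 29 -> offset 40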
@@ -1569,7 +1569,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_ShiftLeft32:
// zero-ext
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeft), nullInstr, RRIInstr(ShiftLeft));
ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeftU32), nullInstr,
RRIInstr(ShiftLeftU32));
} else {
ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
}
@@ -1602,7 +1603,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_RotRight32: {
// zero-ext
if (HasRegisterInput(instr, 1)) {
__ LoadComplementRR(kScratchReg, i.InputRegister(1));
__ lcgr(kScratchReg, i.InputRegister(1));
__ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
__ rll(i.OutputRegister(), i.InputRegister(0),
@@ -4308,7 +4309,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ CmpLogicalP(input, Operand(case_count));
__ bge(GetLabel(i.InputRpo(1)));
__ larl(kScratchReg, table);
__ ShiftLeftP(r1, input, Operand(kSystemPointerSizeLog2));
__ ShiftLeftU64(r1, input, Operand(kSystemPointerSizeLog2));
__ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
__ Jump(kScratchReg);
}
@@ -23,13 +23,10 @@ namespace compiler {
V(S390_Xor64) \
V(S390_ShiftLeft32) \
V(S390_ShiftLeft64) \
V(S390_ShiftLeftPair) \
V(S390_ShiftRight32) \
V(S390_ShiftRight64) \
V(S390_ShiftRightPair) \
V(S390_ShiftRightArith32) \
V(S390_ShiftRightArith64) \
V(S390_ShiftRightArithPair) \
V(S390_RotRight32) \
V(S390_RotRight64) \
V(S390_Not32) \
@@ -40,15 +37,12 @@ namespace compiler {
V(S390_Lay) \
V(S390_Add32) \
V(S390_Add64) \
V(S390_AddPair) \
V(S390_AddFloat) \
V(S390_AddDouble) \
V(S390_Sub32) \
V(S390_Sub64) \
V(S390_SubFloat) \
V(S390_SubDouble) \
V(S390_SubPair) \
V(S390_MulPair) \
V(S390_Mul32) \
V(S390_Mul32WithOverflow) \
V(S390_Mul64) \
@@ -23,13 +23,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Xor64:
case kS390_ShiftLeft32:
case kS390_ShiftLeft64:
case kS390_ShiftLeftPair:
case kS390_ShiftRight32:
case kS390_ShiftRight64:
case kS390_ShiftRightPair:
case kS390_ShiftRightArith32:
case kS390_ShiftRightArith64:
case kS390_ShiftRightArithPair:
case kS390_RotRight32:
case kS390_RotRight64:
case kS390_Not32:
@@ -40,13 +37,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Lay:
case kS390_Add32:
case kS390_Add64:
case kS390_AddPair:
case kS390_AddFloat:
case kS390_AddDouble:
case kS390_Sub32:
case kS390_Sub64:
case kS390_SubPair:
case kS390_MulPair:
case kS390_SubFloat:
case kS390_SubDouble:
case kS390_Mul32:
@@ -155,11 +155,11 @@ void RegExpMacroAssemblerS390::AdvanceRegister(int reg, int by) {
DCHECK_GT(num_registers_, reg);
if (by != 0) {
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(by)) {
__ AddMI(register_location(reg), Operand(by));
__ agsi(register_location(reg), Operand(by));
} else {
__ LoadP(r2, register_location(reg), r0);
__ mov(r0, Operand(by));
__ AddRR(r2, r0);
__ agr(r2, r0);
__ StoreU64(r2, register_location(reg));
}
}
@@ -726,7 +726,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ mov(r1, r4);
__ SubP(r1, current_input_offset(), Operand(char_size()));
if (mode_ == UC16) {
__ ShiftLeftP(r0, r3, Operand(1));
__ ShiftLeftU64(r0, r3, Operand(1));
__ SubP(r1, r1, r0);
} else {
__ SubP(r1, r1, r3);
@@ -789,7 +789,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ SubP(r0, end_of_input_address(), r0);
// r0 is length of input in bytes.
if (mode_ == UC16) {
__ ShiftRightP(r0, r0, Operand(1));
__ ShiftRightU64(r0, r0, Operand(1));
}
// r0 is length of input in characters.
__ AddP(r0, r4);
@@ -805,10 +805,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// TODO(john.yan): Can be optimized by SIMD instructions
__ LoadMultipleP(r3, r6, register_location(i + 3));
if (mode_ == UC16) {
__ ShiftRightArithP(r3, r3, Operand(1));
__ ShiftRightArithP(r4, r4, Operand(1));
__ ShiftRightArithP(r5, r5, Operand(1));
__ ShiftRightArithP(r6, r6, Operand(1));
__ ShiftRightS64(r3, r3, Operand(1));
__ ShiftRightS64(r4, r4, Operand(1));
__ ShiftRightS64(r5, r5, Operand(1));
__ ShiftRightS64(r6, r6, Operand(1));
}
__ AddP(r3, r0);
__ AddP(r4, r0);
@@ -826,8 +826,8 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
} else {
__ LoadMultipleP(r3, r4, register_location(i + 1));
if (mode_ == UC16) {
__ ShiftRightArithP(r3, r3, Operand(1));
__ ShiftRightArithP(r4, r4, Operand(1));
__ ShiftRightS64(r3, r3, Operand(1));
__ ShiftRightS64(r4, r4, Operand(1));
}
__ AddP(r3, r0);
__ AddP(r4, r0);