Commit f9aa377d authored by Vasili Skurydzin, committed by Commit Bot

[ptr-compr], [s390x] kPointerSize changed to kSystemPointerSize in src to prepare for pointer compression

Change-Id: Ia459ad18a30fbfd8e51bd99735f0d63644d30b8f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1831788
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#64051}
parent f1bd1b6b
......@@ -103,7 +103,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// here which will cause scratch to become negative.
__ SubP(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
__ ShiftLeftP(r0, num_args, Operand(kSystemPointerSizeLog2));
__ CmpP(scratch, r0);
__ ble(stack_overflow); // Signed comparison.
}
......@@ -147,11 +147,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args);
__ ShiftLeftP(scratch, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(scratch, r2, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, scratch);
__ LoadRR(r1, r2);
__ bind(&loop);
__ lay(scratch, MemOperand(scratch, -kPointerSize));
__ lay(scratch, MemOperand(scratch, -kSystemPointerSize));
__ LoadP(r0, MemOperand(scratch, r6));
__ StoreP(r0, MemOperand(scratch, sp));
__ BranchOnCount(r1, &loop);
......@@ -177,7 +177,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(scratch, scratch);
__ AddP(sp, sp, scratch);
__ AddP(sp, sp, Operand(kPointerSize));
__ AddP(sp, sp, Operand(kSystemPointerSize));
__ Ret();
__ bind(&stack_overflow);
......@@ -213,11 +213,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r5);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
// -- sp[1*kPointerSize]: padding
// -- r3 and sp[2*kPointerSize]: constructor function
// -- sp[3*kPointerSize]: number of arguments (tagged)
// -- sp[4*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: new target
// -- sp[1*kSystemPointerSize]: padding
// -- r3 and sp[2*kSystemPointerSize]: constructor function
// -- sp[3*kSystemPointerSize]: number of arguments (tagged)
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
......@@ -239,11 +239,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2: receiver
// -- Slot 4 / sp[0*kPointerSize]: new target
// -- Slot 3 / sp[1*kPointerSize]: padding
// -- Slot 2 / sp[2*kPointerSize]: constructor function
// -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
// -- Slot 0 / sp[4*kPointerSize]: context
// -- Slot 4 / sp[0*kSystemPointerSize]: new target
// -- Slot 3 / sp[1*kSystemPointerSize]: padding
// -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
// -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
// -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
......@@ -259,12 +259,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r5: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: padding
// -- sp[3*kPointerSize]: constructor function
// -- sp[4*kPointerSize]: number of arguments (tagged)
// -- sp[5*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: implicit receiver
// -- sp[1*kSystemPointerSize]: implicit receiver
// -- sp[2*kSystemPointerSize]: padding
// -- sp[3*kSystemPointerSize]: constructor function
// -- sp[4*kSystemPointerSize]: number of arguments (tagged)
// -- sp[5*kSystemPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
......@@ -295,21 +295,21 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- r5: new target
// -- r6: pointer to last argument
// -- cr0: condition indicating whether r2 is zero
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: padding
// -- r3 and sp[3*kPointerSize]: constructor function
// -- sp[4*kPointerSize]: number of arguments (tagged)
// -- sp[5*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: implicit receiver
// -- sp[1*kSystemPointerSize]: implicit receiver
// -- sp[2*kSystemPointerSize]: padding
// -- r3 and sp[3*kSystemPointerSize]: constructor function
// -- sp[4*kSystemPointerSize]: number of arguments (tagged)
// -- sp[5*kSystemPointerSize]: context
// -----------------------------------
__ ltgr(r2, r2);
__ beq(&no_args);
__ ShiftLeftP(r8, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(r8, r2, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, r8);
__ LoadRR(r1, r2);
__ bind(&loop);
__ lay(r8, MemOperand(r8, -kPointerSize));
__ lay(r8, MemOperand(r8, -kSystemPointerSize));
__ LoadP(r0, MemOperand(r8, r6));
__ StoreP(r0, MemOperand(r8, sp));
__ BranchOnCount(r1, &loop);
......@@ -321,11 +321,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0: constructor result
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: padding
// -- sp[2*kPointerSize]: constructor function
// -- sp[3*kPointerSize]: number of arguments
// -- sp[4*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: implicit receiver
// -- sp[1*kSystemPointerSize]: padding
// -- sp[2*kSystemPointerSize]: constructor function
// -- sp[3*kSystemPointerSize]: number of arguments
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
......@@ -376,7 +376,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(r3, r3);
__ AddP(sp, sp, r3);
__ AddP(sp, sp, Operand(kPointerSize));
__ AddP(sp, sp, Operand(kSystemPointerSize));
__ Ret();
}
......@@ -465,16 +465,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done_loop;
__ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
__ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2));
__ SubP(sp, r5);
// ip = stack offset
// r5 = parameter array offset
__ LoadImmP(ip, Operand::Zero());
__ SubP(r5, Operand(kPointerSize));
__ SubP(r5, Operand(kSystemPointerSize));
__ blt(&done_loop);
__ lgfi(r1, Operand(-kPointerSize));
__ lgfi(r1, Operand(-kSystemPointerSize));
__ bind(&loop);
......@@ -483,7 +483,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreP(r0, MemOperand(sp, ip));
// update offsets
__ lay(ip, MemOperand(ip, kPointerSize));
__ lay(ip, MemOperand(ip, kSystemPointerSize));
__ BranchRelativeOnIdxHighP(r5, r1, &loop);
......@@ -550,9 +550,9 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
namespace {
constexpr int kPushedStackSpace =
(kNumCalleeSaved + 2) * kPointerSize +
kNumCalleeSavedDoubles * kDoubleSize + 5 * kPointerSize +
EntryFrameConstants::kCallerFPOffset - kPointerSize;
(kNumCalleeSaved + 2) * kSystemPointerSize +
kNumCalleeSavedDoubles * kDoubleSize + 5 * kSystemPointerSize +
EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
// Called with the native C calling convention. The corresponding function
// signature is either:
......@@ -607,9 +607,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Requires us to save the callee-preserved registers r6-r13
// General convention is to also save r14 (return addr) and
// sp/r15 as well in a single STM/STMG
__ lay(sp, MemOperand(sp, -10 * kPointerSize));
__ lay(sp, MemOperand(sp, -10 * kSystemPointerSize));
__ StoreMultipleP(r6, sp, MemOperand(sp, 0));
pushed_stack_space += (kNumCalleeSaved + 2) * kPointerSize;
pushed_stack_space += (kNumCalleeSaved + 2) * kSystemPointerSize;
// Initialize the root register.
// C calling convention. The first argument is passed in r2.
......@@ -625,8 +625,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// SMI Marker
// kCEntryFPAddress
// Frame type
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
pushed_stack_space += 5 * kPointerSize;
__ lay(sp, MemOperand(sp, -5 * kSystemPointerSize));
pushed_stack_space += 5 * kSystemPointerSize;
// Push a bad frame pointer to fail if it is used.
__ LoadImmP(r9, Operand(-1));
......@@ -637,16 +637,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Move(r6, ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate()));
__ LoadP(r6, MemOperand(r6));
__ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize));
__ StoreMultipleP(r6, r9, MemOperand(sp, kSystemPointerSize));
Register scrach = r8;
// Set up frame pointer for the frame to be pushed.
// Need to add kPointerSize, because sp has one extra
// Need to add kSystemPointerSize, because sp has one extra
// frame already for the frame type being pushed later.
__ lay(fp, MemOperand(
sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
pushed_stack_space += EntryFrameConstants::kCallerFPOffset - kPointerSize;
__ lay(fp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset +
kSystemPointerSize));
pushed_stack_space +=
EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
// restore r6
__ LoadRR(r6, r1);
......@@ -736,7 +737,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Reload callee-saved preserved regs, return address reg (r14) and sp
__ LoadMultipleP(r6, sp, MemOperand(sp, 0));
__ la(sp, MemOperand(sp, 10 * kPointerSize));
__ la(sp, MemOperand(sp, 10 * kSystemPointerSize));
// saving floating point registers
#if V8_TARGET_ARCH_S390X
......@@ -790,7 +791,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// here which will cause scratch1 to become negative.
__ SubP(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
__ ShiftLeftP(scratch2, argc, Operand(kPointerSizeLog2));
__ ShiftLeftP(scratch2, argc, Operand(kSystemPointerSizeLog2));
__ CmpP(scratch1, scratch2);
__ bgt(&okay); // Signed comparison.
......@@ -807,7 +808,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: function
// r5: receiver
// r6: argc
// [fp + kPushedStackSpace + 20 * kPointerSize]: argv
// [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv
// r0,r2,r7-r9, cp may be clobbered
// Enter an internal frame.
......@@ -831,7 +832,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r3: new.target
// r4: function
// r6: argc
// [fp + kPushedStackSpace + 20 * kPointerSize]: argv
// [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv
// r0,r2,r5,r7-r9, cp may be clobbered
// Setup new.target, argc and function.
......@@ -862,15 +863,15 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r9: scratch reg to hold index into argv
Label argLoop, argExit;
intptr_t zero = 0;
__ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2));
__ SubRR(sp, r7); // Buy the stack frame to fit args
__ LoadImmP(r9, Operand(zero)); // Initialize argv index
__ bind(&argLoop);
__ CmpPH(r7, Operand(zero));
__ beq(&argExit, Label::kNear);
__ lay(r7, MemOperand(r7, -kPointerSize));
__ lay(r7, MemOperand(r7, -kSystemPointerSize));
__ LoadP(r8, MemOperand(r9, r6)); // read next parameter
__ la(r9, MemOperand(r9, kPointerSize)); // r9++;
__ la(r9, MemOperand(r9, kSystemPointerSize)); // r9++;
__ LoadP(r0, MemOperand(r8)); // dereference handle
__ StoreP(r0, MemOperand(r7, sp)); // push parameter
__ b(&argLoop);
......@@ -1217,7 +1218,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
__ ShiftRightP(r4, r4, Operand(kSystemPointerSizeLog2));
__ LoadAndTestP(r4, r4);
__ beq(&no_args);
__ LoadRR(r1, r4);
......@@ -1236,7 +1237,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ CmpP(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
__ ShiftLeftP(r8, r8, Operand(kPointerSizeLog2));
__ ShiftLeftP(r8, r8, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
......@@ -1252,7 +1253,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
__ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2));
__ LoadP(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, r5));
__ Call(kJavaScriptCallCodeStartRegister);
......@@ -1298,11 +1299,11 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Label loop, skip;
__ CmpP(count, Operand::Zero());
__ beq(&skip);
__ AddP(index, index, Operand(kPointerSize)); // Bias up for LoadPU
__ AddP(index, index, Operand(kSystemPointerSize)); // Bias up for LoadPU
__ LoadRR(r0, count);
__ bind(&loop);
__ LoadP(scratch, MemOperand(index, -kPointerSize));
__ lay(index, MemOperand(index, -kPointerSize));
__ LoadP(scratch, MemOperand(index, -kSystemPointerSize));
__ lay(index, MemOperand(index, -kSystemPointerSize));
__ push(scratch);
__ SubP(r0, Operand(1));
__ bne(&loop);
......@@ -1476,7 +1477,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Register scratch = temps.Acquire();
__ LoadlB(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftP(scratch, scratch, Operand(kPointerSizeLog2));
__ ShiftLeftP(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ LoadP(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
......@@ -1542,7 +1543,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
for (int i = j - 1; i >= 0; --i) {
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
i * kPointerSize));
i * kSystemPointerSize));
__ push(r6);
}
for (int i = 0; i < 3 - j; ++i) {
......@@ -1591,9 +1592,10 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point.
__ StoreP(
r2, MemOperand(
sp, config->num_allocatable_general_registers() * kPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
r2,
MemOperand(sp, config->num_allocatable_general_registers() *
kSystemPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
......@@ -1709,16 +1711,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ LoadRR(r4, scratch);
__ LoadP(r3, MemOperand(new_sp, 0)); // receiver
__ CmpP(arg_size, Operand(kPointerSize));
__ CmpP(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
__ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
__ LoadP(scratch, MemOperand(new_sp, 1 * -kSystemPointerSize)); // thisArg
__ beq(&skip);
__ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
__ LoadP(r4, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argArray
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
......@@ -1767,7 +1769,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r2: actual number of arguments
// 2. Get the callable to call (passed as receiver) from the stack.
__ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(r4, r2, Operand(kSystemPointerSizeLog2));
__ LoadP(r3, MemOperand(sp, r4));
// 3. Shift arguments and return address one slot down on the stack
......@@ -1782,9 +1784,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ AddP(r4, sp, r4);
__ bind(&loop);
__ LoadP(scratch, MemOperand(r4, -kPointerSize));
__ LoadP(scratch, MemOperand(r4, -kSystemPointerSize));
__ StoreP(scratch, MemOperand(r4));
__ SubP(r4, Operand(kPointerSize));
__ SubP(r4, Operand(kSystemPointerSize));
__ CmpP(r4, sp);
__ bne(&loop);
// Adjust the actual number of arguments and remove the top element
......@@ -1814,19 +1816,20 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, RootIndex::kUndefinedValue);
__ LoadRR(scratch, r3);
__ LoadRR(r4, r3);
__ CmpP(arg_size, Operand(kPointerSize));
__ CmpP(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ LoadP(r3, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target
__ beq(&skip);
__ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ LoadP(scratch,
MemOperand(new_sp, 2 * -kSystemPointerSize)); // thisArgument
__ CmpP(arg_size, Operand(2 * kSystemPointerSize));
__ beq(&skip);
__ LoadP(r4, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
__ LoadP(r4, MemOperand(new_sp, 3 * -kSystemPointerSize)); // argumentsList
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
......@@ -1864,21 +1867,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label skip;
Register arg_size = r7;
Register new_sp = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, RootIndex::kUndefinedValue);
__ LoadRR(r4, r3);
__ LoadRR(r5, r3);
__ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined)
__ CmpP(arg_size, Operand(kPointerSize));
__ CmpP(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ LoadP(r3, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target
__ LoadRR(r5, r3); // new.target defaults to target
__ beq(&skip);
__ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ LoadP(r4, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argumentsList
__ CmpP(arg_size, Operand(2 * kSystemPointerSize));
__ beq(&skip);
__ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
__ LoadP(r5, MemOperand(new_sp, 3 * -kSystemPointerSize)); // new.target
__ bind(&skip);
__ LoadRR(sp, new_sp);
}
......@@ -1914,15 +1917,15 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Function
// ArgC as SMI
// Padding <--- New SP
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
__ lay(sp, MemOperand(sp, -5 * kSystemPointerSize));
// Cleanse the top nibble of 31-bit pointers.
__ CleanseP(r14);
__ StoreP(r14, MemOperand(sp, 4 * kPointerSize));
__ StoreP(fp, MemOperand(sp, 3 * kPointerSize));
__ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
__ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
__ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
__ StoreP(r14, MemOperand(sp, 4 * kSystemPointerSize));
__ StoreP(fp, MemOperand(sp, 3 * kSystemPointerSize));
__ StoreP(r6, MemOperand(sp, 2 * kSystemPointerSize));
__ StoreP(r3, MemOperand(sp, 1 * kSystemPointerSize));
__ StoreP(r2, MemOperand(sp, 0 * kSystemPointerSize));
__ Push(Smi::zero()); // Padding.
__ la(fp,
MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
......@@ -1935,7 +1938,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
__ LoadP(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
int stack_adjustment = kPointerSize; // adjust for receiver
int stack_adjustment = kSystemPointerSize; // adjust for receiver
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
__ SmiToPtrArrayOffset(r3, r3);
__ lay(sp, MemOperand(sp, r3));
......@@ -1983,12 +1986,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label loop, no_args, skip;
__ CmpP(r6, Operand::Zero());
__ beq(&no_args);
__ AddP(r4, r4,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ AddP(
r4, r4,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kSystemPointerSize));
__ LoadRR(r1, r6);
__ bind(&loop);
__ LoadP(scratch, MemOperand(r4, kPointerSize));
__ la(r4, MemOperand(r4, kPointerSize));
__ LoadP(scratch, MemOperand(r4, kSystemPointerSize));
__ la(r4, MemOperand(r4, kSystemPointerSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
......@@ -2072,11 +2076,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
{
Label loop;
__ AddP(r6, r6, Operand(kPointerSize));
__ AddP(r6, r6, Operand(kSystemPointerSize));
__ AddP(r2, r2, r7);
__ bind(&loop);
{
__ ShiftLeftP(scratch, r7, Operand(kPointerSizeLog2));
__ ShiftLeftP(scratch, r7, Operand(kSystemPointerSizeLog2));
__ LoadP(scratch, MemOperand(r6, scratch));
__ push(scratch);
__ SubP(r7, r7, Operand(1));
......@@ -2134,7 +2138,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(r5);
} else {
Label convert_to_object, convert_receiver;
__ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(r5, r2, Operand(kSystemPointerSizeLog2));
__ LoadP(r5, MemOperand(sp, r5));
__ JumpIfSmi(r5, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
......@@ -2171,7 +2175,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(r6, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(sp, r6));
}
__ bind(&done_convert);
......@@ -2228,7 +2232,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label done;
__ LoadRR(scratch, sp); // preserve previous stack pointer
__ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
__ ShiftLeftP(r9, r6, Operand(kSystemPointerSizeLog2));
__ SubP(sp, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
......@@ -2258,7 +2262,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ LoadP(r0, MemOperand(scratch, r7));
__ StoreP(r0, MemOperand(sp, r7));
__ AddP(r7, r7, Operand(kPointerSize));
__ AddP(r7, r7, Operand(kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ bind(&skip);
}
......@@ -2270,10 +2274,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ AddP(r4, r4, r9);
__ LoadRR(r1, r6);
__ bind(&loop);
__ LoadP(r0, MemOperand(r4, -kPointerSize));
__ lay(r4, MemOperand(r4, -kPointerSize));
__ LoadP(r0, MemOperand(r4, -kSystemPointerSize));
__ lay(r4, MemOperand(r4, -kSystemPointerSize));
__ StoreP(r0, MemOperand(sp, r7));
__ AddP(r7, r7, Operand(kPointerSize));
__ AddP(r7, r7, Operand(kSystemPointerSize));
__ BranchOnCount(r1, &loop);
__ AddP(r2, r2, r6);
}
......@@ -2293,7 +2297,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
__ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r5, MemOperand(sp, r1));
// Push the [[BoundArguments]] onto the stack.
......@@ -2335,7 +2339,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver the (original) target.
__ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r3, MemOperand(sp, r7));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
......@@ -2449,7 +2453,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
__ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
__ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2));
__ StoreP(r3, MemOperand(sp, r7));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
......@@ -2504,8 +2508,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(r2, r2);
__ AddP(r2, fp);
// adjust for return address and receiver
__ AddP(r2, r2, Operand(2 * kPointerSize));
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ AddP(r2, r2, Operand(2 * kSystemPointerSize));
__ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
__ SubP(r6, r2, r6);
// Copy the arguments (including the receiver) to the new stack frame.
......@@ -2520,7 +2524,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ LoadP(r0, MemOperand(r2, 0));
__ push(r0);
__ CmpP(r2, r6); // Compare before moving to next argument.
__ lay(r2, MemOperand(r2, -kPointerSize));
__ lay(r2, MemOperand(r2, -kSystemPointerSize));
__ bne(&copy);
__ b(&invoke);
......@@ -2548,22 +2552,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
__ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
__ LoadP(r0, MemOperand(r2, 2 * kSystemPointerSize));
__ push(r0);
__ CmpP(r2, fp); // Compare before moving to next argument.
__ lay(r2, MemOperand(r2, -kPointerSize));
__ lay(r2, MemOperand(r2, -kSystemPointerSize));
__ bne(&copy);
// Fill the remaining expected arguments with undefined.
// r3: function
// r4: expected number of argumentus
__ LoadRoot(r0, RootIndex::kUndefinedValue);
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
__ SubP(r6, fp, r6);
// Adjust for frame.
__ SubP(r6, r6,
Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
kPointerSize));
kSystemPointerSize));
Label fill;
__ bind(&fill);
......@@ -2608,7 +2612,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Remove superfluous parameters from the stack.
__ SubP(r6, r2, r4);
__ lgr(r2, r4);
__ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
__ ShiftLeftP(r6, r6, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r6));
__ b(&dont_adapt_arguments);
}
......@@ -2708,8 +2712,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ LoadRR(r3, r4);
} else {
// Compute the argv pointer.
__ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
__ lay(r3, MemOperand(r3, sp, -kPointerSize));
__ ShiftLeftP(r3, r2, Operand(kSystemPointerSizeLog2));
__ lay(r3, MemOperand(r3, sp, -kSystemPointerSize));
}
// Enter the exit frame that transitions from JavaScript to C++.
......@@ -2751,7 +2755,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// by one register each.
__ LoadRR(r4, r3);
__ LoadRR(r3, r2);
__ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
__ la(r2,
MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
isolate_reg = r5;
// Clang doesn't preserve r2 (result buffer)
// write to r8 (preserved) before entry
......@@ -2765,7 +2770,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
__ LoadRR(r2, r8);
__ LoadP(r3, MemOperand(r2, kPointerSize));
__ LoadP(r3, MemOperand(r2, kSystemPointerSize));
__ LoadP(r2, MemOperand(r2));
}
......@@ -2870,7 +2875,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Push(result_reg, scratch);
// Account for saved regs.
int argument_offset = 2 * kPointerSize;
int argument_offset = 2 * kSystemPointerSize;
// Load double input.
__ LoadDouble(double_scratch, MemOperand(sp, argument_offset));
......@@ -2884,7 +2889,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Push(scratch_high, scratch_low);
// Account for saved regs.
argument_offset += 2 * kPointerSize;
argument_offset += 2 * kSystemPointerSize;
__ LoadlW(scratch_high,
MemOperand(sp, argument_offset + Register::kExponentOffset));
......@@ -2958,7 +2963,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ bind(&done);
__ Pop(scratch_high, scratch_low);
argument_offset -= 2 * kPointerSize;
argument_offset -= 2 * kSystemPointerSize;
__ bind(&fastpath_done);
__ StoreP(result_reg, MemOperand(sp, argument_offset));
......@@ -3159,33 +3164,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
// sp[0 * kPointerSize]: kHolder
// sp[1 * kPointerSize]: kIsolate
// sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
// sp[3 * kPointerSize]: undefined (kReturnValue)
// sp[4 * kPointerSize]: kData
// sp[5 * kPointerSize]: undefined (kNewTarget)
// sp[0 * kSystemPointerSize]: kHolder
// sp[1 * kSystemPointerSize]: kIsolate
// sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
// sp[3 * kSystemPointerSize]: undefined (kReturnValue)
// sp[4 * kSystemPointerSize]: kData
// sp[5 * kSystemPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
__ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kPointerSize)));
__ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kSystemPointerSize)));
// kHolder.
__ StoreP(holder, MemOperand(sp, 0 * kPointerSize));
__ StoreP(holder, MemOperand(sp, 0 * kSystemPointerSize));
// kIsolate.
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ StoreP(scratch, MemOperand(sp, 1 * kPointerSize));
__ StoreP(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
__ StoreP(scratch, MemOperand(sp, 3 * kPointerSize));
__ StoreP(scratch, MemOperand(sp, 2 * kSystemPointerSize));
__ StoreP(scratch, MemOperand(sp, 3 * kSystemPointerSize));
// kData.
__ StoreP(call_data, MemOperand(sp, 4 * kPointerSize));
__ StoreP(call_data, MemOperand(sp, 4 * kSystemPointerSize));
// kNewTarget.
__ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
__ StoreP(scratch, MemOperand(sp, 5 * kSystemPointerSize));
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
......@@ -3207,33 +3212,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
__ StoreP(scratch,
MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
kSystemPointerSize));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ AddP(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
__ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2));
__ AddP(scratch, scratch,
Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
__ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2));
__ AddP(scratch, scratch, r1);
__ StoreP(scratch,
MemOperand(sp, (kStackFrameExtraParamSlot + 2) * kPointerSize));
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
kSystemPointerSize));
// FunctionCallbackInfo::length_.
__ StoreW(argc,
MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kPointerSize));
__ StoreW(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) *
kSystemPointerSize));
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
__ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2));
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2));
__ AddP(scratch, r1);
__ StoreP(scratch,
MemOperand(sp, (kStackFrameExtraParamSlot + 4) * kPointerSize));
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
kSystemPointerSize));
// v8::InvocationCallback's argument.
__ lay(r2,
MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
......@@ -3241,11 +3247,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// TODO(jgruber): Document what these arguments are.
static constexpr int kStackSlotsAboveFCA = 2;
MemOperand return_value_operand(
fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
static constexpr int kUseStackSpaceOperand = 0;
MemOperand stack_space_operand(
sp, (kStackFrameExtraParamSlot + 4) * kPointerSize);
sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
......@@ -3293,7 +3299,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ LoadRR(r2, sp); // r2 = Handle<Name>
__ AddP(r3, r2, Operand(1 * kPointerSize)); // r3 = v8::PCI::args_
__ AddP(r3, r2, Operand(1 * kSystemPointerSize)); // r3 = v8::PCI::args_
// If ABI passes Handles (pointer-sized struct) in a register:
//
......@@ -3321,14 +3327,14 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
if (!ABI_PASSES_HANDLES_IN_REGS) {
// pass 1st arg by reference
__ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
__ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
__ StoreP(r2, MemOperand(sp, arg0Slot * kSystemPointerSize));
__ AddP(r2, sp, Operand(arg0Slot * kSystemPointerSize));
}
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
__ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
__ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
__ StoreP(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
__ AddP(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize));
// r3 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
......@@ -3340,7 +3346,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
fp,
(PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
......
......@@ -51,7 +51,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == kSaveFPRegs) {
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
......@@ -76,7 +76,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == kSaveFPRegs) {
MultiPushDoubles(kCallerSavedDoubles);
......@@ -107,7 +107,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPop(list);
bytes += NumRegs(list) * kPointerSize;
bytes += NumRegs(list) * kSystemPointerSize;
return bytes;
}
......@@ -116,8 +116,8 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
const uint32_t offset =
FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
const uint32_t offset = FixedArray::kHeaderSize +
constant_index * kSystemPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
......@@ -258,7 +258,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Drop(int count) {
if (count > 0) {
int total = count * kPointerSize;
int total = count * kSystemPointerSize;
if (is_uint12(total)) {
la(sp, MemOperand(sp, total));
} else if (is_int20(total)) {
......@@ -270,7 +270,7 @@ void TurboAssembler::Drop(int count) {
}
// Pops |count| pointer-sized slots off the stack: sp += count * slot size.
// |scratch| is clobbered to hold the byte distance.
void TurboAssembler::Drop(Register count, Register scratch) {
  // Convert the slot count into a byte offset (count << log2(slot size)).
  ShiftLeftP(scratch, count, Operand(kSystemPointerSizeLog2));
  AddP(sp, sp, scratch);
}
......@@ -367,12 +367,12 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kPointerSize;
int16_t stack_offset = num_to_push * kSystemPointerSize;
SubP(location, location, Operand(stack_offset));
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
stack_offset -= kSystemPointerSize;
StoreP(ToRegister(i), MemOperand(location, stack_offset));
}
}
......@@ -384,7 +384,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
LoadP(ToRegister(i), MemOperand(location, stack_offset));
stack_offset += kPointerSize;
stack_offset += kSystemPointerSize;
}
}
AddP(location, location, Operand(stack_offset));
......@@ -439,13 +439,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
// of the object, so so offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
// of the object, so the offset must be a multiple of kSystemPointerSize.
DCHECK(IsAligned(offset, kSystemPointerSize));
lay(dst, MemOperand(object, offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
AndP(r0, dst, Operand(kPointerSize - 1));
AndP(r0, dst, Operand(kSystemPointerSize - 1));
beq(&ok, Label::kNear);
stop();
bind(&ok);
......@@ -632,7 +632,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
Push(r14, fp);
fp_delta = 0;
}
la(fp, MemOperand(sp, fp_delta * kPointerSize));
la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}
void TurboAssembler::PopCommonFrame(Register marker_reg) {
......@@ -653,7 +653,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Push(r14, fp, cp);
fp_delta = 1;
}
la(fp, MemOperand(sp, fp_delta * kPointerSize));
la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
}
void TurboAssembler::RestoreFrameStateForTailCall() {
......@@ -1082,9 +1082,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
DCHECK_GT(stack_space, 0);
// This is an opportunity to build a frame to wrap
......@@ -1117,7 +1117,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// since the sp slot and code slot were pushed after the fp.
}
lay(sp, MemOperand(sp, -stack_space * kPointerSize));
lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize));
// Allocate and align the frame preparing for calling the runtime
// function.
......@@ -1127,11 +1127,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
}
lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
StoreP(MemOperand(sp), Operand::Zero(), r0);
// Set the exit frame sp value to point just before the return address
// location.
lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
lay(r1, MemOperand(sp, kStackFrameSPSlot * kSystemPointerSize));
StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
......@@ -1184,7 +1184,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count.is_valid()) {
if (!argument_count_is_length) {
ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
ShiftLeftP(argument_count, argument_count,
Operand(kSystemPointerSizeLog2));
}
la(sp, MemOperand(sp, argument_count));
}
......@@ -1211,22 +1212,24 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
#endif
// Calculate the end of destination area where we will put the arguments
// after we drop current frame. We AddP kPointerSize to count the receiver
// argument which is not included into formal parameters count.
// after we drop current frame. We AddP kSystemPointerSize to count the
// receiver argument which is not included into formal parameters count.
Register dst_reg = scratch0;
ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kSystemPointerSizeLog2));
AddP(dst_reg, fp, dst_reg);
AddP(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
// Calculate the end of source area. +kSystemPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
ShiftLeftP(src_reg, callee_args_count.reg(),
Operand(kSystemPointerSizeLog2));
AddP(src_reg, sp, src_reg);
AddP(src_reg, src_reg, Operand(kPointerSize));
AddP(src_reg, src_reg, Operand(kSystemPointerSize));
} else {
mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
mov(src_reg,
Operand((callee_args_count.immediate() + 1) * kSystemPointerSize));
AddP(src_reg, src_reg, sp);
}
......@@ -1253,10 +1256,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
}
LoadRR(r1, tmp_reg);
bind(&loop);
LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
lay(src_reg, MemOperand(src_reg, -kPointerSize));
lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
StoreP(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
lay(src_reg, MemOperand(src_reg, -kSystemPointerSize));
lay(dst_reg, MemOperand(dst_reg, -kSystemPointerSize));
BranchOnCount(r1, &loop);
// Leave current frame.
......@@ -1342,10 +1345,11 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
if (actual.is_reg()) {
ShiftLeftP(r6, actual.reg(), Operand(kPointerSizeLog2));
ShiftLeftP(r6, actual.reg(), Operand(kSystemPointerSizeLog2));
LoadP(r6, MemOperand(sp, r6));
} else {
LoadP(r6, MemOperand(sp, actual.immediate() << kPointerSizeLog2), ip);
LoadP(r6, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2),
ip);
}
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
......@@ -1469,8 +1473,8 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
// Link the current handler as the next handler.
Move(r7,
......@@ -1485,13 +1489,13 @@ void MacroAssembler::PushStackHandler() {
// Copy the old handler into the next handler slot.
MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
Operand(kPointerSize));
Operand(kSystemPointerSize));
// Set this new handler as the current one.
StoreP(sp, MemOperand(r7));
}
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
// Pop the Next Handler into r3 and store it into Handler Address reference.
......@@ -1838,18 +1842,19 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots;
if (frame_alignment > kPointerSize) {
if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for stack arguments
// -- preserving original value of sp.
LoadRR(scratch, sp);
lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
StoreP(scratch,
MemOperand(sp, (stack_passed_arguments)*kSystemPointerSize));
} else {
stack_space += stack_passed_arguments;
}
lay(sp, MemOperand(sp, (-stack_space) * kPointerSize));
lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize));
}
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
......@@ -1939,11 +1944,11 @@ void TurboAssembler::CallCFunctionHelper(Register function,
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
if (ActivationFrameAlignment() > kPointerSize) {
if (ActivationFrameAlignment() > kSystemPointerSize) {
// Load the original stack pointer (pre-alignment) from the stack
LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
} else {
la(sp, MemOperand(sp, stack_space * kPointerSize));
la(sp, MemOperand(sp, stack_space * kSystemPointerSize));
}
}
......@@ -1961,20 +1966,20 @@ void TurboAssembler::CheckPageFlag(
uint32_t shifted_mask = mask;
// Determine the byte offset to be tested
if (mask <= 0x80) {
byte_offset = kPointerSize - 1;
byte_offset = kSystemPointerSize - 1;
} else if (mask < 0x8000) {
byte_offset = kPointerSize - 2;
byte_offset = kSystemPointerSize - 2;
shifted_mask = mask >> 8;
} else if (mask < 0x800000) {
byte_offset = kPointerSize - 3;
byte_offset = kSystemPointerSize - 3;
shifted_mask = mask >> 16;
} else {
byte_offset = kPointerSize - 4;
byte_offset = kSystemPointerSize - 4;
shifted_mask = mask >> 24;
}
#if V8_TARGET_LITTLE_ENDIAN
// Reverse the byte_offset if emulating on little endian platform
byte_offset = kPointerSize - byte_offset - 1;
byte_offset = kSystemPointerSize - byte_offset - 1;
#endif
tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
Operand(shifted_mask));
......@@ -4153,7 +4158,7 @@ void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
// Clear right most # of bits
void TurboAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
int numBitsToClear = val.immediate() % (kPointerSize * 8);
int numBitsToClear = val.immediate() % (kSystemPointerSize * 8);
// Try to use RISBG if possible
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
......@@ -4431,7 +4436,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Label return_label;
larl(r14, &return_label); // Generate the return addr of call later.
StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
// zLinux ABI requires caller's frame to have sufficient space for callee
// preserved register save area.
......
......@@ -515,26 +515,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// Pushes a double register: reserve one slot below sp, then store.
// (The stripped-diff duplicate sp decrement is removed — keeping both the
// kPointerSize and kSystemPointerSize lines would drop sp by two slots.)
void push(DoubleRegister src) {
  lay(sp, MemOperand(sp, -kSystemPointerSize));
  StoreDouble(src, MemOperand(sp));
}
// Pushes a general-purpose register: reserve one slot below sp, then store.
void push(Register src) {
  lay(sp, MemOperand(sp, -kSystemPointerSize));
  StoreP(src, MemOperand(sp));
}
// Pops a double register: load from the top of stack, then release the slot.
void pop(DoubleRegister dst) {
  LoadDouble(dst, MemOperand(sp));
  la(sp, MemOperand(sp, kSystemPointerSize));
}
// Pops a general-purpose register: load from the top of stack, release slot.
void pop(Register dst) {
  LoadP(dst, MemOperand(sp));
  la(sp, MemOperand(sp, kSystemPointerSize));
}
// Discards the top stack slot without reading it.
void pop() { la(sp, MemOperand(sp, kSystemPointerSize)); }
void Push(Register src) { push(src); }
......@@ -544,25 +544,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
// Pushes two registers in one sp adjustment; src1 lands at the higher
// address (i.e. src1 is conceptually pushed first).
void Push(Register src1, Register src2) {
  lay(sp, MemOperand(sp, -kSystemPointerSize * 2));
  StoreP(src1, MemOperand(sp, kSystemPointerSize));
  StoreP(src2, MemOperand(sp, 0));
}
// Push three registers. Pushes leftmost register first (to highest address).
// Pushes three registers in one sp adjustment; src1 lands at the highest
// address, src3 at the lowest (top of stack).
void Push(Register src1, Register src2, Register src3) {
  lay(sp, MemOperand(sp, -kSystemPointerSize * 3));
  StoreP(src1, MemOperand(sp, kSystemPointerSize * 2));
  StoreP(src2, MemOperand(sp, kSystemPointerSize));
  StoreP(src3, MemOperand(sp, 0));
}
// Push four registers. Pushes leftmost register first (to highest address).
// Pushes four registers in one sp adjustment; src1 lands at the highest
// address, src4 at the lowest (top of stack).
void Push(Register src1, Register src2, Register src3, Register src4) {
  lay(sp, MemOperand(sp, -kSystemPointerSize * 4));
  StoreP(src1, MemOperand(sp, kSystemPointerSize * 3));
  StoreP(src2, MemOperand(sp, kSystemPointerSize * 2));
  StoreP(src3, MemOperand(sp, kSystemPointerSize));
  StoreP(src4, MemOperand(sp, 0));
}
......@@ -580,11 +580,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DCHECK(src3 != src5);
DCHECK(src4 != src5);
lay(sp, MemOperand(sp, -kPointerSize * 5));
StoreP(src1, MemOperand(sp, kPointerSize * 4));
StoreP(src2, MemOperand(sp, kPointerSize * 3));
StoreP(src3, MemOperand(sp, kPointerSize * 2));
StoreP(src4, MemOperand(sp, kPointerSize));
lay(sp, MemOperand(sp, -kSystemPointerSize * 5));
StoreP(src1, MemOperand(sp, kSystemPointerSize * 4));
StoreP(src2, MemOperand(sp, kSystemPointerSize * 3));
StoreP(src3, MemOperand(sp, kSystemPointerSize * 2));
StoreP(src4, MemOperand(sp, kSystemPointerSize));
StoreP(src5, MemOperand(sp, 0));
}
......@@ -593,36 +593,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pop two registers. Pops rightmost register first (from lower address).
// Pops two registers; src2 comes from the top of stack (lower address),
// mirroring the order Push(src1, src2) stored them.
void Pop(Register src1, Register src2) {
  LoadP(src2, MemOperand(sp, 0));
  LoadP(src1, MemOperand(sp, kSystemPointerSize));
  la(sp, MemOperand(sp, 2 * kSystemPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
// Pops three registers; src3 comes from the top of stack (lowest address),
// mirroring the order Push(src1, src2, src3) stored them.
void Pop(Register src1, Register src2, Register src3) {
  LoadP(src3, MemOperand(sp, 0));
  LoadP(src2, MemOperand(sp, kSystemPointerSize));
  LoadP(src1, MemOperand(sp, 2 * kSystemPointerSize));
  la(sp, MemOperand(sp, 3 * kSystemPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
// Pops four registers; src4 comes from the top of stack (lowest address),
// mirroring the order Push(src1..src4) stored them.
void Pop(Register src1, Register src2, Register src3, Register src4) {
  LoadP(src4, MemOperand(sp, 0));
  LoadP(src3, MemOperand(sp, kSystemPointerSize));
  LoadP(src2, MemOperand(sp, 2 * kSystemPointerSize));
  LoadP(src1, MemOperand(sp, 3 * kSystemPointerSize));
  la(sp, MemOperand(sp, 4 * kSystemPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
// Pops five registers; src5 comes from the top of stack (lowest address),
// mirroring the order the five-register Push stored them.
void Pop(Register src1, Register src2, Register src3, Register src4,
         Register src5) {
  LoadP(src5, MemOperand(sp, 0));
  LoadP(src4, MemOperand(sp, kSystemPointerSize));
  LoadP(src3, MemOperand(sp, 2 * kSystemPointerSize));
  LoadP(src2, MemOperand(sp, 3 * kSystemPointerSize));
  LoadP(src1, MemOperand(sp, 4 * kSystemPointerSize));
  la(sp, MemOperand(sp, 5 * kSystemPointerSize));
}
// Push a fixed frame, consisting of lr, fp, constant pool.
......@@ -1183,11 +1183,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Converts a tagged Smi in |src| into a byte offset into a pointer array,
// writing the result to |dst|. Depending on the Smi layout, the tag shift
// is either smaller (shift left by the difference) or larger (arithmetic
// shift right by the difference) than the pointer-size log2.
void SmiToPtrArrayOffset(Register dst, Register src) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
  ShiftLeftP(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
#else
  STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
  ShiftRightArithP(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
#endif
}
......@@ -1208,7 +1208,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_LITTLE_ENDIAN
// On little-endian targets, adjust the offset to address the half-word that
// holds the Smi value (the other half is the tag/padding half).
#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
......
......@@ -40,7 +40,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Push all GPRs onto the stack
__ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
__ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
__ mov(r1, Operand(ExternalReference::Create(
......@@ -48,7 +48,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ StoreP(fp, MemOperand(r1));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
// The bailout id is passed using r10
__ LoadRR(r4, r10);
......@@ -79,7 +79,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
__ mov(r7, Operand(ExternalReference::isolate_address(isolate)));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// Call Deoptimizer::New().
{
......@@ -94,13 +94,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
// __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
// MemOperand(sp), kNumberOfRegisters * kPointerSize);
// MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// TODO(john.yan): optimize the following code by using mvc instruction
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ LoadP(r4, MemOperand(sp, i * kPointerSize));
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r4, MemOperand(r3, offset));
}
......@@ -110,7 +111,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
// TODO(joransiu): MVC opportunity
__ LoadDouble(d0, MemOperand(sp, src_offset));
__ StoreDouble(d0, MemOperand(r3, dst_offset));
......@@ -145,7 +147,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ bind(&pop_loop);
__ pop(r6);
__ StoreP(r6, MemOperand(r5, 0));
__ la(r5, MemOperand(r5, kPointerSize));
__ la(r5, MemOperand(r5, kSystemPointerSize));
__ bind(&pop_loop_header);
__ CmpP(r4, sp);
__ bne(&pop_loop);
......@@ -169,7 +171,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// r3 = one past the last FrameDescription**.
__ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
__ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
__ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
__ AddP(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
......@@ -189,7 +191,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ CmpP(r5, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
__ AddP(r6, r6, Operand(kPointerSize));
__ AddP(r6, r6, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ CmpP(r6, r3);
__ blt(&outer_push_loop);
......@@ -211,7 +213,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Restore the registers from the last output frame.
__ LoadRR(r1, r4);
for (int i = kNumberOfRegisters - 1; i > 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(r1, offset));
}
......
......@@ -14,7 +14,7 @@ namespace internal {
// Frame-layout constants for the JSEntry stub frame on s390.
class EntryFrameConstants : public AllStatic {
 public:
  // Caller FP sits one slot below the fixed frame area.
  static constexpr int kCallerFPOffset =
      -(StandardFrameConstants::kFixedFrameSizeFromFp + kSystemPointerSize);

  // Stack offsets for arguments passed to JSEntry.
  static constexpr int kArgvOffset = 20 * kSystemPointerSize;
};
......@@ -25,13 +25,13 @@ class ExitFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(1);
// The caller fields are below the frame pointer on the stack.
static constexpr int kCallerFPOffset = 0 * kPointerSize;
static constexpr int kCallerFPOffset = 0 * kSystemPointerSize;
// The calling JS function is below FP.
static constexpr int kCallerPCOffset = 1 * kPointerSize;
static constexpr int kCallerPCOffset = 1 * kSystemPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
static constexpr int kCallerSPDisplacement = 2 * kSystemPointerSize;
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
......@@ -47,7 +47,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kPointerSize +
kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
......@@ -56,13 +56,13 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static constexpr int kLocal0Offset =
StandardFrameConstants::kExpressionsOffset;
static constexpr int kLastParameterOffset = +2 * kPointerSize;
static constexpr int kLastParameterOffset = +2 * kSystemPointerSize;
static constexpr int kFunctionOffset =
StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static constexpr int kParam0Offset = -2 * kPointerSize;
static constexpr int kReceiverOffset = -1 * kPointerSize;
static constexpr int kParam0Offset = -2 * kSystemPointerSize;
static constexpr int kReceiverOffset = -1 * kSystemPointerSize;
};
} // namespace internal
......
......@@ -208,7 +208,7 @@ void RegExpMacroAssemblerS390::CheckGreedyLoop(Label* on_equal) {
Label backtrack_non_equal;
__ CmpP(current_input_offset(), MemOperand(backtrack_stackpointer(), 0));
__ bne(&backtrack_non_equal);
__ AddP(backtrack_stackpointer(), Operand(kPointerSize));
__ AddP(backtrack_stackpointer(), Operand(kSystemPointerSize));
BranchOrBacktrack(al, on_equal);
__ bind(&backtrack_non_equal);
......@@ -639,11 +639,11 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Requires us to save the callee-preserved registers r6-r13
// General convention is to also save r14 (return addr) and
// sp/r15 as well in a single STM/STMG
__ StoreMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize));
__ StoreMultipleP(r6, sp, MemOperand(sp, 6 * kSystemPointerSize));
// Load stack parameters from caller stack frame
__ LoadMultipleP(r7, r9,
MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ LoadMultipleP(
r7, r9, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
// r7 = capture array size
// r8 = stack area base
// r9 = direct call
......@@ -658,7 +658,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ LoadRR(frame_pointer(), sp);
__ lay(sp, MemOperand(sp, -10 * kPointerSize));
__ lay(sp, MemOperand(sp, -10 * kSystemPointerSize));
__ mov(r1, Operand::Zero()); // success counter
__ LoadRR(r0, r1); // offset of location
__ StoreMultipleP(r0, r9, MemOperand(sp, 0));
......@@ -676,7 +676,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ ble(&stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
__ CmpLogicalP(r2, Operand(num_registers_ * kPointerSize));
__ CmpLogicalP(r2, Operand(num_registers_ * kSystemPointerSize));
__ bge(&stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
......@@ -692,7 +692,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ bind(&stack_ok);
// Allocate space on stack for registers.
__ lay(sp, MemOperand(sp, (-num_registers_ * kPointerSize)));
__ lay(sp, MemOperand(sp, (-num_registers_ * kSystemPointerSize)));
// Load string end.
__ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Load input start.
......@@ -735,12 +735,13 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1
if (num_saved_registers_ > 8) {
// One slot beyond address of register 0.
__ lay(r3, MemOperand(frame_pointer(), kRegisterZero + kPointerSize));
__ lay(r3,
MemOperand(frame_pointer(), kRegisterZero + kSystemPointerSize));
__ Load(r4, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
__ StoreP(r1, MemOperand(r3, -kPointerSize));
__ lay(r3, MemOperand(r3, -kPointerSize));
__ StoreP(r1, MemOperand(r3, -kSystemPointerSize));
__ lay(r3, MemOperand(r3, -kSystemPointerSize));
__ BranchOnCount(r4, &init_loop);
} else {
for (int i = 0; i < num_saved_registers_; i++) {
......@@ -875,7 +876,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Skip sp past regexp registers and local variables..
__ LoadRR(sp, frame_pointer());
// Restore registers r6..r15.
__ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize));
__ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kSystemPointerSize));
__ b(r14);
......@@ -1091,17 +1092,19 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
// Code of self.
__ mov(r3, Operand(masm_->CodeObject()));
// r2 becomes return address pointer.
__ lay(r2, MemOperand(sp, kStackFrameRASlot * kPointerSize));
__ lay(r2, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
__ mov(ip, Operand(stack_guard_check));
__ StoreReturnAddressAndCall(ip);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
__ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
__ LoadP(
sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize)));
} else {
__ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
__ la(sp,
MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize)));
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
......@@ -1110,7 +1113,7 @@ void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
DCHECK_EQ(kPointerSize, sizeof(T));
DCHECK_EQ(kSystemPointerSize, sizeof(T));
#ifdef V8_TARGET_ARCH_S390X
return reinterpret_cast<T&>(Memory<uint64_t>(re_frame + frame_offset));
#else
......@@ -1144,7 +1147,7 @@ MemOperand RegExpMacroAssemblerS390::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
kRegisterZero - register_index * kPointerSize);
kRegisterZero - register_index * kSystemPointerSize);
}
void RegExpMacroAssemblerS390::CheckPosition(int cp_offset,
......@@ -1204,7 +1207,7 @@ void RegExpMacroAssemblerS390::SafeCallTarget(Label* name) {
// Pushes |source| onto the regexp backtrack stack: the backtrack stack grows
// downwards, so the stack pointer is first decremented by one pointer slot
// (kSystemPointerSize) and the register is then stored at the new top.
void RegExpMacroAssemblerS390::Push(Register source) {
  // |source| must not alias the backtrack stack pointer, since the store
  // below would otherwise write the (already adjusted) pointer value itself.
  DCHECK(source != backtrack_stackpointer());
  // Pre-decrement: LAY computes the new address without touching flags.
  __ lay(backtrack_stackpointer(),
         MemOperand(backtrack_stackpointer(), -kSystemPointerSize));
  // Store the value at the new top of the backtrack stack.
  __ StoreP(source, MemOperand(backtrack_stackpointer()));
}
......@@ -1212,7 +1215,7 @@ void RegExpMacroAssemblerS390::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ LoadP(target, MemOperand(backtrack_stackpointer()));
__ la(backtrack_stackpointer(),
MemOperand(backtrack_stackpointer(), kPointerSize));
MemOperand(backtrack_stackpointer(), kSystemPointerSize));
}
void RegExpMacroAssemblerS390::CheckPreemption() {
......@@ -1239,13 +1242,15 @@ void RegExpMacroAssemblerS390::CallCFunctionUsingStub(
__ mov(code_pointer(), Operand(function));
Label ret;
__ larl(r14, &ret);
__ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
__ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize));
__ b(code_pointer());
__ bind(&ret);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
__ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
__ LoadP(
sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize)));
} else {
__ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
__ la(sp,
MemOperand(sp, (kNumRequiredStackFrameSlots * kSystemPointerSize)));
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
}
......
......@@ -95,26 +95,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
kStoredRegisters + kCalleeRegisterSaveAreaSize;
// Stack parameters placed by caller.
static const int kCaptureArraySize = kCallerFrame;
static const int kStackAreaBase = kCallerFrame + kPointerSize;
static const int kStackAreaBase = kCallerFrame + kSystemPointerSize;
// kDirectCall again
static const int kIsolate = kStackAreaBase + 2 * kPointerSize;
static const int kIsolate = kStackAreaBase + 2 * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kDirectCall = kFramePointer - kPointerSize;
static const int kStackHighEnd = kDirectCall - kPointerSize;
static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
static const int kInputEnd = kRegisterOutput - kPointerSize;
static const int kInputStart = kInputEnd - kPointerSize;
static const int kStartIndex = kInputStart - kPointerSize;
static const int kInputString = kStartIndex - kPointerSize;
static const int kDirectCall = kFramePointer - kSystemPointerSize;
static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
static const int kInputStart = kInputEnd - kSystemPointerSize;
static const int kStartIndex = kInputStart - kSystemPointerSize;
static const int kInputString = kStartIndex - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
static const int kRegisterZero = kStringStartMinusOne - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment