Commit 634a29a0 authored by Igor Sheludko, committed by Commit Bot

[cleanup][ia32] Fix kPointerSize in ia32 builtins

Bug: v8:8477, v8:8834
Change-Id: If613bc4a32cdce68d9bcf747bf0bf528e3c2a90c
Reviewed-on: https://chromium-review.googlesource.com/c/1473290
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59620}
parent b3e0efc7
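
For context, a minimal sketch of the size constants this cleanup distinguishes, with the values they take on ia32 (assumed here for illustration; the real definitions live in V8's globals headers). On ia32 the rename is behavior-preserving, but it keeps pointer-sized quantities separate from tagged-sized ones for the pointer compression work tracked in the bugs above:

constexpr int kSystemPointerSize = 4;  // machine word / stack slot; sizeof(void*) on ia32
constexpr int kTaggedSize = 4;         // width of an on-heap tagged value
constexpr int kSmiTagSize = 1;         // a Smi stores its value shifted left by one

static_assert(kTaggedSize == kSystemPointerSize,
              "on ia32 tagged values and pointers have the same width, so "
              "replacing kPointerSize with kSystemPointerSize or kTaggedSize "
              "generates the same code");
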
......@@ -111,13 +111,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- edx: new target
// -- esi: pointer to last argument
// -- ecx: counter
// -- sp[0*kPointerSize]: the hole (receiver)
// -- sp[1*kPointerSize]: number of arguments (tagged)
// -- sp[2*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: the hole (receiver)
// -- sp[1*kSystemPointerSize]: number of arguments (tagged)
// -- sp[2*kSystemPointerSize]: context
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
__ push(Operand(esi, ecx, times_4, 0));
__ push(Operand(esi, ecx, times_system_pointer_size, 0));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
......@@ -141,7 +141,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ PopReturnAddressTo(ecx);
__ lea(esp, Operand(esp, edx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ lea(esp, Operand(esp, edx, times_half_system_pointer_size,
1 * kSystemPointerSize)); // 1 ~ receiver
__ PushReturnAddressFrom(ecx);
__ ret(0);
}
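
The return sequence above scales edx, which still holds the Smi-tagged argument count, by times_half_system_pointer_size. A small worked sketch of why that drops exactly count * kSystemPointerSize bytes plus one receiver slot (constants assumed for ia32, helper names hypothetical):

constexpr int kSystemPointerSize = 4;
constexpr int kSmiTagSize = 1;  // a Smi is the value shifted left by one

constexpr int SmiTag(int value) { return value << kSmiTagSize; }

// times_half_system_pointer_size is a scale of 2, so the Smi tag shift and the
// byte scaling cancel out; the extra kSystemPointerSize skips the receiver.
constexpr int BytesDroppedByLea(int tagged_count) {
  return tagged_count * (kSystemPointerSize / 2) + 1 * kSystemPointerSize;
}

static_assert(BytesDroppedByLea(SmiTag(3)) == (3 + 1) * kSystemPointerSize,
              "three arguments plus the receiver are removed");
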
......@@ -159,11 +160,11 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
__ mov(scratch, __ ExternalReferenceAsOperand(real_stack_limit, scratch));
__ sub(scratch, esp);
// Add the size of the arguments.
static_assert(kPointerSize == 4,
"The next instruction assumes kPointerSize == 4");
__ lea(scratch, Operand(scratch, num_args, times_4, 0));
static_assert(kSystemPointerSize == 4,
"The next instruction assumes kSystemPointerSize == 4");
__ lea(scratch, Operand(scratch, num_args, times_system_pointer_size, 0));
if (include_receiver) {
__ add(scratch, Immediate(kPointerSize));
__ add(scratch, Immediate(kSystemPointerSize));
}
// See if we overflowed, i.e. scratch is positive.
__ cmp(scratch, Immediate(0));
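
A sketch of the condition Generate_StackOverflowCheck evaluates, mirroring the comments above (assumption: the "real" stack limit is the lowest address the stack may grow down to; the helper name is made up):

#include <cstdint>

constexpr int kSystemPointerSize = 4;

bool WouldOverflow(uintptr_t esp, uintptr_t real_stack_limit, int num_args,
                   bool include_receiver) {
  uintptr_t needed = static_cast<uintptr_t>(num_args) * kSystemPointerSize +
                     (include_receiver ? kSystemPointerSize : 0);
  // The assembly computes scratch = real_stack_limit - esp + needed and treats
  // a positive result as overflow, i.e. pushing the arguments would move esp
  // below the limit.
  return esp < real_stack_limit + needed;
}
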
......@@ -197,11 +198,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(edx);
// ----------- S t a t e -------------
// -- sp[0*kPointerSize]: new target
// -- sp[1*kPointerSize]: padding
// -- edi and sp[2*kPointerSize]: constructor function
// -- sp[3*kPointerSize]: argument count
// -- sp[4*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: new target
// -- sp[1*kSystemPointerSize]: padding
// -- edi and sp[2*kSystemPointerSize]: constructor function
// -- sp[3*kSystemPointerSize]: argument count
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
......@@ -222,11 +223,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: implicit receiver
// -- Slot 4 / sp[0*kPointerSize]: new target
// -- Slot 3 / sp[1*kPointerSize]: padding
// -- Slot 2 / sp[2*kPointerSize]: constructor function
// -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
// -- Slot 0 / sp[4*kPointerSize]: context
// -- Slot 4 / sp[0*kSystemPointerSize]: new target
// -- Slot 3 / sp[1*kSystemPointerSize]: padding
// -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
// -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
// -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
......@@ -244,12 +245,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx: new target
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: padding
// -- sp[3*kPointerSize]: constructor function
// -- sp[4*kPointerSize]: number of arguments (tagged)
// -- sp[5*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: implicit receiver
// -- sp[1*kSystemPointerSize]: implicit receiver
// -- sp[2*kSystemPointerSize]: padding
// -- sp[3*kSystemPointerSize]: constructor function
// -- sp[4*kSystemPointerSize]: number of arguments (tagged)
// -- sp[5*kSystemPointerSize]: context
// -----------------------------------
// Restore argument count.
......@@ -282,16 +283,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- edx: new target
// -- edi: pointer to last argument
// -- ecx: counter (tagged)
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: padding
// -- sp[3*kPointerSize]: constructor function
// -- sp[4*kPointerSize]: number of arguments (tagged)
// -- sp[5*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: implicit receiver
// -- sp[1*kSystemPointerSize]: implicit receiver
// -- sp[2*kSystemPointerSize]: padding
// -- sp[3*kSystemPointerSize]: constructor function
// -- sp[4*kSystemPointerSize]: number of arguments (tagged)
// -- sp[5*kSystemPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
__ Push(Operand(edi, ecx, times_pointer_size, 0));
__ Push(Operand(edi, ecx, times_system_pointer_size, 0));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
......@@ -303,11 +304,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: constructor result
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: padding
// -- sp[2*kPointerSize]: constructor function
// -- sp[3*kPointerSize]: number of arguments
// -- sp[4*kPointerSize]: context
// -- sp[0*kSystemPointerSize]: implicit receiver
// -- sp[1*kSystemPointerSize]: padding
// -- sp[2*kSystemPointerSize]: constructor function
// -- sp[3*kSystemPointerSize]: number of arguments
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
......@@ -344,7 +345,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
__ mov(eax, Operand(esp, 0 * kPointerSize));
__ mov(eax, Operand(esp, 0 * kSystemPointerSize));
__ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_frame);
......@@ -355,7 +356,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ pop(ecx);
__ lea(esp, Operand(esp, edx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ lea(esp, Operand(esp, edx, times_half_system_pointer_size,
1 * kSystemPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
}
......@@ -397,7 +399,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ push(Immediate(StackFrame::TypeToMarker(type)));
// Reserve a slot for the context. It is filled after the root register has
// been set up.
__ sub(esp, Immediate(kPointerSize));
__ sub(esp, Immediate(kSystemPointerSize));
// Save callee-saved registers (C calling conventions).
__ push(edi);
__ push(esi);
......@@ -418,7 +420,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
__ mov(edi, __ ExternalReferenceAsOperand(context_address, edi));
static constexpr int kOffsetToContextSlot = -2 * kPointerSize;
static constexpr int kOffsetToContextSlot = -2 * kSystemPointerSize;
__ mov(Operand(ebp, kOffsetToContextSlot), edi);
// If this is the outermost JS call, set js_entry_sp value.
......@@ -478,7 +480,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ pop(ebx);
__ pop(esi);
__ pop(edi);
__ add(esp, Immediate(2 * kPointerSize)); // remove markers
__ add(esp, Immediate(2 * kSystemPointerSize)); // remove markers
// Restore frame pointer and return.
__ pop(ebp);
......@@ -545,7 +547,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ jmp(&entry, Label::kNear);
__ bind(&loop);
// Push the parameter from argv.
__ mov(scratch2, Operand(scratch1, ecx, times_4, 0));
__ mov(scratch2, Operand(scratch1, ecx, times_system_pointer_size, 0));
__ push(Operand(scratch2, 0)); // dereference handle
__ inc(ecx);
__ bind(&entry);
......@@ -671,8 +673,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(edi, ecx);
__ j(greater_equal, &done_loop);
__ Push(
FieldOperand(ebx, edi, times_pointer_size, FixedArray::kHeaderSize));
__ Push(FieldOperand(ebx, edi, times_system_pointer_size,
FixedArray::kHeaderSize));
__ add(edi, Immediate(1));
__ jmp(&loop);
......@@ -942,7 +944,8 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
#undef JUMP_IF_EQUAL
// Otherwise, load the size of the current bytecode and advance the offset.
__ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
__ add(bytecode_offset,
Operand(bytecode_size_table, bytecode, times_int_size, 0));
}
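
The hunk above switches the scale from times_4 to times_int_size because the bytecode size table is indexed as an array of plain ints; on ia32 both scales are 4, so the change only makes the intent explicit. A rough model (the table's element type is an assumption here):

// Each entry is assumed to hold the size in bytes of one bytecode; advancing
// the interpreter's offset just adds the size of the current bytecode.
int AdvanceBytecodeOffsetModel(int bytecode_offset,
                               const int* bytecode_size_table, int bytecode) {
  return bytecode_offset + bytecode_size_table[bytecode];
}
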
// Generate code for entering a JS function with the interpreter.
......@@ -1054,7 +1057,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(eax);
// Continue loop if not done.
__ bind(&loop_check);
__ sub(frame_size, Immediate(kPointerSize));
__ sub(frame_size, Immediate(kSystemPointerSize));
__ j(greater_equal, &loop_header);
}
......@@ -1066,7 +1069,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ test(eax, eax);
__ j(zero, &no_incoming_new_target_or_generator_register);
__ mov(Operand(ebp, eax, times_pointer_size, 0), edx);
__ mov(Operand(ebp, eax, times_system_pointer_size, 0), edx);
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator and bytecode offset into registers.
......@@ -1083,9 +1086,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ movzx_b(ecx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(
kJavaScriptCallCodeStartRegister,
Operand(kInterpreterDispatchTableRegister, ecx, times_pointer_size, 0));
__ mov(kJavaScriptCallCodeStartRegister,
Operand(kInterpreterDispatchTableRegister, ecx,
times_system_pointer_size, 0));
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
......@@ -1129,7 +1132,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ jmp(&loop_check);
__ bind(&loop_header);
__ Push(Operand(start_address, 0));
__ sub(start_address, Immediate(kPointerSize));
__ sub(start_address, Immediate(kSystemPointerSize));
__ bind(&loop_check);
__ cmp(start_address, array_limit);
__ j(greater, &loop_header, Label::kNear);
......@@ -1171,7 +1174,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Find the address of the last argument.
__ shl(scratch, kPointerSizeLog2);
__ shl(scratch, kSystemPointerSizeLog2);
__ neg(scratch);
__ add(scratch, argv);
Generate_InterpreterPushArgs(masm, scratch, argv);
......@@ -1228,7 +1231,8 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 1 - Update the stack pointer.
__ lea(scratch1, Operand(num_args, times_4, kPointerSize));
__ lea(scratch1,
Operand(num_args, times_system_pointer_size, kSystemPointerSize));
__ AllocateStackFrame(scratch1);
// Step 2 move return_address and slots around it to the correct locations.
......@@ -1236,16 +1240,16 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// basically when the source and destination overlap. We at least need one
// extra slot for receiver, so no extra checks are required to avoid copy.
for (int i = 0; i < num_slots_to_move + 1; i++) {
__ mov(scratch1,
Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
__ mov(Operand(esp, i * kPointerSize), scratch1);
__ mov(scratch1, Operand(esp, num_args, times_system_pointer_size,
(i + 1) * kSystemPointerSize));
__ mov(Operand(esp, i * kSystemPointerSize), scratch1);
}
// Step 3 copy arguments to correct locations.
// Slot meant for receiver contains return address. Reset it so that
// we will not incorrectly interpret return address as an object.
__ mov(Operand(esp, num_args, times_pointer_size,
(num_slots_to_move + 1) * kPointerSize),
__ mov(Operand(esp, num_args, times_system_pointer_size,
(num_slots_to_move + 1) * kSystemPointerSize),
Immediate(0));
__ mov(scratch1, num_args);
......@@ -1253,10 +1257,10 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ jmp(&loop_check);
__ bind(&loop_header);
__ mov(scratch2, Operand(start_addr, 0));
__ mov(Operand(esp, scratch1, times_pointer_size,
num_slots_to_move * kPointerSize),
__ mov(Operand(esp, scratch1, times_system_pointer_size,
num_slots_to_move * kSystemPointerSize),
scratch2);
__ sub(start_addr, Immediate(kPointerSize));
__ sub(start_addr, Immediate(kSystemPointerSize));
__ sub(scratch1, Immediate(1));
__ bind(&loop_check);
__ cmp(scratch1, Immediate(0));
......@@ -1406,8 +1410,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ movzx_b(scratch, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(kJavaScriptCallCodeStartRegister,
Operand(kInterpreterDispatchTableRegister, scratch, times_pointer_size,
0));
Operand(kInterpreterDispatchTableRegister, scratch,
times_system_pointer_size, 0));
__ jmp(kJavaScriptCallCodeStartRegister);
}
......@@ -1470,8 +1474,8 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ j(not_equal, &over, Label::kNear);
}
for (int i = j - 1; i >= 0; --i) {
__ Push(Operand(
ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
__ Push(Operand(ebp, StandardFrameConstants::kCallerSPOffset +
i * kSystemPointerSize));
}
for (int i = 0; i < 3 - j; ++i) {
__ PushRoot(RootIndex::kUndefinedValue);
......@@ -1495,7 +1499,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ PopReturnAddressTo(edx);
__ inc(ecx);
__ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
__ lea(esp, Operand(esp, ecx, times_system_pointer_size, 0));
__ PushReturnAddressFrom(edx);
__ ret(0);
......@@ -1522,9 +1526,9 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point.
__ mov(Operand(esp,
config->num_allocatable_general_registers() * kPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize),
__ mov(Operand(esp, config->num_allocatable_general_registers() *
kSystemPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize),
eax);
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
......@@ -1538,9 +1542,10 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
ebp,
Operand(esp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
const int offsetToPC =
BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp - kPointerSize;
BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
kSystemPointerSize;
__ pop(Operand(esp, offsetToPC));
__ Drop(offsetToPC / kPointerSize);
__ Drop(offsetToPC / kSystemPointerSize);
__ add(Operand(esp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ ret(0);
}
......@@ -1572,8 +1577,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ ret(1 * kPointerSize); // Remove eax.
__ mov(eax, Operand(esp, 1 * kSystemPointerSize));
__ ret(1 * kSystemPointerSize); // Remove eax.
}
// static
......@@ -1592,22 +1597,25 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
Label no_arg_array, no_this_arg;
// Spill receiver to allow the usage of edi as a scratch register.
__ movd(xmm0, Operand(esp, eax, times_pointer_size, kPointerSize));
__ movd(xmm0,
Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ LoadRoot(edx, RootIndex::kUndefinedValue);
__ mov(edi, edx);
__ test(eax, eax);
__ j(zero, &no_this_arg, Label::kNear);
{
__ mov(edi, Operand(esp, eax, times_pointer_size, 0));
__ mov(edi, Operand(esp, eax, times_system_pointer_size, 0));
__ cmp(eax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
__ mov(edx, Operand(esp, eax, times_pointer_size, -kPointerSize));
__ mov(edx,
Operand(esp, eax, times_system_pointer_size, -kSystemPointerSize));
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
__ PopReturnAddressTo(ecx);
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ lea(esp,
Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ Push(edi);
__ PushReturnAddressFrom(ecx);
......@@ -1669,7 +1677,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
// 2. Get the callable to call (passed as receiver) from the stack.
__ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
__ mov(edi, Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
......@@ -1678,8 +1686,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Label loop;
__ mov(ecx, eax);
__ bind(&loop);
__ mov(edx, Operand(esp, ecx, times_pointer_size, 0));
__ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), edx);
__ mov(edx, Operand(esp, ecx, times_system_pointer_size, 0));
__ mov(Operand(esp, ecx, times_system_pointer_size, kSystemPointerSize),
edx);
__ dec(ecx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
__ pop(edx); // Discard copy of return address.
......@@ -1710,19 +1719,23 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
__ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
__ mov(edi, Operand(esp, eax, times_system_pointer_size,
-0 * kSystemPointerSize));
__ j(equal, &done, Label::kNear);
__ mov(ecx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
__ mov(ecx, Operand(esp, eax, times_system_pointer_size,
-1 * kSystemPointerSize));
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
__ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
__ mov(edx, Operand(esp, eax, times_system_pointer_size,
-2 * kSystemPointerSize));
__ bind(&done);
// Spill argumentsList to use edx as a scratch register.
__ movd(xmm0, edx);
__ PopReturnAddressTo(edx);
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ lea(esp,
Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ Push(ecx);
__ PushReturnAddressFrom(edx);
......@@ -1767,20 +1780,24 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ mov(ecx, edi);
__ cmp(eax, Immediate(1));
__ j(below, &done, Label::kNear);
__ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
__ mov(edi, Operand(esp, eax, times_system_pointer_size,
-0 * kSystemPointerSize));
__ mov(edx, edi);
__ j(equal, &done, Label::kNear);
__ mov(ecx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
__ mov(ecx, Operand(esp, eax, times_system_pointer_size,
-1 * kSystemPointerSize));
__ cmp(eax, Immediate(3));
__ j(below, &done, Label::kNear);
__ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
__ mov(edx, Operand(esp, eax, times_system_pointer_size,
-2 * kSystemPointerSize));
__ bind(&done);
// Spill argumentsList to use ecx as a scratch register.
__ movd(xmm0, ecx);
__ PopReturnAddressTo(ecx);
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ lea(esp,
Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(ecx);
......@@ -1864,7 +1881,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Remove caller arguments from the stack.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ PopReturnAddressTo(ecx);
__ lea(esp, Operand(esp, edi, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ lea(esp, Operand(esp, edi, times_half_system_pointer_size,
1 * kSystemPointerSize)); // 1 ~ receiver
__ PushReturnAddressFrom(ecx);
}
......@@ -1927,7 +1945,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmp(eax, kArgumentsLength);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
__ mov(edi, FieldOperand(kArgumentsList, eax, times_pointer_size,
__ mov(edi, FieldOperand(kArgumentsList, eax, times_system_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(edi, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
......@@ -2030,7 +2048,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ PopReturnAddressTo(ecx);
__ bind(&loop);
{
__ Push(Operand(scratch, edx, times_pointer_size, 1 * kPointerSize));
__ Push(Operand(scratch, edx, times_system_pointer_size,
1 * kSystemPointerSize));
__ dec(edx);
__ j(not_zero, &loop);
}
......@@ -2090,13 +2109,15 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(ecx);
} else {
Label convert_to_object, convert_receiver;
__ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
__ mov(ecx,
Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
__ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx); // Clobbers ecx.
__ j(above_equal, &done_convert);
// Reload the receiver (it was clobbered by CmpObjectType).
__ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
__ mov(ecx,
Operand(esp, eax, times_system_pointer_size, kSystemPointerSize));
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
__ JumpIfRoot(ecx, RootIndex::kUndefinedValue, &convert_global_proxy,
......@@ -2132,7 +2153,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
__ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize),
ecx);
}
__ bind(&done_convert);
......@@ -2187,7 +2209,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
__ lea(ecx, Operand(edx, times_pointer_size, 0));
__ lea(ecx, Operand(edx, times_system_pointer_size, 0));
__ sub(esp, ecx);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
......@@ -2195,7 +2217,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ CompareRealStackLimit(esp);
__ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
__ lea(esp, Operand(esp, edx, times_pointer_size, 0));
__ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
......@@ -2211,10 +2233,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ Set(ecx, 0);
__ lea(edx, Operand(esp, edx, times_pointer_size, 0));
__ lea(edx, Operand(esp, edx, times_system_pointer_size, 0));
__ bind(&loop);
__ movd(xmm1, Operand(edx, ecx, times_pointer_size, 0));
__ movd(Operand(esp, ecx, times_pointer_size, 0), xmm1);
__ movd(xmm1, Operand(edx, ecx, times_system_pointer_size, 0));
__ movd(Operand(esp, ecx, times_system_pointer_size, 0), xmm1);
__ inc(ecx);
__ cmp(ecx, eax);
__ j(less, &loop);
......@@ -2228,9 +2250,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SmiUntag(edx);
__ bind(&loop);
__ dec(edx);
__ movd(xmm1, FieldOperand(ecx, edx, times_pointer_size,
__ movd(xmm1, FieldOperand(ecx, edx, times_tagged_size,
FixedArray::kHeaderSize));
__ movd(Operand(esp, eax, times_pointer_size, 0), xmm1);
__ movd(Operand(esp, eax, times_system_pointer_size, 0), xmm1);
__ lea(eax, Operand(eax, 1));
__ j(greater, &loop);
}
......@@ -2257,7 +2279,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
__ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
__ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize), ecx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
......@@ -2305,7 +2327,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// not we raise an exception).
__ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
__ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
__ Jump(masm->isolate()->builtins()->CallFunction(
......@@ -2424,7 +2446,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
__ mov(Operand(esp, eax, times_system_pointer_size, kSystemPointerSize),
edi);
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
__ Jump(masm->isolate()->builtins()->CallFunction(),
......@@ -2465,14 +2488,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
__ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
__ mov(eax, -1); // account for receiver
Label copy;
__ bind(&copy);
__ inc(eax);
__ push(Operand(edi, 0));
__ sub(edi, Immediate(kPointerSize));
__ sub(edi, Immediate(kSystemPointerSize));
__ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &copy);
// eax now contains the expected number of arguments.
......@@ -2492,7 +2515,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
__ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
// ecx = expected - actual.
__ sub(kExpectedNumberOfArgumentsRegister, eax);
// eax = -actual - 1
......@@ -2503,7 +2526,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ inc(eax);
__ push(Operand(edi, 0));
__ sub(edi, Immediate(kPointerSize));
__ sub(edi, Immediate(kSystemPointerSize));
__ test(eax, eax);
__ j(not_zero, &copy);
......@@ -2713,10 +2736,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CheckStackAlignment();
}
// Call C function.
__ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ mov(Operand(esp, 0 * kSystemPointerSize), edi); // argc.
__ mov(Operand(esp, 1 * kSystemPointerSize), esi); // argv.
__ Move(ecx, Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 2 * kPointerSize), ecx);
__ mov(Operand(esp, 2 * kSystemPointerSize), ecx);
__ call(kRuntimeCallFunctionRegister);
// Result is in eax or edx:eax - do not destroy these registers!
......@@ -2766,11 +2789,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, eax);
__ mov(Operand(esp, 0 * kPointerSize), Immediate(0)); // argc.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(0)); // argv.
__ mov(Operand(esp, 0 * kSystemPointerSize), Immediate(0)); // argc.
__ mov(Operand(esp, 1 * kSystemPointerSize), Immediate(0)); // argv.
__ Move(esi,
Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 2 * kPointerSize), esi);
__ mov(Operand(esp, 2 * kSystemPointerSize), esi);
__ CallCFunction(find_handler, 3);
}
......@@ -2798,7 +2821,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label check_negative, process_64_bits, done;
// Account for return address and saved regs.
const int kArgumentOffset = 4 * kPointerSize;
const int kArgumentOffset = 4 * kSystemPointerSize;
MemOperand mantissa_operand(MemOperand(esp, kArgumentOffset));
MemOperand exponent_operand(
......@@ -2934,7 +2957,7 @@ namespace {
// Generates an Operand for saving parameters after PrepareCallApiFunction.
Operand ApiParameterOperand(int index) {
return Operand(esp, index * kPointerSize);
return Operand(esp, index * kSystemPointerSize);
}
// Prepares stack to put arguments (aligns and so on). Reserves
......@@ -2952,7 +2975,7 @@ void PrepareCallApiFunction(MacroAssembler* masm, int argc, Register scratch) {
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers esi, edi and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
// stack_space * kSystemPointerSize (GCed).
void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
ExternalReference thunk_ref,
Operand thunk_last_arg, int stack_space,
......@@ -3080,7 +3103,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
if (stack_space_operand == nullptr) {
DCHECK_NE(stack_space, 0);
__ ret(stack_space * kPointerSize);
__ ret(stack_space * kSystemPointerSize);
} else {
DCHECK_EQ(0, stack_space);
__ pop(ecx);
......@@ -3149,43 +3172,43 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// esp[0]: return address
//
// Target state:
// esp[0 * kPointerSize]: return address
// esp[1 * kPointerSize]: kHolder
// esp[2 * kPointerSize]: kIsolate
// esp[3 * kPointerSize]: undefined (kReturnValueDefaultValue)
// esp[4 * kPointerSize]: undefined (kReturnValue)
// esp[5 * kPointerSize]: kData
// esp[6 * kPointerSize]: undefined (kNewTarget)
// esp[0 * kSystemPointerSize]: return address
// esp[1 * kSystemPointerSize]: kHolder
// esp[2 * kSystemPointerSize]: kIsolate
// esp[3 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
// esp[4 * kSystemPointerSize]: undefined (kReturnValue)
// esp[5 * kSystemPointerSize]: kData
// esp[6 * kSystemPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
__ sub(esp, Immediate(FCA::kArgsLength * kPointerSize));
__ sub(esp, Immediate(FCA::kArgsLength * kSystemPointerSize));
// Return address (the old stack location is overwritten later on).
__ mov(scratch, Operand(esp, FCA::kArgsLength * kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), scratch);
__ mov(scratch, Operand(esp, FCA::kArgsLength * kSystemPointerSize));
__ mov(Operand(esp, 0 * kSystemPointerSize), scratch);
// kHolder.
__ mov(Operand(esp, 1 * kPointerSize), holder);
__ mov(Operand(esp, 1 * kSystemPointerSize), holder);
// kIsolate.
__ Move(scratch,
Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 2 * kPointerSize), scratch);
__ mov(Operand(esp, 2 * kSystemPointerSize), scratch);
// kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ mov(Operand(esp, 3 * kPointerSize), scratch);
__ mov(Operand(esp, 4 * kPointerSize), scratch);
__ mov(Operand(esp, 3 * kSystemPointerSize), scratch);
__ mov(Operand(esp, 4 * kSystemPointerSize), scratch);
// kData.
__ movd(Operand(esp, 5 * kPointerSize), call_data);
__ movd(Operand(esp, 5 * kSystemPointerSize), call_data);
// kNewTarget.
__ mov(Operand(esp, 6 * kPointerSize), scratch);
__ mov(Operand(esp, 6 * kSystemPointerSize), scratch);
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
__ lea(scratch, Operand(esp, 1 * kPointerSize));
__ lea(scratch, Operand(esp, 1 * kSystemPointerSize));
// The API function takes a reference to v8::Arguments. If the CPU profiler
// is enabled, a wrapper function will be called and we need to pass
......@@ -3204,8 +3227,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ lea(scratch, Operand(scratch, argc, times_pointer_size,
(FCA::kArgsLength - 1) * kPointerSize));
__ lea(scratch, Operand(scratch, argc, times_system_pointer_size,
(FCA::kArgsLength - 1) * kSystemPointerSize));
__ mov(ApiParameterOperand(kApiArgc + 1), scratch);
// FunctionCallbackInfo::length_.
......@@ -3214,8 +3237,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ lea(scratch,
Operand(argc, times_pointer_size,
(FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
Operand(argc, times_system_pointer_size,
(FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ mov(ApiParameterOperand(kApiArgc + 3), scratch);
// v8::InvocationCallback's argument.
......@@ -3228,7 +3251,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// the stored ebp (pushed by EnterApiExitFrame), and the return address.
static constexpr int kStackSlotsAboveFCA = 2;
Operand return_value_operand(
ebp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
ebp,
(kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
static constexpr int kUseStackSpaceOperand = 0;
Operand stack_space_operand = ApiParameterOperand(kApiArgc + 3);
......@@ -3278,15 +3302,15 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
PrepareCallApiFunction(masm, kApiArgc, scratch);
// Load address of v8::PropertyAccessorInfo::args_ array. The value in ebp
// here corresponds to esp + kPointersize before PrepareCallApiFunction.
__ lea(scratch, Operand(ebp, kPointerSize + 2 * kPointerSize));
// here corresponds to esp + kSystemPointerSize before PrepareCallApiFunction.
__ lea(scratch, Operand(ebp, kSystemPointerSize + 2 * kSystemPointerSize));
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
Operand info_object = ApiParameterOperand(3);
__ mov(info_object, scratch);
// Name as handle.
__ sub(scratch, Immediate(kPointerSize));
__ sub(scratch, Immediate(kSystemPointerSize));
__ mov(ApiParameterOperand(0), scratch);
// Arguments pointer.
__ lea(scratch, info_object);
......@@ -3303,7 +3327,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
ebp,
(PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
Operand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
kStackUnwindSpace, kUseStackSpaceConstant,
......@@ -3394,9 +3419,9 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// esp[4]: First argument, destination pointer.
// esp[0]: return address
const int kDestinationOffset = 1 * kPointerSize;
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
const int kDestinationOffset = 1 * kSystemPointerSize;
const int kSourceOffset = 2 * kSystemPointerSize;
const int kSizeOffset = 3 * kSystemPointerSize;
// When copying up to this many bytes, use special "small" handlers.
const size_t kSmallCopySize = 8;
......@@ -3414,7 +3439,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
Label forward_much_overlap, small_size, medium_size, pop_and_return;
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
stack_offset += 2 * kSystemPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
......
......@@ -4012,7 +4012,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Label* const table = AddJumpTable(cases, case_count);
__ cmp(input, Immediate(case_count));
__ j(above_equal, GetLabel(i.InputRpo(1)));
__ jmp(Operand::JumpTable(input, times_4, table));
__ jmp(Operand::JumpTable(input, times_system_pointer_size, table));
}
// The calling convention for JSFunctions on IA32 passes arguments on the
......@@ -4307,7 +4307,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
Register pop_reg = g.ToRegister(pop);
Register scratch_reg = pop_reg == ecx ? edx : ecx;
__ pop(scratch_reg);
__ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
__ lea(esp, Operand(esp, pop_reg, times_system_pointer_size,
static_cast<int>(pop_size)));
__ jmp(scratch_reg);
}
}
......
......@@ -204,9 +204,11 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
times_half_pointer_size = times_2,
times_pointer_size = times_4,
times_twice_pointer_size = times_8
times_half_system_pointer_size = times_2,
times_system_pointer_size = times_4,
times_tagged_size = times_4,
};
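
The ScaleFactor enumerators encode the log2 of the x86 SIB-byte multiplier, so the names added above are pure aliases for the existing scales on ia32. A quick sanity sketch using the numeric values from the enum:

constexpr int ByteMultiplier(int scale_log2) { return 1 << scale_log2; }

static_assert(ByteMultiplier(1) == 2, "times_half_system_pointer_size");
static_assert(ByteMultiplier(2) == 4, "times_system_pointer_size, times_tagged_size");
static_assert(ByteMultiplier(3) == 8, "times_8");
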
class V8_EXPORT_PRIVATE Operand {
......@@ -298,7 +300,7 @@ class V8_EXPORT_PRIVATE Operand {
friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kPointerSize,
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
"Operand must be small enough to pass it by value");
// -----------------------------------------------------------------------------
......@@ -417,11 +419,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static constexpr int kSpecialTargetSize = kPointerSize;
static constexpr int kSpecialTargetSize = kSystemPointerSize;
// Distance between the address of the code target in the call instruction
// and the return address
static constexpr int kCallTargetAddressOffset = kPointerSize;
static constexpr int kCallTargetAddressOffset = kSystemPointerSize;
// One byte opcode for test al, 0xXX.
static constexpr byte kTestAlByte = 0xA8;
......
......@@ -33,7 +33,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movsd(Operand(esp, offset), xmm_reg);
}
STATIC_ASSERT(kFloatSize == kPointerSize);
STATIC_ASSERT(kFloatSize == kSystemPointerSize);
const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
__ sub(esp, Immediate(kFloatRegsSize));
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
......@@ -49,15 +49,15 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
__ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kPointerSize + kDoubleRegsSize + kFloatRegsSize;
const int kSavedRegistersAreaSize = kNumberOfRegisters * kSystemPointerSize +
kDoubleRegsSize + kFloatRegsSize;
// The bailout id is passed in ebx by the caller.
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
__ sub(edx, ebp);
__ neg(edx);
......@@ -70,13 +70,13 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ JumpIfSmi(edi, &context_check);
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kPointerSize),
__ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
__ mov(Operand(esp, 1 * kSystemPointerSize),
Immediate(static_cast<int>(deopt_kind)));
__ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
__ mov(Operand(esp, 2 * kSystemPointerSize), ebx); // Bailout id.
__ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
__ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate)));
{
AllowExternalCallThatCantCauseGC scope(masm);
......@@ -89,7 +89,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ pop(Operand(esi, offset));
}
......@@ -116,7 +117,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ fnclex();
// Remove the return address and the double registers.
__ add(esp, Immediate(kDoubleRegsSize + 1 * kPointerSize));
__ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
......@@ -140,7 +141,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, esi);
__ mov(Operand(esp, 0 * kPointerSize), eax);
__ mov(Operand(esp, 0 * kSystemPointerSize), eax);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
......@@ -156,7 +157,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// past the last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_4, 0));
__ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: esi = current FrameDescription*, ecx = loop
......@@ -170,7 +171,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize));
__ add(eax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
......@@ -189,7 +190,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ push(Operand(esi, offset));
}
......
......@@ -15,21 +15,21 @@ class EntryFrameConstants : public AllStatic {
public:
// This is the offset to where JSEntry pushes the current value of
// Isolate::c_entry_fp onto the stack.
static constexpr int kCallerFPOffset = -6 * kPointerSize;
static constexpr int kCallerFPOffset = -6 * kSystemPointerSize;
// EntryFrame is used by JSEntry, JSConstructEntry and JSRunMicrotasksEntry.
// All of them take |root_register_value| as the first parameter.
static constexpr int kRootRegisterValueOffset = +2 * kPointerSize;
static constexpr int kRootRegisterValueOffset = +2 * kSystemPointerSize;
// Rest of parameters passed to JSEntry and JSConstructEntry.
static constexpr int kNewTargetArgOffset = +3 * kPointerSize;
static constexpr int kFunctionArgOffset = +4 * kPointerSize;
static constexpr int kReceiverArgOffset = +5 * kPointerSize;
static constexpr int kArgcOffset = +6 * kPointerSize;
static constexpr int kArgvOffset = +7 * kPointerSize;
static constexpr int kNewTargetArgOffset = +3 * kSystemPointerSize;
static constexpr int kFunctionArgOffset = +4 * kSystemPointerSize;
static constexpr int kReceiverArgOffset = +5 * kSystemPointerSize;
static constexpr int kArgcOffset = +6 * kSystemPointerSize;
static constexpr int kArgvOffset = +7 * kSystemPointerSize;
// Rest of parameters passed to JSRunMicrotasksEntry.
static constexpr int kMicrotaskQueueArgOffset = +3 * kPointerSize;
static constexpr int kMicrotaskQueueArgOffset = +3 * kSystemPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
......@@ -38,12 +38,12 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
static constexpr int kCallerFPOffset = 0 * kPointerSize;
static constexpr int kCallerPCOffset = +1 * kPointerSize;
static constexpr int kCallerFPOffset = 0 * kSystemPointerSize;
static constexpr int kCallerPCOffset = +1 * kSystemPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static constexpr int kCallerSPDisplacement = +2 * kPointerSize;
static constexpr int kCallerSPDisplacement = +2 * kSystemPointerSize;
static constexpr int kConstantPoolOffset = 0; // Not used
};
......@@ -57,7 +57,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kPointerSize +
kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kSimd128Size;
};
......@@ -66,13 +66,13 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static constexpr int kLocal0Offset =
StandardFrameConstants::kExpressionsOffset;
static constexpr int kLastParameterOffset = +2 * kPointerSize;
static constexpr int kLastParameterOffset = +2 * kSystemPointerSize;
static constexpr int kFunctionOffset =
StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static constexpr int kParam0Offset = -2 * kPointerSize;
static constexpr int kReceiverOffset = -1 * kPointerSize;
static constexpr int kParam0Offset = -2 * kSystemPointerSize;
static constexpr int kReceiverOffset = -1 * kSystemPointerSize;
};
} // namespace internal
......
......@@ -193,8 +193,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
mov(destination,
FieldOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
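
LoadFromConstantsTable now goes through FixedArray::OffsetOfElementAt instead of spelling out the header-plus-index arithmetic. Assuming the helper expands to kHeaderSize + index * kTaggedSize, the two forms agree on ia32, where kTaggedSize == kSystemPointerSize == 4 (the constants below are assumed values, not taken from the diff):

constexpr int kTaggedSize = 4;                          // ia32
constexpr int kFixedArrayHeaderSize = 2 * kTaggedSize;  // map + length fields

constexpr int OffsetOfElementAt(int index) {
  return kFixedArrayHeaderSize + index * kTaggedSize;
}

static_assert(OffsetOfElementAt(7) == kFixedArrayHeaderSize + 7 * 4,
              "matches the old kHeaderSize + index * kPointerSize form");
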
void TurboAssembler::LoadRootRegisterOffset(Register destination,
......@@ -235,7 +234,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
bytes += kPointerSize;
bytes += kSystemPointerSize;
}
}
......@@ -257,7 +256,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
push(reg);
bytes += kPointerSize;
bytes += kSystemPointerSize;
}
}
......@@ -293,7 +292,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register reg = saved_regs[i];
if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
pop(reg);
bytes += kPointerSize;
bytes += kSystemPointerSize;
}
}
......@@ -326,13 +325,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
// of the object, so offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
// of the object, so offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
lea(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
test_b(dst, Immediate(kPointerSize - 1));
test_b(dst, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
......@@ -792,17 +791,17 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
DCHECK_EQ(+2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(+1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
push(ebp);
mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
push(Immediate(StackFrame::TypeToMarker(frame_type)));
DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
push(Immediate(0)); // Saved entry sp, patched before call.
DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
DCHECK_EQ(-3 * kSystemPointerSize, ExitFrameConstants::kCodeOffset);
Move(scratch, CodeObject());
push(scratch); // Accessed from ExitFrame::code_slot.
......@@ -827,7 +826,8 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
int space =
XMMRegister::kNumRegisters * kDoubleSize + argc * kSystemPointerSize;
sub(esp, Immediate(space));
const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
......@@ -835,7 +835,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
sub(esp, Immediate(argc * kSystemPointerSize));
}
// Get the required frame alignment for the OS.
......@@ -854,9 +854,9 @@ void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
EnterExitFramePrologue(frame_type, edi);
// Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
mov(edi, eax);
lea(esi, Operand(ebp, eax, times_4, offset));
lea(esi, Operand(ebp, eax, times_system_pointer_size, offset));
// Reserve space for argc, argv and isolate.
EnterExitFrameEpilogue(argc, save_doubles);
......@@ -880,11 +880,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
if (pop_arguments) {
// Get the return address from the stack and restore the frame pointer.
mov(ecx, Operand(ebp, 1 * kPointerSize));
mov(ebp, Operand(ebp, 0 * kPointerSize));
mov(ecx, Operand(ebp, 1 * kSystemPointerSize));
mov(ebp, Operand(ebp, 0 * kSystemPointerSize));
// Pop the arguments and the receiver from the caller stack.
lea(esp, Operand(esi, 1 * kPointerSize));
lea(esp, Operand(esi, 1 * kSystemPointerSize));
// Push the return address to get ready to return.
push(ecx);
......@@ -923,7 +923,7 @@ void MacroAssembler::LeaveApiExitFrame() {
void MacroAssembler::PushStackHandler(Register scratch) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
push(Immediate(0)); // Padding.
......@@ -942,7 +942,7 @@ void MacroAssembler::PopStackHandler(Register scratch) {
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
pop(ExternalReferenceAsOperand(handler_address, scratch));
add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
add(esp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
......@@ -1033,15 +1033,17 @@ void TurboAssembler::PrepareForTailCall(
if (callee_args_count.is_reg()) {
sub(caller_args_count_reg, callee_args_count.reg());
lea(new_sp_reg,
Operand(ebp, caller_args_count_reg, times_pointer_size,
Operand(ebp, caller_args_count_reg, times_system_pointer_size,
StandardFrameConstants::kCallerPCOffset -
number_of_temp_values_after_return_address * kPointerSize));
number_of_temp_values_after_return_address *
kSystemPointerSize));
} else {
lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
StandardFrameConstants::kCallerPCOffset -
(callee_args_count.immediate() +
number_of_temp_values_after_return_address) *
kPointerSize));
lea(new_sp_reg,
Operand(ebp, caller_args_count_reg, times_system_pointer_size,
StandardFrameConstants::kCallerPCOffset -
(callee_args_count.immediate() +
number_of_temp_values_after_return_address) *
kSystemPointerSize));
}
if (FLAG_debug_code) {
......@@ -1054,7 +1056,8 @@ void TurboAssembler::PrepareForTailCall(
// place.
Register tmp_reg = scratch1;
mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
mov(Operand(esp,
number_of_temp_values_after_return_address * kSystemPointerSize),
tmp_reg);
// Restore caller's frame pointer now as it could be overwritten by
......@@ -1078,8 +1081,8 @@ void TurboAssembler::PrepareForTailCall(
jmp(&entry, Label::kNear);
bind(&loop);
dec(count_reg);
mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
mov(tmp_reg, Operand(esp, count_reg, times_system_pointer_size, 0));
mov(Operand(new_sp_reg, count_reg, times_system_pointer_size, 0), tmp_reg);
bind(&entry);
cmp(count_reg, Immediate(0));
j(not_equal, &loop, Label::kNear);
......@@ -1184,9 +1187,10 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Push(fun);
Operand receiver_op =
actual.is_reg()
? Operand(ebp, actual.reg(), times_pointer_size, kPointerSize * 2)
: Operand(ebp, actual.immediate() * times_pointer_size +
kPointerSize * 2);
? Operand(ebp, actual.reg(), times_system_pointer_size,
kSystemPointerSize * 2)
: Operand(ebp, actual.immediate() * times_system_pointer_size +
kSystemPointerSize * 2);
Push(receiver_op);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
......@@ -1308,7 +1312,7 @@ void TurboAssembler::Push(Immediate value) {
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
add(esp, Immediate(stack_elements * kPointerSize));
add(esp, Immediate(stack_elements * kSystemPointerSize));
}
}
......@@ -1720,7 +1724,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
void TurboAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
if (frame_alignment > kSystemPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
test(esp, Immediate(frame_alignment_mask));
......@@ -1775,12 +1779,12 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
sub(esp, Immediate((num_arguments + 1) * kPointerSize));
sub(esp, Immediate((num_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
mov(Operand(esp, num_arguments * kSystemPointerSize), scratch);
} else {
sub(esp, Immediate(num_arguments * kPointerSize));
sub(esp, Immediate(num_arguments * kSystemPointerSize));
}
}
......@@ -1833,9 +1837,9 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
if (base::OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
mov(esp, Operand(esp, num_arguments * kSystemPointerSize));
} else {
add(esp, Immediate(num_arguments * kPointerSize));
add(esp, Immediate(num_arguments * kSystemPointerSize));
}
}
......@@ -1866,10 +1870,12 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
STATIC_ASSERT(kSmiTag == 0);
// The builtin_pointer register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below (we use times_2 instead
// of times_4 since smis are already shifted by one).
mov(builtin_pointer, Operand(kRootRegister, builtin_pointer, times_2,
IsolateData::builtin_entry_table_offset()));
// Untagging is folded into the indexing operand below (we use
// times_half_system_pointer_size instead of times_system_pointer_size since
// smis are already shifted by one).
mov(builtin_pointer,
Operand(kRootRegister, builtin_pointer, times_half_system_pointer_size,
IsolateData::builtin_entry_table_offset()));
call(builtin_pointer);
}
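Note (not part of the patch): the comment above is the key trick in this hunk. A Smi on ia32 already carries a one-bit tag shift, so scaling the still-tagged value by half the pointer size lands on the same byte offset as scaling the untagged index by the full pointer size. A self-contained check of that arithmetic, assuming kSmiTagSize == 1 and kSystemPointerSize == 4:

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;
  const int kSystemPointerSize = 4;
  const int kHalfSystemPointerSize = kSystemPointerSize / 2;  // scale of 2

  const int builtin_index = 7;  // arbitrary example index
  const int32_t smi = builtin_index << kSmiTagSize;  // tagged value, index * 2
  // Scaling the tagged value by 2 yields the same byte offset as scaling the
  // untagged index by the full pointer size, so no explicit untag is needed.
  assert(smi * kHalfSystemPointerSize == builtin_index * kSystemPointerSize);
  return 0;
}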
......@@ -1905,8 +1911,9 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// table.
bind(&if_code_is_builtin);
mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
mov(destination, Operand(kRootRegister, destination, times_pointer_size,
IsolateData::builtin_entry_table_offset()));
mov(destination,
Operand(kRootRegister, destination, times_system_pointer_size,
IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
......
......@@ -708,18 +708,12 @@ inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
return Operand(object, index, scale, offset - kHeapObjectTag);
}
inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
inline Operand ContextOperand(Register context, Register index) {
return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
return Operand(context, index, times_tagged_size, Context::SlotOffset(0));
}
inline Operand NativeContextOperand() {
......
......@@ -187,7 +187,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
Label fallthrough;
__ cmp(edi, Operand(backtrack_stackpointer(), 0));
__ j(not_equal, &fallthrough);
__ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
__ add(backtrack_stackpointer(), Immediate(kSystemPointerSize)); // Pop.
BranchOrBacktrack(no_condition, on_equal);
__ bind(&fallthrough);
}
......@@ -278,7 +278,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Restore original value before continuing.
__ pop(backtrack_stackpointer());
// Drop original value of character position.
__ add(esp, Immediate(kPointerSize));
__ add(esp, Immediate(kSystemPointerSize));
// Compute new value of character position after the matched part.
__ sub(edi, esi);
if (read_backward) {
......@@ -306,15 +306,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Set isolate.
#ifdef V8_INTL_SUPPORT
if (unicode) {
__ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
__ mov(Operand(esp, 3 * kSystemPointerSize), Immediate(0));
} else // NOLINT
#endif // V8_INTL_SUPPORT
{
__ mov(Operand(esp, 3 * kPointerSize),
__ mov(Operand(esp, 3 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
}
// Set byte_length.
__ mov(Operand(esp, 2 * kPointerSize), ebx);
__ mov(Operand(esp, 2 * kSystemPointerSize), ebx);
// Set byte_offset2.
// Found by adding negative string-end offset of current position (edi)
// to end of string.
......@@ -322,11 +322,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
if (read_backward) {
__ sub(edi, ebx); // Offset by length when matching backwards.
}
__ mov(Operand(esp, 1 * kPointerSize), edi);
__ mov(Operand(esp, 1 * kSystemPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
__ add(edx, esi);
__ mov(Operand(esp, 0 * kPointerSize), edx);
__ mov(Operand(esp, 0 * kSystemPointerSize), edx);
{
AllowExternalCallThatCantCauseGC scope(masm_);
......@@ -691,7 +691,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
__ cmp(ecx, num_registers_ * kPointerSize);
__ cmp(ecx, num_registers_ * kSystemPointerSize);
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
......@@ -709,7 +709,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(ebx, Operand(ebp, kStartIndex));
// Allocate space on stack for registers.
__ sub(esp, Immediate(num_registers_ * kPointerSize));
__ sub(esp, Immediate(num_registers_ * kSystemPointerSize));
// Load string length.
__ mov(esi, Operand(ebp, kInputEnd));
// Load input position.
......@@ -733,7 +733,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Ensure that we write to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
const int kRegistersPerPage = kPageSize / kPointerSize;
const int kRegistersPerPage = kPageSize / kSystemPointerSize;
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
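Note (not part of the patch): quick arithmetic behind the probe loop above, assuming the 4 KiB pages named in the comment and kSystemPointerSize == 4, so stepping the register index by kRegistersPerPage advances the touched address by exactly one page:

#include <cassert>

int main() {
  const int kPageSize = 4096;  // assumed 4 KiB pages, as in the comment above
  const int kSystemPointerSize = 4;
  const int kRegistersPerPage = kPageSize / kSystemPointerSize;  // 1024
  // One probe per kRegistersPerPage register slots touches every stack page.
  assert(kRegistersPerPage * kSystemPointerSize == kPageSize);
  assert(kRegistersPerPage == 1024);
  return 0;
}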
......@@ -764,8 +764,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, 0), eax);
__ sub(ecx, Immediate(kPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
__ sub(ecx, Immediate(kSystemPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kSystemPointerSize);
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
......@@ -805,7 +805,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
if (mode_ == UC16) {
__ sar(eax, 1); // Convert byte index to character index.
}
__ mov(Operand(ebx, i * kPointerSize), eax);
__ mov(Operand(ebx, i * kSystemPointerSize), eax);
}
}
......@@ -824,7 +824,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(Operand(ebp, kNumOutputRegisters), ecx);
// Advance the location for output.
__ add(Operand(ebp, kRegisterOutput),
Immediate(num_saved_registers_ * kPointerSize));
Immediate(num_saved_registers_ * kSystemPointerSize));
// Prepare eax to initialize registers with its value in the next run.
__ mov(eax, Operand(ebp, kStringStartMinusOne));
......@@ -911,11 +911,11 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, ebx);
__ mov(Operand(esp, 2 * kPointerSize),
__ mov(Operand(esp, 2 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
__ lea(eax, Operand(ebp, kStackHighEnd));
__ mov(Operand(esp, 1 * kPointerSize), eax);
__ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
__ mov(Operand(esp, 1 * kSystemPointerSize), eax);
__ mov(Operand(esp, 0 * kSystemPointerSize), backtrack_stackpointer());
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
__ CallCFunction(grow_stack, num_arguments);
......@@ -1097,12 +1097,12 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(Operand(esp, 2 * kPointerSize), ebp);
__ mov(Operand(esp, 2 * kSystemPointerSize), ebp);
// Code of self.
__ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
__ mov(Operand(esp, 1 * kSystemPointerSize), Immediate(masm_->CodeObject()));
// Next address on the stack (will be address of return address).
__ lea(eax, Operand(esp, -kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
__ lea(eax, Operand(esp, -kSystemPointerSize));
__ mov(Operand(esp, 0 * kSystemPointerSize), eax);
ExternalReference check_stack_guard =
ExternalReference::re_check_stack_guard_state(isolate());
__ CallCFunction(check_stack_guard, num_arguments);
......@@ -1143,7 +1143,7 @@ Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
return Operand(ebp, kRegisterZero - register_index * kPointerSize);
return Operand(ebp, kRegisterZero - register_index * kSystemPointerSize);
}
......@@ -1201,14 +1201,14 @@ void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerIA32::Push(Register source) {
DCHECK(source != backtrack_stackpointer());
// Notice: This updates flags, unlike normal Push.
__ sub(backtrack_stackpointer(), Immediate(kPointerSize));
__ sub(backtrack_stackpointer(), Immediate(kSystemPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerIA32::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
__ sub(backtrack_stackpointer(), Immediate(kPointerSize));
__ sub(backtrack_stackpointer(), Immediate(kSystemPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), value);
}
......@@ -1217,7 +1217,7 @@ void RegExpMacroAssemblerIA32::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ mov(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
__ add(backtrack_stackpointer(), Immediate(kPointerSize));
__ add(backtrack_stackpointer(), Immediate(kSystemPointerSize));
}
......
......@@ -96,31 +96,32 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// Offsets from ebp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
static const int kReturn_eip = kFramePointer + kPointerSize;
static const int kFrameAlign = kReturn_eip + kPointerSize;
static const int kReturn_eip = kFramePointer + kSystemPointerSize;
static const int kFrameAlign = kReturn_eip + kSystemPointerSize;
// Parameters.
static const int kInputString = kFrameAlign;
static const int kStartIndex = kInputString + kPointerSize;
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
static const int kStartIndex = kInputString + kSystemPointerSize;
static const int kInputStart = kStartIndex + kSystemPointerSize;
static const int kInputEnd = kInputStart + kSystemPointerSize;
static const int kRegisterOutput = kInputEnd + kSystemPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kSystemPointerSize;
static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
static const int kIsolate = kDirectCall + kSystemPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kBackup_esi = kFramePointer - kPointerSize;
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kBackup_esi = kFramePointer - kSystemPointerSize;
static const int kBackup_edi = kBackup_esi - kSystemPointerSize;
static const int kBackup_ebx = kBackup_edi - kSystemPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kSystemPointerSize;
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
static const int kRegisterZero = kStringStartMinusOne - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
......
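Note (not part of the patch): a worked example of the ebp-relative layout above with kSystemPointerSize == 4. Parameters grow upward from the return address; saved registers and locals grow downward from the frame pointer. The snippet re-derives only a few of the constants:

#include <cassert>

int main() {
  const int kSystemPointerSize = 4;
  const int kFramePointer = 0;
  const int kReturn_eip = kFramePointer + kSystemPointerSize;  // +4
  const int kFrameAlign = kReturn_eip + kSystemPointerSize;    // +8
  const int kInputString = kFrameAlign;                        // +8
  const int kStartIndex = kInputString + kSystemPointerSize;   // +12
  const int kBackup_esi = kFramePointer - kSystemPointerSize;  // -4
  const int kBackup_edi = kBackup_esi - kSystemPointerSize;    // -8

  assert(kReturn_eip == 4);   // return address sits directly above saved ebp
  assert(kStartIndex == 12);  // second parameter above the return address
  assert(kBackup_edi == -8);  // locals grow downward from the frame pointer
  return 0;
}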
......@@ -250,8 +250,8 @@ TEST(AssemblerIa326) {
Assembler assm(AssemblerOptions{},
ExternalAssemblerBuffer(buffer, sizeof buffer));
__ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ movsd(xmm1, Operand(esp, 3 * kPointerSize));
__ movsd(xmm0, Operand(esp, 1 * kSystemPointerSize));
__ movsd(xmm1, Operand(esp, 3 * kSystemPointerSize));
__ addsd(xmm0, xmm1);
__ mulsd(xmm0, xmm1);
__ subsd(xmm0, xmm1);
......@@ -522,8 +522,8 @@ TEST(AssemblerIa32SSE) {
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
__ movss(xmm0, Operand(esp, kPointerSize));
__ movss(xmm1, Operand(esp, 2 * kPointerSize));
__ movss(xmm0, Operand(esp, kSystemPointerSize));
__ movss(xmm1, Operand(esp, 2 * kSystemPointerSize));
__ shufps(xmm0, xmm0, 0x0);
__ shufps(xmm1, xmm1, 0x0);
__ movaps(xmm2, xmm1);
......@@ -559,8 +559,8 @@ TEST(AssemblerIa32SSE3) {
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
{
CpuFeatureScope fscope(&assm, SSE3);
__ movss(xmm0, Operand(esp, kPointerSize));
__ movss(xmm1, Operand(esp, 2 * kPointerSize));
__ movss(xmm0, Operand(esp, kSystemPointerSize));
__ movss(xmm1, Operand(esp, 2 * kSystemPointerSize));
__ shufps(xmm0, xmm0, 0x0);
__ shufps(xmm1, xmm1, 0x0);
__ haddps(xmm1, xmm0);
......@@ -594,9 +594,9 @@ TEST(AssemblerX64FMA_sd) {
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
__ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ movsd(xmm1, Operand(esp, 3 * kPointerSize));
__ movsd(xmm2, Operand(esp, 5 * kPointerSize));
__ movsd(xmm0, Operand(esp, 1 * kSystemPointerSize));
__ movsd(xmm1, Operand(esp, 3 * kSystemPointerSize));
__ movsd(xmm2, Operand(esp, 5 * kSystemPointerSize));
// argument in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
__ movaps(xmm3, xmm0);
......@@ -823,9 +823,9 @@ TEST(AssemblerX64FMA_ss) {
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
__ movss(xmm0, Operand(esp, 1 * kPointerSize));
__ movss(xmm1, Operand(esp, 2 * kPointerSize));
__ movss(xmm2, Operand(esp, 3 * kPointerSize));
__ movss(xmm0, Operand(esp, 1 * kSystemPointerSize));
__ movss(xmm1, Operand(esp, 2 * kSystemPointerSize));
__ movss(xmm2, Operand(esp, 3 * kSystemPointerSize));
// arguments in xmm0, xmm1 and xmm2
// xmm0 * xmm1 + xmm2
__ movaps(xmm3, xmm0);
......@@ -1403,7 +1403,7 @@ TEST(AssemblerIa32JumpTables1) {
Label done, table;
__ mov(eax, Operand(esp, 4));
__ jmp(Operand::JumpTable(eax, times_4, &table));
__ jmp(Operand::JumpTable(eax, times_system_pointer_size, &table));
__ ud2();
__ bind(&table);
for (int i = 0; i < kNumCases; ++i) {
......@@ -1450,7 +1450,7 @@ TEST(AssemblerIa32JumpTables2) {
Label done, table;
__ mov(eax, Operand(esp, 4));
__ jmp(Operand::JumpTable(eax, times_4, &table));
__ jmp(Operand::JumpTable(eax, times_system_pointer_size, &table));
__ ud2();
for (int i = 0; i < kNumCases; ++i) {
......
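Note (not part of the patch): the two jump-table tests above index a table of code addresses, so the scale factor is the system pointer size; spelling it times_system_pointer_size instead of times_4 keeps that intent explicit. A rough C++ analogue of the dispatch, with made-up case functions:

#include <cassert>

static int Case0() { return 100; }
static int Case1() { return 101; }
static int Case2() { return 102; }

int main() {
  // Each entry holds one code address, so consecutive entries are one
  // pointer size apart -- the reason the emitted jmp scales the case index
  // by times_system_pointer_size rather than a hard-coded times_4.
  int (*table[])() = {Case0, Case1, Case2};
  int index = 1;  // hypothetical switch value read from the stack
  assert(table[index]() == 101);
  return 0;
}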