Commit 0243ba80 authored by Igor Sheludko, committed by Commit Bot

[cleanup][x32] Remove x32 leftovers from x64 assembler, pt.1

addp, andp, cmpp, decp, incp, leap, negp, orp, subp, testp, xorp,
shrp, sarp, shlp are replaced with respective quad-word instructions.

Some wrongly-used xxxp instructions in regexp code are replaced with xxxl.

Bug: v8:8621, v8:8562
Change-Id: If5fe3229a35805b8ef84d3e1ffa05cf9ed91ceef
Reviewed-on: https://chromium-review.googlesource.com/c/1446451
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59217}
parent d928d25c
......@@ -96,7 +96,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
......@@ -115,7 +115,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ bind(&loop);
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
__ decp(rcx);
__ decq(rcx);
__ j(greater_equal, &loop, Label::kNear);
// Call the function.
......@@ -136,7 +136,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
__ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
__ ret(0);
......@@ -153,10 +153,10 @@ void Generate_StackOverflowCheck(
__ movp(scratch, rsp);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ subp(scratch, kScratchRegister);
__ sarp(scratch, Immediate(kSystemPointerSizeLog2));
__ subq(scratch, kScratchRegister);
__ sarq(scratch, Immediate(kSystemPointerSizeLog2));
// Check if the arguments will overflow the stack.
__ cmpp(scratch, num_args);
__ cmpq(scratch, num_args);
// Signed comparison.
__ j(less_equal, stack_overflow, stack_overflow_distance);
}
......@@ -250,7 +250,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Check if we have enough stack space to push all arguments.
// Argument count in rax. Clobbers rcx.
......@@ -286,7 +286,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&loop);
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
__ decp(rcx);
__ decq(rcx);
__ j(greater_equal, &loop, Label::kNear);
// Call the function.
......@@ -347,7 +347,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
__ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
__ ret(0);
}
......@@ -387,7 +387,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Push(Immediate(StackFrame::TypeToMarker(type)));
// Reserve a slot for the context. It is filled after the root register has
// been set up.
__ subp(rsp, Immediate(kSystemPointerSize));
__ subq(rsp, Immediate(kSystemPointerSize));
// Save callee-saved registers (X64/X32/Win64 calling conventions).
__ pushq(r12);
__ pushq(r13);
......@@ -401,7 +401,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save.
__ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
......@@ -442,7 +442,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference js_entry_sp = ExternalReference::Create(
IsolateAddressId::kJSEntrySPAddress, masm->isolate());
__ Load(rax, js_entry_sp);
__ testp(rax, rax);
__ testq(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movp(rax, rbp);
......@@ -486,7 +486,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
__ Pop(rbx);
__ cmpp(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ cmpq(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
__ movp(Operand(kScratchRegister, 0), Immediate(0));
......@@ -511,7 +511,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
__ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
__ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
__ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
__ popq(rbx);
......@@ -524,7 +524,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ popq(r14);
__ popq(r13);
__ popq(r12);
__ addp(rsp, Immediate(2 * kSystemPointerSize)); // remove markers
__ addq(rsp, Immediate(2 * kSystemPointerSize)); // remove markers
// Restore frame pointer and return.
__ popq(rbp);
......@@ -648,9 +648,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&loop);
__ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
__ Push(Operand(kScratchRegister, 0)); // dereference handle
__ addp(rcx, Immediate(1));
__ addq(rcx, Immediate(1));
__ bind(&entry);
__ cmpp(rcx, rax);
__ cmpq(rcx, rax);
__ j(not_equal, &loop, Label::kNear);
// Invoke the builtin code.
......@@ -739,7 +739,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference::debug_suspended_generator_address(masm->isolate());
Operand debug_suspended_generator_operand =
masm->ExternalReferenceAsOperand(debug_suspended_generator);
__ cmpp(rdx, debug_suspended_generator_operand);
__ cmpq(rdx, debug_suspended_generator_operand);
__ j(equal, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
......@@ -887,7 +887,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Drop receiver + arguments.
__ PopReturnAddressTo(return_pc);
__ addp(rsp, args_count);
__ addq(rsp, args_count);
__ PushReturnAddressFrom(return_pc);
}
......@@ -1037,7 +1037,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the next bytecode and update table to the wide scaled table.
__ incl(bytecode_offset);
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
__ addq(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&process_bytecode, Label::kNear);
......@@ -1045,7 +1045,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Load the next bytecode and update table to the extra wide scaled table.
__ incl(bytecode_offset);
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
__ addq(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ bind(&process_bytecode);
......@@ -1154,7 +1154,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ movp(rax, rsp);
__ subp(rax, rcx);
__ subq(rax, rcx);
__ CompareRoot(rax, RootIndex::kRealStackLimit);
__ j(above_equal, &ok, Label::kNear);
__ CallRuntime(Runtime::kThrowStackOverflow);
......@@ -1170,7 +1170,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rax);
// Continue loop if not done.
__ bind(&loop_check);
__ subp(rcx, Immediate(kSystemPointerSize));
__ subq(rcx, Immediate(kSystemPointerSize));
__ j(greater_equal, &loop_header, Label::kNear);
}
......@@ -1240,18 +1240,18 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register scratch) {
// Find the address of the last argument.
__ Move(scratch, num_args);
__ shlp(scratch, Immediate(kSystemPointerSizeLog2));
__ negp(scratch);
__ addp(scratch, start_address);
__ shlq(scratch, Immediate(kSystemPointerSizeLog2));
__ negq(scratch);
__ addq(scratch, start_address);
// Push the arguments.
Label loop_header, loop_check;
__ j(always, &loop_check, Label::kNear);
__ bind(&loop_header);
__ Push(Operand(start_address, 0));
__ subp(start_address, Immediate(kSystemPointerSize));
__ subq(start_address, Immediate(kSystemPointerSize));
__ bind(&loop_check);
__ cmpp(start_address, scratch);
__ cmpq(start_address, scratch);
__ j(greater, &loop_header, Label::kNear);
}
......@@ -1403,7 +1403,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ movp(rbx,
FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
__ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ addq(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(&trampoline_loaded, Label::kNear);
__ bind(&builtin_trampoline);
......@@ -1416,7 +1416,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
kScratchRegister));
__ bind(&trampoline_loaded);
__ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value()));
__ addq(rbx, Immediate(interpreter_entry_return_pc_offset->value()));
__ Push(rbx);
// Initialize dispatch table register.
......@@ -1512,7 +1512,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
for (int j = 0; j < 4; ++j) {
Label over;
if (j < 3) {
__ cmpp(rcx, Immediate(j));
__ cmpq(rcx, Immediate(j));
__ j(not_equal, &over, Label::kNear);
}
for (int i = j - 1; i >= 0; --i) {
......@@ -1540,8 +1540,8 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
scope.GenerateLeaveFrame();
__ PopReturnAddressTo(rbx);
__ incp(rcx);
__ leap(rsp, Operand(rsp, rcx, times_pointer_size, 0));
__ incq(rcx);
__ leaq(rsp, Operand(rsp, rcx, times_pointer_size, 0));
__ PushReturnAddressFrom(rbx);
__ ret(0);
......@@ -1646,18 +1646,18 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(rdx, RootIndex::kUndefinedValue);
__ movp(rbx, rdx);
__ movp(rdi, args.GetReceiverOperand());
__ testp(rax, rax);
__ testq(rax, rax);
__ j(zero, &no_this_arg, Label::kNear);
{
__ movp(rdx, args.GetArgumentOperand(1));
__ cmpp(rax, Immediate(1));
__ cmpq(rax, Immediate(1));
__ j(equal, &no_arg_array, Label::kNear);
__ movp(rbx, args.GetArgumentOperand(2));
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
__ PopReturnAddressTo(rcx);
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ leaq(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
}
......@@ -1707,12 +1707,12 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
{
Label done;
__ testp(rax, rax);
__ testq(rax, rax);
__ j(not_zero, &done, Label::kNear);
__ PopReturnAddressTo(rbx);
__ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(rbx);
__ incp(rax);
__ incq(rax);
__ bind(&done);
}
......@@ -1732,10 +1732,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, args.GetArgumentOperand(1));
__ movp(args.GetArgumentOperand(0), rbx);
__ decp(rcx);
__ decq(rcx);
__ j(not_zero, &loop); // While non-zero.
__ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
__ decp(rax); // One fewer argument (first argument is new receiver).
__ decq(rax); // One fewer argument (first argument is new receiver).
}
// 4. Call the callable.
......@@ -1763,17 +1763,17 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movp(rdx, rdi);
__ movp(rbx, rdi);
__ cmpp(rax, Immediate(1));
__ cmpq(rax, Immediate(1));
__ j(below, &done, Label::kNear);
__ movp(rdi, args.GetArgumentOperand(1)); // target
__ j(equal, &done, Label::kNear);
__ movp(rdx, args.GetArgumentOperand(2)); // thisArgument
__ cmpp(rax, Immediate(3));
__ cmpq(rax, Immediate(3));
__ j(below, &done, Label::kNear);
__ movp(rbx, args.GetArgumentOperand(3)); // argumentsList
__ bind(&done);
__ PopReturnAddressTo(rcx);
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ leaq(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
}
......@@ -1814,18 +1814,18 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movp(rdx, rdi);
__ movp(rbx, rdi);
__ cmpp(rax, Immediate(1));
__ cmpq(rax, Immediate(1));
__ j(below, &done, Label::kNear);
__ movp(rdi, args.GetArgumentOperand(1)); // target
__ movp(rdx, rdi); // new.target defaults to target
__ j(equal, &done, Label::kNear);
__ movp(rbx, args.GetArgumentOperand(2)); // argumentsList
__ cmpp(rax, Immediate(3));
__ cmpq(rax, Immediate(3));
__ j(below, &done, Label::kNear);
__ movp(rdx, args.GetArgumentOperand(3)); // new.target
__ bind(&done);
__ PopReturnAddressTo(rcx);
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ leaq(rsp, Operand(rsp, rax, times_pointer_size, kSystemPointerSize));
__ PushRoot(RootIndex::kUndefinedValue);
__ PushReturnAddressFrom(rcx);
}
......@@ -1911,7 +1911,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
__ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
}
......@@ -1927,9 +1927,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
Label invoke, dont_adapt_arguments, stack_overflow, enough, too_few;
__ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
__ cmpp(rax, rbx);
__ cmpq(rax, rbx);
__ j(less, &too_few);
{ // Enough parameters: Actual >= expected.
......@@ -1940,15 +1940,15 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
__ leaq(rax, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
__ incp(r8);
__ incq(r8);
__ Push(Operand(rax, 0));
__ subp(rax, Immediate(kSystemPointerSize));
__ cmpp(r8, rbx);
__ subq(rax, Immediate(kSystemPointerSize));
__ cmpq(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
......@@ -1962,24 +1962,24 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ leaq(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
__ incp(r8);
__ incq(r8);
__ Push(Operand(rdi, 0));
__ subp(rdi, Immediate(kSystemPointerSize));
__ cmpp(r8, rax);
__ subq(rdi, Immediate(kSystemPointerSize));
__ cmpq(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
__ bind(&fill);
__ incp(r8);
__ incq(r8);
__ Push(kScratchRegister);
__ cmpp(r8, rbx);
__ cmpq(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
......@@ -2129,7 +2129,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ cmpp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
__ cmpq(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &arguments_adaptor, Label::kNear);
{
......@@ -2335,15 +2335,15 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
__ leap(kScratchRegister, Operand(rbx, times_pointer_size, 0));
__ subp(rsp, kScratchRegister);
__ leaq(kScratchRegister, Operand(rbx, times_pointer_size, 0));
__ subq(rsp, kScratchRegister);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
__ CompareRoot(rsp, RootIndex::kRealStackLimit);
__ j(above_equal, &done, Label::kNear);
// Restore the stack pointer.
__ leap(rsp, Operand(rsp, rbx, times_pointer_size, 0));
__ leaq(rsp, Operand(rsp, rbx, times_pointer_size, 0));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
......@@ -2359,7 +2359,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ Set(rcx, 0);
__ leap(rbx, Operand(rsp, rbx, times_pointer_size, 0));
__ leaq(rbx, Operand(rsp, rbx, times_pointer_size, 0));
__ bind(&loop);
__ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
__ movp(Operand(rsp, rcx, times_pointer_size, 0), kScratchRegister);
......@@ -2527,7 +2527,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{
Label done;
__ cmpp(rdi, rdx);
__ cmpq(rdi, rdx);
__ j(not_equal, &done, Label::kNear);
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
......@@ -2613,7 +2613,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
Label skip;
// If the code object is null, just return to the caller.
__ testp(rax, rax);
__ testq(rax, rax);
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
......@@ -2633,7 +2633,7 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
__ leap(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
__ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
// Overwrite the return address on the stack.
__ movq(StackOperandForReturnAddress(0), rax);
......@@ -2663,7 +2663,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
arraysize(wasm::kFpParamRegisters),
"frame size mismatch");
__ subp(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
__ subq(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
int offset = 0;
for (DoubleRegister reg : wasm::kFpParamRegisters) {
__ movdqu(Operand(rsp, offset), reg);
......@@ -2695,7 +2695,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ movdqu(reg, Operand(rsp, offset));
}
DCHECK_EQ(0, offset);
__ addp(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
__ addq(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
__ Pop(reg);
}
......@@ -2778,7 +2778,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
} else {
DCHECK_LE(result_size, 2);
// Pass a pointer to the result location as the first argument.
__ leap(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
__ leaq(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
// Pass a pointer to the Arguments object as the second argument.
__ movp(kCCallArg1, r14); // argc.
__ movp(kCCallArg2, r15); // argv.
......@@ -2809,7 +2809,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalReferenceAsOperand(pending_exception_address);
__ cmpp(r14, pending_exception_operand);
__ cmpq(r14, pending_exception_operand);
__ j(equal, &okay, Label::kNear);
__ int3();
__ bind(&okay);
......@@ -2853,7 +2853,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (rsi == 0) for non-JS frames.
Label skip;
__ testp(rsi, rsi);
__ testq(rsi, rsi);
__ j(zero, &skip, Label::kNear);
__ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
__ bind(&skip);
......@@ -2966,7 +2966,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ bind(&fast_power);
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ subp(rsp, Immediate(kDoubleSize));
__ subq(rsp, Immediate(kDoubleSize));
__ Movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
__ Movsd(Operand(rsp, 0), double_base);
......@@ -2993,12 +2993,12 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
__ Movsd(double_result, Operand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
__ addq(rsp, Immediate(kDoubleSize));
__ jmp(&done);
__ bind(&fast_power_failed);
__ fninit();
__ addp(rsp, Immediate(kDoubleSize));
__ addq(rsp, Immediate(kDoubleSize));
__ jmp(&call_runtime);
// Calculate power with integer exponent.
......@@ -3106,7 +3106,7 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ Assert(equal, AbortReason::kInvalidElementsKindForInternalPackedArray);
// No arguments should be passed.
__ testp(rax, rax);
__ testq(rax, rax);
__ Assert(zero, AbortReason::kWrongNumberOfArgumentsForInternalPackedArray);
}
......@@ -3207,7 +3207,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// previous handle scope.
__ subl(Operand(base_reg, kLevelOffset), Immediate(1));
__ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
__ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
__ cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
__ j(not_equal, &delete_allocated_handles);
// Leave the API exit frame.
......@@ -3349,7 +3349,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// rsp[6 * kSystemPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
__ subp(rsp, Immediate(FCA::kArgsLength * kSystemPointerSize));
__ subq(rsp, Immediate(FCA::kArgsLength * kSystemPointerSize));
// Return address (the old stack location is overwritten later on).
__ movp(kScratchRegister,
......@@ -3382,7 +3382,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
Register scratch = rbx;
__ leap(scratch, Operand(rsp, 1 * kSystemPointerSize));
__ leaq(scratch, Operand(rsp, 1 * kSystemPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
......@@ -3394,7 +3394,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ leap(scratch, Operand(scratch, argc, times_pointer_size,
__ leaq(scratch, Operand(scratch, argc, times_pointer_size,
(FCA::kArgsLength - 1) * kSystemPointerSize));
__ movp(StackSpaceOperand(1), scratch);
......@@ -3417,7 +3417,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(api_function_address != arguments_arg);
// v8::InvocationCallback's argument.
__ leap(arguments_arg, StackSpaceOperand(0));
__ leaq(arguments_arg, StackSpaceOperand(0));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
......@@ -3486,7 +3486,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
const int kArgStackSpace = 1;
// Load address of v8::PropertyAccessorInfo::args_ array.
__ leap(scratch, Operand(rsp, 2 * kSystemPointerSize));
__ leaq(scratch, Operand(rsp, 2 * kSystemPointerSize));
__ EnterApiExitFrame(kArgStackSpace);
......@@ -3495,10 +3495,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Operand info_object = StackSpaceOperand(0);
__ movp(info_object, scratch);
__ leap(name_arg, Operand(scratch, -kSystemPointerSize));
__ leaq(name_arg, Operand(scratch, -kSystemPointerSize));
// The context register (rsi) has been saved in EnterApiExitFrame and
// could be used to pass arguments.
__ leap(accessor_info_arg, info_object);
__ leaq(accessor_info_arg, info_object);
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
......
......@@ -201,7 +201,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
zone_(gen->zone()) {}
void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
__ subq(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
......@@ -214,7 +214,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
}
__ movl(result_, MemOperand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
__ addq(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kDoubleSize);
}
......@@ -250,7 +250,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, zero,
exit());
__ leap(scratch1_, operand_);
__ leaq(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
......@@ -592,7 +592,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Label done;
// Check if current frame is an arguments adaptor frame.
__ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
__ cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &done, Label::kNear);
......@@ -708,7 +708,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(rbx);
__ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
__ cmpp(kJavaScriptCallCodeStartRegister, rbx);
__ cmpq(kJavaScriptCallCodeStartRegister, rbx);
__ movp(rbx, Immediate(-1));
__ cmovq(equal, kSpeculationPoisonRegister, rbx);
}
......@@ -3741,7 +3741,7 @@ void CodeGenerator::AssembleConstructFrame() {
const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize;
// Adjust the stack pointer.
__ subp(rsp, Immediate(stack_size));
__ subq(rsp, Immediate(stack_size));
// Store the registers on the stack.
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
......@@ -3793,7 +3793,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
slot_idx++;
}
// Adjust the stack pointer.
__ addp(rsp, Immediate(stack_size));
__ addq(rsp, Immediate(stack_size));
}
unwinding_info_writer_.MarkBlockWillExit();
......
......@@ -142,7 +142,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
DCHECK_LE(0, reg);
DCHECK_GT(num_registers_, reg);
if (by != 0) {
__ addp(register_location(reg), Immediate(by));
__ addq(register_location(reg), Immediate(by));
}
}
......@@ -151,7 +151,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
// Pop Code offset from backtrack stack, add Code and jump to location.
Pop(rbx);
__ addp(rbx, code_object_pointer());
__ addq(rbx, code_object_pointer());
__ jmp(rbx);
}
......@@ -174,16 +174,16 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
__ leap(rax, Operand(rdi, -char_size()));
__ cmpp(rax, Operand(rbp, kStringStartMinusOne));
__ leaq(rax, Operand(rdi, -char_size()));
__ cmpq(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(equal, on_at_start);
}
void RegExpMacroAssemblerX64::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
__ leap(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
__ cmpp(rax, Operand(rbp, kStringStartMinusOne));
__ leaq(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
__ cmpq(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(not_equal, on_not_at_start);
}
......@@ -209,7 +209,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
Label fallthrough;
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
__ subp(rbx, rdx); // Length of capture.
__ subq(rbx, rdx); // Length of capture.
// -----------------------
// rdx = Start offset of capture.
......@@ -241,12 +241,12 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
on_no_match = &backtrack_label_;
}
__ leap(r9, Operand(rsi, rdx, times_1, 0));
__ leap(r11, Operand(rsi, rdi, times_1, 0));
__ leaq(r9, Operand(rsi, rdx, times_1, 0));
__ leaq(r11, Operand(rsi, rdi, times_1, 0));
if (read_backward) {
__ subp(r11, rbx); // Offset by length when matching backwards.
__ subq(r11, rbx); // Offset by length when matching backwards.
}
__ addp(rbx, r9); // End of capture
__ addq(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
// r9 - current capture character address
......@@ -264,8 +264,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
__ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
__ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ orq(rax, Immediate(0x20)); // Convert match character to lower-case.
__ orq(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
......@@ -279,10 +279,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
__ addp(r11, Immediate(1));
__ addp(r9, Immediate(1));
__ addq(r11, Immediate(1));
__ addq(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
__ cmpp(r9, rbx);
__ cmpq(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
......@@ -315,9 +315,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
DCHECK(rcx == arg_reg_1);
DCHECK(rdx == arg_reg_2);
// Compute and set byte_offset1 (start of capture).
__ leap(rcx, Operand(rsi, rdx, times_1, 0));
__ leaq(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ leap(rdx, Operand(rsi, rdi, times_1, 0));
__ leaq(rdx, Operand(rsi, rdi, times_1, 0));
if (read_backward) {
__ subq(rdx, rbx);
}
......@@ -325,9 +325,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
DCHECK(rdi == arg_reg_1);
DCHECK(rsi == arg_reg_2);
// Compute byte_offset2 (current position = rsi+rdi).
__ leap(rax, Operand(rsi, rdi, times_1, 0));
__ leaq(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
__ leap(rdi, Operand(rsi, rdx, times_1, 0));
__ leaq(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ movp(rsi, rax);
if (read_backward) {
......@@ -364,7 +364,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
// Check if function returned non-zero for success or zero for failure.
__ testp(rax, rax);
__ testq(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, advance position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
......@@ -386,7 +386,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
// Find length of back-referenced capture.
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rax, start_reg + 1); // Offset of end of capture
__ subp(rax, rdx); // Length to check.
__ subq(rax, rdx); // Length to check.
// At this point, the capture registers are either both set or both cleared.
// If the capture length is zero, then the capture is either empty or cleared.
......@@ -409,12 +409,12 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
}
// Compute pointers to match string and capture string
__ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
__ leaq(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
if (read_backward) {
__ subq(rbx, rax); // Offset by length when matching backwards.
}
__ addp(rdx, rsi); // Start of capture.
__ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
__ addq(rdx, rsi); // Start of capture.
__ leaq(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// -----------------------
// rbx - current capture character address.
......@@ -433,10 +433,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
}
BranchOrBacktrack(not_equal, on_no_match);
// Increment pointers into capture and match string.
__ addp(rbx, Immediate(char_size()));
__ addp(rdx, Immediate(char_size()));
__ addq(rbx, Immediate(char_size()));
__ addq(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
__ cmpp(rdx, r9);
__ cmpq(rdx, r9);
__ j(below, &loop);
// Success.
......@@ -467,7 +467,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
__ andp(rax, current_character());
__ andq(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
......@@ -481,7 +481,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
__ andp(rax, current_character());
__ andq(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
......@@ -494,8 +494,8 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 mask,
Label* on_not_equal) {
DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
__ leap(rax, Operand(current_character(), -minus));
__ andp(rax, Immediate(mask));
__ leal(rax, Operand(current_character(), -minus));
__ andl(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
......@@ -528,7 +528,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
Register index = current_character();
if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
__ movp(rbx, current_character());
__ andp(rbx, Immediate(kTableMask));
__ andq(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
......@@ -541,8 +541,8 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check, using the sequence:
// leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
// cmp(rax, Immediate(max - min))
// leal(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
// cmpl(rax, Immediate(max - min))
switch (type) {
case 's':
// Match space-characters
......@@ -552,7 +552,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
// Check range 0x09..0x0D
__ leap(rax, Operand(current_character(), -'\t'));
__ leal(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
......@@ -567,20 +567,20 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
__ leap(rax, Operand(current_character(), -'0'));
__ leal(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
__ leap(rax, Operand(current_character(), -'0'));
__ leal(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
__ xorp(rax, Immediate(0x01));
__ xorl(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
__ subl(rax, Immediate(0x0B));
__ cmpl(rax, Immediate(0x0C - 0x0B));
......@@ -598,7 +598,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
__ xorp(rax, Immediate(0x01));
__ xorl(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C
__ subl(rax, Immediate(0x0B));
__ cmpl(rax, Immediate(0x0C - 0x0B));
......@@ -724,12 +724,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference::address_of_stack_limit(isolate());
__ movp(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
__ subp(rcx, Operand(kScratchRegister, 0));
__ subq(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
__ cmpp(rcx, Immediate(num_registers_ * kSystemPointerSize));
__ cmpq(rcx, Immediate(num_registers_ * kSystemPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
......@@ -739,14 +739,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
__ testp(rax, rax);
__ testq(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_rax);
__ bind(&stack_ok);
// Allocate space on stack for registers.
__ subp(rsp, Immediate(num_registers_ * kSystemPointerSize));
__ subq(rsp, Immediate(num_registers_ * kSystemPointerSize));
// Load string length.
__ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
......@@ -758,9 +758,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rbx, Operand(rbp, kStartIndex));
__ negq(rbx);
if (mode_ == UC16) {
__ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
__ leaq(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
__ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
__ leaq(rax, Operand(rdi, rbx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
......@@ -829,11 +829,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rdx, Operand(rbp, kStartIndex));
__ movp(rbx, Operand(rbp, kRegisterOutput));
__ movp(rcx, Operand(rbp, kInputEnd));
__ subp(rcx, Operand(rbp, kInputStart));
__ subq(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
__ leap(rcx, Operand(rcx, rdx, times_2, 0));
__ leaq(rcx, Operand(rcx, rdx, times_2, 0));
} else {
__ addp(rcx, rdx);
__ addq(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
__ movp(rax, register_location(i));
......@@ -841,9 +841,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Keep capture start in rdx for the zero-length check later.
__ movp(rdx, rax);
}
__ addp(rax, rcx); // Convert to index from start, not end.
__ addq(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
__ sarp(rax, Immediate(1)); // Convert byte index to character index.
__ sarq(rax, Immediate(1)); // Convert byte index to character index.
}
__ movl(Operand(rbx, i * kIntSize), rax);
}
......@@ -852,18 +852,18 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ incp(Operand(rbp, kSuccessfulCaptures));
__ incq(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
__ subp(rcx, Immediate(num_saved_registers_));
__ subq(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmpp(rcx, Immediate(num_saved_registers_));
__ cmpq(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
__ addp(Operand(rbp, kRegisterOutput),
__ addq(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
......@@ -872,11 +872,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
__ cmpp(rdi, rdx);
__ cmpq(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
__ testp(rdi, rdi);
__ testq(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
Label advance;
......@@ -904,7 +904,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
__ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
__ leaq(rsp, Operand(rbp, kLastCalleeSaveRegister));
__ popq(rbx);
__ popq(rdi);
__ popq(rsi);
......@@ -935,7 +935,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ pushq(rdi);
CallCheckStackGuardState();
__ testp(rax, rax);
__ testq(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &return_rax);
......@@ -968,12 +968,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
__ leap(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ leaq(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
__ movp(rdi, backtrack_stackpointer()); // First argument.
__ leap(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ leaq(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
ExternalReference grow_stack =
......@@ -981,7 +981,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ testp(rax, rax);
__ testq(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ movp(backtrack_stackpointer(), rax);
......@@ -1022,7 +1022,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
                                           int comparand,
                                           Label* if_ge) {
  // Branch (or backtrack, if if_ge is null) when the value stored in the
  // given capture/position register is >= comparand.
  // Registers hold pointer-sized values, so compare as a quad word.
  __ cmpq(register_location(reg), Immediate(comparand));
  BranchOrBacktrack(greater_equal, if_ge);
}
......@@ -1030,14 +1030,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
                                           int comparand,
                                           Label* if_lt) {
  // Branch (or backtrack, if if_lt is null) when the value stored in the
  // given capture/position register is < comparand.
  // Registers hold pointer-sized values, so compare as a quad word.
  __ cmpq(register_location(reg), Immediate(comparand));
  BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
                                              Label* if_eq) {
  // Branch (or backtrack, if if_eq is null) when the current input position
  // (rdi) equals the position saved in the given register.
  __ cmpq(rdi, register_location(reg));
  BranchOrBacktrack(equal, if_eq);
}
......@@ -1118,13 +1118,13 @@ void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
  // Registers store the backtrack stack pointer as an offset relative to the
  // high end of the backtrack stack; rebase it to an absolute address.
  __ movp(backtrack_stackpointer(), register_location(reg));
  __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ cmpp(rdi, Immediate(-by * char_size()));
__ cmpq(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
......@@ -1152,7 +1152,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
if (cp_offset == 0) {
__ movp(register_location(reg), rdi);
} else {
__ leap(rax, Operand(rdi, cp_offset * char_size()));
__ leaq(rax, Operand(rdi, cp_offset * char_size()));
__ movp(register_location(reg), rax);
}
}
......@@ -1169,7 +1169,7 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
  // Store the backtrack stack pointer as an offset relative to the high end
  // of the backtrack stack, so the saved value stays meaningful if the
  // backtrack stack is reallocated (see ReadStackPointerFromRegister).
  __ movp(rax, backtrack_stackpointer());
  __ subq(rax, Operand(rbp, kStackHighEnd));
  __ movp(register_location(reg), rax);
}
......@@ -1188,7 +1188,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
__ leap(rcx, Operand(rsp, -kSystemPointerSize));
__ leaq(rcx, Operand(rsp, -kSystemPointerSize));
#else
// Third argument: RegExp code frame pointer.
__ movp(rdx, rbp);
......@@ -1196,7 +1196,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
__ leap(rdi, Operand(rsp, -kRegisterSize));
__ leaq(rdi, Operand(rsp, -kRegisterSize));
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate());
......@@ -1245,8 +1245,8 @@ void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
__ cmpl(rdi, Immediate(-cp_offset * char_size()));
BranchOrBacktrack(greater_equal, on_outside_input);
} else {
__ leap(rax, Operand(rdi, cp_offset * char_size()));
__ cmpp(rax, Operand(rbp, kStringStartMinusOne));
__ leaq(rax, Operand(rdi, cp_offset * char_size()));
__ cmpq(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(less_equal, on_outside_input);
}
}
......@@ -1277,12 +1277,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) {
void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
  __ bind(label);
  // Convert the absolute return address pushed by the call into an offset
  // relative to the code object, so it stays valid across code relocation.
  // SafeReturn performs the inverse adjustment.
  __ subq(Operand(rsp, 0), code_object_pointer());
}
void RegExpMacroAssemblerX64::SafeReturn() {
  // Rebase the code-relative return address (created by SafeCallTarget) back
  // to an absolute address before returning.
  __ addq(Operand(rsp, 0), code_object_pointer());
  __ ret(0);
}
......@@ -1290,14 +1290,14 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
  DCHECK(source != backtrack_stackpointer());
  // Push an int-sized value onto the backtrack stack.
  // Notice: This updates flags, unlike normal Push.
  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
  __ movl(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerX64::Push(Immediate value) {
  // Push an int-sized immediate onto the backtrack stack.
  // Notice: This updates flags, unlike normal Push.
  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
  __ movl(Operand(backtrack_stackpointer(), 0), value);
}
......@@ -1320,7 +1320,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
  // Push the backtrack target onto the backtrack stack and record the
  // position for a later code-relative fixup (the stored word is adjusted
  // once the final code object address is known).
  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
  __ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
  MarkPositionForCodeRelativeFixup();
}
......@@ -1330,12 +1330,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
__ addp(backtrack_stackpointer(), Immediate(kIntSize));
__ addq(backtrack_stackpointer(), Immediate(kIntSize));
}
void RegExpMacroAssemblerX64::Drop() {
  // Discard the top int-sized entry of the backtrack stack.
  __ addq(backtrack_stackpointer(), Immediate(kIntSize));
}
......@@ -1345,7 +1345,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
__ cmpp(rsp, rax);
__ cmpq(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
......@@ -1359,7 +1359,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
__ cmpp(backtrack_stackpointer(), rax);
__ cmpq(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
......
......@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->pushq(reg.gp());
break;
case kWasmF32:
assm->subp(rsp, Immediate(kSystemPointerSize));
assm->subq(rsp, Immediate(kSystemPointerSize));
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
assm->subp(rsp, Immediate(kSystemPointerSize));
assm->subq(rsp, Immediate(kSystemPointerSize));
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
default:
......@@ -698,9 +698,9 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  // 64-bit integer addition. When dst differs from lhs, use lea to compute
  // the sum without clobbering either input; otherwise add in place.
  if (lhs.gp() != dst.gp()) {
    leaq(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0));
  } else {
    addq(dst.gp(), rhs.gp());
  }
}
......@@ -1412,7 +1412,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
  // Jump to the out-of-line code when the stack pointer is at or below the
  // limit stored at *limit_address.
  cmpq(rsp, Operand(limit_address, 0));
  j(below_equal, ool_code);
}
......@@ -1435,7 +1435,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList fp_regs = regs & kFpCacheRegList;
unsigned num_fp_regs = fp_regs.GetNumRegsSet();
if (num_fp_regs) {
subp(rsp, Immediate(num_fp_regs * kStackSlotSize));
subq(rsp, Immediate(num_fp_regs * kStackSlotSize));
unsigned offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
......@@ -1456,7 +1456,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
fp_regs.clear(reg);
fp_offset += sizeof(double);
}
if (fp_offset) addp(rsp, Immediate(fp_offset));
if (fp_offset) addq(rsp, Immediate(fp_offset));
LiftoffRegList gp_regs = regs & kGpCacheRegList;
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
......@@ -1476,7 +1476,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
subp(rsp, Immediate(stack_bytes));
subq(rsp, Immediate(stack_bytes));
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
......@@ -1510,7 +1510,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type);
}
addp(rsp, Immediate(stack_bytes));
addq(rsp, Immediate(stack_bytes));
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
......@@ -1538,12 +1538,12 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
  // Reserve {size} bytes on the machine stack and return the base of the
  // reserved area in {addr}.
  subq(rsp, Immediate(size));
  movp(addr, rsp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
  // Release an area previously reserved by AllocateStackSlot.
  addq(rsp, Immediate(size));
}
void LiftoffStackSlots::Construct() {
......
......@@ -642,10 +642,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mulq(Register src);
#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
void instruction##p(Register dst, Immediate imm8) { \
shift(dst, imm8, subcode, kSystemPointerSize); \
} \
\
void instruction##l(Register dst, Immediate imm8) { \
shift(dst, imm8, subcode, kInt32Size); \
} \
......@@ -654,10 +650,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
shift(dst, imm8, subcode, kInt64Size); \
} \
\
void instruction##p(Operand dst, Immediate imm8) { \
shift(dst, imm8, subcode, kSystemPointerSize); \
} \
\
void instruction##l(Operand dst, Immediate imm8) { \
shift(dst, imm8, subcode, kInt32Size); \
} \
......@@ -666,18 +658,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
shift(dst, imm8, subcode, kInt64Size); \
} \
\
void instruction##p_cl(Register dst) { \
shift(dst, subcode, kSystemPointerSize); \
} \
\
void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
\
void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
\
void instruction##p_cl(Operand dst) { \
shift(dst, subcode, kSystemPointerSize); \
} \
\
void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
\
void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
......
......@@ -24,7 +24,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
__ subp(rsp, Immediate(kDoubleRegsSize));
__ subq(rsp, Immediate(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
......@@ -35,7 +35,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
__ subp(rsp, Immediate(kFloatRegsSize));
__ subq(rsp, Immediate(kFloatRegsSize));
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
......@@ -69,10 +69,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
__ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ subp(arg5, rbp);
__ negp(arg5);
__ subq(arg5, rbp);
__ negq(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
......@@ -119,7 +119,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movl(rcx, Operand(rsp, src_offset));
__ movl(Operand(rbx, dst_offset), rcx);
}
__ addp(rsp, Immediate(kFloatRegsSize));
__ addq(rsp, Immediate(kFloatRegsSize));
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
......@@ -129,25 +129,25 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Remove the return address from the stack.
__ addp(rsp, Immediate(kPCOnStackSize));
__ addq(rsp, Immediate(kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addp(rcx, rsp);
__ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
__ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ Pop(Operand(rdx, 0));
__ addp(rdx, Immediate(sizeof(intptr_t)));
__ addq(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
__ cmpp(rcx, rsp);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
......@@ -170,7 +170,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movp(rax, Operand(rax, Deoptimizer::output_offset()));
__ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ leaq(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
......@@ -178,14 +178,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subp(rcx, Immediate(sizeof(intptr_t)));
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ testp(rcx, rcx);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addp(rax, Immediate(kSystemPointerSize));
__ addq(rax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmpp(rax, rdx);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
......
......@@ -119,7 +119,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
if (offset == 0) {
Move(destination, kRootRegister);
} else {
leap(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
leaq(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
}
}
......@@ -132,7 +132,7 @@ void TurboAssembler::LoadAddress(Register destination,
if (root_array_available_ && options().enable_root_array_delta_access) {
intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
if (is_int32(delta)) {
leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
leaq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
......@@ -199,7 +199,7 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) {
Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
} else {
// Some smi roots contain system pointer size values like stack limits.
cmpp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
cmpq(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
}
......@@ -212,7 +212,7 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
cmp_tagged(with, kScratchRegister);
} else {
// Some smi roots contain system pointer size values like stack limits.
cmpp(with, kScratchRegister);
cmpq(with, kScratchRegister);
}
}
......@@ -376,7 +376,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so the offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
leap(dst, FieldOperand(object, offset));
leaq(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate(kTaggedSize - 1));
......@@ -571,7 +571,7 @@ void TurboAssembler::CheckStackAlignment() {
if (frame_alignment > kSystemPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
testp(rsp, Immediate(frame_alignment_mask));
testq(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
......@@ -719,7 +719,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
int delta = kDoubleSize * XMMRegister::kNumRegisters;
subp(rsp, Immediate(delta));
subq(rsp, Immediate(delta));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
Movsd(Operand(rsp, i * kDoubleSize), reg);
......@@ -739,7 +739,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Movsd(reg, Operand(rsp, i * kDoubleSize));
}
int delta = kDoubleSize * XMMRegister::kNumRegisters;
addp(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
bytes += delta;
}
......@@ -1179,7 +1179,7 @@ void MacroAssembler::SmiTag(Register dst, Register src) {
movp(dst, src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
shlp(dst, Immediate(kSmiShift));
shlq(dst, Immediate(kSmiShift));
}
void TurboAssembler::SmiUntag(Register dst, Register src) {
......@@ -1188,7 +1188,7 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
movp(dst, src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
sarp(dst, Immediate(kSmiShift));
sarq(dst, Immediate(kSmiShift));
}
void TurboAssembler::SmiUntag(Register dst, Operand src) {
......@@ -1199,14 +1199,14 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
} else {
DCHECK(SmiValuesAre31Bits());
movp(dst, src);
sarp(dst, Immediate(kSmiShift));
sarq(dst, Immediate(kSmiShift));
}
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  // Compare two tagged Smi values. Smis compare correctly as full words, so
  // no untagging is required; both operands are asserted to be Smis first.
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpq(smi1, smi2);
}
void MacroAssembler::SmiCompare(Register dst, Smi src) {
......@@ -1300,7 +1300,7 @@ void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
movq(dst, kScratchRegister);
} else {
DCHECK_EQ(kSmiShiftSize, 32);
addp(dst, Immediate(constant));
addq(dst, Immediate(constant));
}
}
}
......@@ -1317,9 +1317,9 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
movp(dst, src);
}
if (shift < kSmiShift) {
sarp(dst, Immediate(kSmiShift - shift));
sarq(dst, Immediate(kSmiShift - shift));
} else {
shlp(dst, Immediate(shift - kSmiShift));
shlq(dst, Immediate(shift - kSmiShift));
}
return SmiIndex(dst, times_1);
} else {
......@@ -1494,7 +1494,7 @@ void TurboAssembler::MoveStringConstant(Register result,
void MacroAssembler::Drop(int stack_elements) {
  // Remove {stack_elements} pointer-sized values from the top of the stack.
  // A non-positive count is a no-op.
  if (stack_elements > 0) {
    addq(rsp, Immediate(stack_elements * kSystemPointerSize));
  }
}
......@@ -1739,7 +1739,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// A non-builtin Code object, the entry point is at
// Code::raw_instruction_start().
Move(destination, code_object);
addp(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
jmp(&out);
// A builtin Code object, the entry point is loaded from the builtin entry
......@@ -1752,7 +1752,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
bind(&out);
} else {
Move(destination, code_object);
addp(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
}
}
......@@ -2017,7 +2017,7 @@ void MacroAssembler::Pushad() {
// Use lea for symmetry with Popad.
int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
kSystemPointerSize;
leap(rsp, Operand(rsp, -sp_delta));
leaq(rsp, Operand(rsp, -sp_delta));
}
......@@ -2025,7 +2025,7 @@ void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
kSystemPointerSize;
leap(rsp, Operand(rsp, sp_delta));
leaq(rsp, Operand(rsp, sp_delta));
Pop(r15);
Pop(r14);
Pop(r12);
......@@ -2085,7 +2085,7 @@ void MacroAssembler::PopStackHandler() {
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
Pop(ExternalReferenceAsOperand(handler_address));
addp(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
void TurboAssembler::Ret() { ret(0); }
......@@ -2095,7 +2095,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
addp(rsp, Immediate(bytes_dropped));
addq(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
......@@ -2237,7 +2237,7 @@ void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
j(equal, target_if_cleared);
andp(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
andq(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
......@@ -2272,7 +2272,7 @@ void MacroAssembler::MaybeDropFrames() {
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
Load(rbx, restart_fp);
testp(rbx, rbx);
testq(rbx, rbx);
Label dont_drop;
j(zero, &dont_drop, Label::kNear);
......@@ -2297,18 +2297,18 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// after we drop current frame.
Register new_sp_reg = scratch0;
if (callee_args_count.is_reg()) {
subp(caller_args_count_reg, callee_args_count.reg());
leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
subq(caller_args_count_reg, callee_args_count.reg());
leaq(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
StandardFrameConstants::kCallerPCOffset));
} else {
leap(new_sp_reg,
leaq(new_sp_reg,
Operand(rbp, caller_args_count_reg, times_pointer_size,
StandardFrameConstants::kCallerPCOffset -
callee_args_count.immediate() * kSystemPointerSize));
}
if (FLAG_debug_code) {
cmpp(rsp, new_sp_reg);
cmpq(rsp, new_sp_reg);
Check(below, AbortReason::kStackAccessBelowStackPointer);
}
......@@ -2326,7 +2326,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// +2 here is to copy both receiver and return address.
Register count_reg = caller_args_count_reg;
if (callee_args_count.is_reg()) {
leap(count_reg, Operand(callee_args_count.reg(), 2));
leaq(count_reg, Operand(callee_args_count.reg(), 2));
} else {
movp(count_reg, Immediate(callee_args_count.immediate() + 2));
// TODO(ishell): Unroll copying loop for small immediate values.
......@@ -2337,11 +2337,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Label loop, entry;
jmp(&entry, Label::kNear);
bind(&loop);
decp(count_reg);
decq(count_reg);
movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
bind(&entry);
cmpp(count_reg, Immediate(0));
cmpq(count_reg, Immediate(0));
j(not_equal, &loop, Label::kNear);
// Leave current frame.
......@@ -2440,13 +2440,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// case when we invoke function values without going through the
// IC mechanism.
Set(rax, actual.immediate());
cmpp(expected.reg(), Immediate(actual.immediate()));
cmpq(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
DCHECK(expected.reg() == rbx);
} else if (expected.reg() != actual.reg()) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
cmpp(expected.reg(), actual.reg());
cmpq(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
DCHECK(actual.reg() == rax);
DCHECK(expected.reg() == rbx);
......@@ -2537,7 +2537,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
}
......@@ -2592,7 +2592,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (save_doubles) {
int space = XMMRegister::kNumRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
subq(rsp, Immediate(space));
int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
......@@ -2601,7 +2601,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
subp(rsp, Immediate(arg_stack_space * kRegisterSize));
subq(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
......@@ -2609,7 +2609,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (kFrameAlignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
DCHECK(is_int8(kFrameAlignment));
andp(rsp, Immediate(-kFrameAlignment));
andq(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
......@@ -2623,7 +2623,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
leap(r15, Operand(rbp, r14, times_pointer_size, offset));
leaq(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
......@@ -2655,7 +2655,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
leap(rsp, Operand(r15, 1 * kSystemPointerSize));
leaq(rsp, Operand(r15, 1 * kSystemPointerSize));
PushReturnAddressFrom(rcx);
} else {
......@@ -2732,8 +2732,8 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
andp(rsp, Immediate(-frame_alignment));
subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
andq(rsp, Immediate(-frame_alignment));
movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
......@@ -2787,10 +2787,10 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Label::Distance condition_met_distance) {
DCHECK(cc == zero || cc == not_zero);
if (scratch == object) {
andp(scratch, Immediate(~kPageAlignmentMask));
andq(scratch, Immediate(~kPageAlignmentMask));
} else {
movp(scratch, Immediate(~kPageAlignmentMask));
andp(scratch, object);
andq(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
......
......@@ -770,9 +770,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
if (shift != 0) {
shrp(reg, Immediate(shift));
shrq(reg, Immediate(shift));
}
andp(reg, Immediate(mask));
andq(reg, Immediate(mask));
}
// Abort execution if argument is a smi, enabled via --debug-code.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment