Commit 0243ba80 authored by Igor Sheludko, committed by Commit Bot

[cleanup][x32] Remove x32 leftovers from x64 assembler, pt.1

addp, andp, cmpp, decp, incp, leap, negp, orp, subp, testp, xorp,
shrp, sarp, shlp are replaced with respective quad-word instructions.

Some wrongly-used xxxp instructions in regexp code are replaced with xxxl.

Bug: v8:8621, v8:8562
Change-Id: If5fe3229a35805b8ef84d3e1ffa05cf9ed91ceef
Reviewed-on: https://chromium-review.googlesource.com/c/1446451
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59217}
parent d928d25c
This diff is collapsed.
......@@ -201,7 +201,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
zone_(gen->zone()) {}
void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
__ subq(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
......@@ -214,7 +214,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
}
__ movl(result_, MemOperand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
__ addq(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kDoubleSize);
}
......@@ -250,7 +250,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, zero,
exit());
__ leap(scratch1_, operand_);
__ leaq(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
......@@ -592,7 +592,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Label done;
// Check if current frame is an arguments adaptor frame.
__ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
__ cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &done, Label::kNear);
......@@ -708,7 +708,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(rbx);
__ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
__ cmpp(kJavaScriptCallCodeStartRegister, rbx);
__ cmpq(kJavaScriptCallCodeStartRegister, rbx);
__ movp(rbx, Immediate(-1));
__ cmovq(equal, kSpeculationPoisonRegister, rbx);
}
......@@ -3741,7 +3741,7 @@ void CodeGenerator::AssembleConstructFrame() {
const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize;
// Adjust the stack pointer.
__ subp(rsp, Immediate(stack_size));
__ subq(rsp, Immediate(stack_size));
// Store the registers on the stack.
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
......@@ -3793,7 +3793,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
slot_idx++;
}
// Adjust the stack pointer.
__ addp(rsp, Immediate(stack_size));
__ addq(rsp, Immediate(stack_size));
}
unwinding_info_writer_.MarkBlockWillExit();
......
......@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->pushq(reg.gp());
break;
case kWasmF32:
assm->subp(rsp, Immediate(kSystemPointerSize));
assm->subq(rsp, Immediate(kSystemPointerSize));
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
assm->subp(rsp, Immediate(kSystemPointerSize));
assm->subq(rsp, Immediate(kSystemPointerSize));
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
default:
......@@ -698,9 +698,9 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
if (lhs.gp() != dst.gp()) {
leap(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0));
leaq(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0));
} else {
addp(dst.gp(), rhs.gp());
addq(dst.gp(), rhs.gp());
}
}
......@@ -1412,7 +1412,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
cmpp(rsp, Operand(limit_address, 0));
cmpq(rsp, Operand(limit_address, 0));
j(below_equal, ool_code);
}
......@@ -1435,7 +1435,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList fp_regs = regs & kFpCacheRegList;
unsigned num_fp_regs = fp_regs.GetNumRegsSet();
if (num_fp_regs) {
subp(rsp, Immediate(num_fp_regs * kStackSlotSize));
subq(rsp, Immediate(num_fp_regs * kStackSlotSize));
unsigned offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
......@@ -1456,7 +1456,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
fp_regs.clear(reg);
fp_offset += sizeof(double);
}
if (fp_offset) addp(rsp, Immediate(fp_offset));
if (fp_offset) addq(rsp, Immediate(fp_offset));
LiftoffRegList gp_regs = regs & kGpCacheRegList;
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
......@@ -1476,7 +1476,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
subp(rsp, Immediate(stack_bytes));
subq(rsp, Immediate(stack_bytes));
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
......@@ -1510,7 +1510,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type);
}
addp(rsp, Immediate(stack_bytes));
addq(rsp, Immediate(stack_bytes));
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
......@@ -1538,12 +1538,12 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
subp(rsp, Immediate(size));
subq(rsp, Immediate(size));
movp(addr, rsp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addp(rsp, Immediate(size));
addq(rsp, Immediate(size));
}
void LiftoffStackSlots::Construct() {
......
......@@ -642,10 +642,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mulq(Register src);
#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
void instruction##p(Register dst, Immediate imm8) { \
shift(dst, imm8, subcode, kSystemPointerSize); \
} \
\
void instruction##l(Register dst, Immediate imm8) { \
shift(dst, imm8, subcode, kInt32Size); \
} \
......@@ -654,10 +650,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
shift(dst, imm8, subcode, kInt64Size); \
} \
\
void instruction##p(Operand dst, Immediate imm8) { \
shift(dst, imm8, subcode, kSystemPointerSize); \
} \
\
void instruction##l(Operand dst, Immediate imm8) { \
shift(dst, imm8, subcode, kInt32Size); \
} \
......@@ -666,18 +658,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
shift(dst, imm8, subcode, kInt64Size); \
} \
\
void instruction##p_cl(Register dst) { \
shift(dst, subcode, kSystemPointerSize); \
} \
\
void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
\
void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
\
void instruction##p_cl(Operand dst) { \
shift(dst, subcode, kSystemPointerSize); \
} \
\
void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
\
void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
......
......@@ -24,7 +24,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
__ subp(rsp, Immediate(kDoubleRegsSize));
__ subq(rsp, Immediate(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
......@@ -35,7 +35,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
__ subp(rsp, Immediate(kFloatRegsSize));
__ subq(rsp, Immediate(kFloatRegsSize));
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
......@@ -69,10 +69,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
__ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ subp(arg5, rbp);
__ negp(arg5);
__ subq(arg5, rbp);
__ negq(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
......@@ -119,7 +119,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movl(rcx, Operand(rsp, src_offset));
__ movl(Operand(rbx, dst_offset), rcx);
}
__ addp(rsp, Immediate(kFloatRegsSize));
__ addq(rsp, Immediate(kFloatRegsSize));
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
......@@ -129,25 +129,25 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Remove the return address from the stack.
__ addp(rsp, Immediate(kPCOnStackSize));
__ addq(rsp, Immediate(kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addp(rcx, rsp);
__ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
__ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ Pop(Operand(rdx, 0));
__ addp(rdx, Immediate(sizeof(intptr_t)));
__ addq(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
__ cmpp(rcx, rsp);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
......@@ -170,7 +170,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movp(rax, Operand(rax, Deoptimizer::output_offset()));
__ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ leaq(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
......@@ -178,14 +178,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subp(rcx, Immediate(sizeof(intptr_t)));
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ testp(rcx, rcx);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addp(rax, Immediate(kSystemPointerSize));
__ addq(rax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmpp(rax, rdx);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
......
This diff is collapsed.
......@@ -770,9 +770,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
if (shift != 0) {
shrp(reg, Immediate(shift));
shrq(reg, Immediate(shift));
}
andp(reg, Immediate(mask));
andq(reg, Immediate(mask));
}
// Abort execution if argument is a smi, enabled via --debug-code.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment