Commit 0d809f9d authored by Igor Sheludko, committed by Commit Bot

[cleanup][x32] Remove x32 leftovers from x64 assembler, pt.2

movXXXp are replaced with respective movXXXq.

Drive-by cleanup: unified the way we generate movq with other mov instructions.

Bug: v8:8621, v8:8562
Change-Id: I5c65dccf4e460cad5c3cee3dfabfd6ce39abc244
Reviewed-on: https://chromium-review.googlesource.com/c/1446096
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59243}
parent 75eb52c7
This diff is collapsed.
......@@ -709,7 +709,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
__ ComputeCodeStartAddress(rbx);
__ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
__ cmpq(kJavaScriptCallCodeStartRegister, rbx);
__ movp(rbx, Immediate(-1));
__ movq(rbx, Immediate(-1));
__ cmovq(equal, kSpeculationPoisonRegister, rbx);
}
......@@ -1015,7 +1015,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = new (zone())
OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1,
mode, DetermineStubCallMode());
__ movp(operand, value);
__ movq(operand, value);
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
......@@ -3844,7 +3844,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (src.type()) {
case Constant::kInt32: {
if (RelocInfo::IsWasmReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
__ movq(dst, Immediate64(src.ToInt64(), src.rmode()));
} else {
int32_t value = src.ToInt32();
if (value == 0) {
......@@ -3857,7 +3857,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kInt64:
if (RelocInfo::IsWasmReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
__ movq(dst, Immediate64(src.ToInt64(), src.rmode()));
} else {
__ Set(dst, src.ToInt64());
}
......
......@@ -38,8 +38,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ movp(rbp, rbx);
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rbp, rbx);
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ leave();
__ LoadTaggedPointerField(
......
......@@ -170,7 +170,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
if (RelocInfo::IsNone(rmode)) {
TurboAssembler::Set(reg.gp(), value.to_i64());
} else {
movq(reg.gp(), value.to_i64(), rmode);
movq(reg.gp(), Immediate64(value.to_i64(), rmode));
}
break;
case kWasmF32:
......@@ -187,7 +187,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
DCHECK_LE(offset, kMaxInt);
movp(dst, liftoff::GetInstanceOperand());
movq(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
movl(dst, Operand(dst, offset));
......@@ -199,16 +199,16 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
uint32_t offset) {
DCHECK_LE(offset, kMaxInt);
movp(dst, liftoff::GetInstanceOperand());
movq(dst, liftoff::GetInstanceOperand());
LoadTaggedPointerField(dst, Operand(dst, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
movp(liftoff::GetInstanceOperand(), instance);
movq(liftoff::GetInstanceOperand(), instance);
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
movp(dst, liftoff::GetInstanceOperand());
movq(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
......@@ -1486,7 +1486,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
DCHECK_LE(arg_bytes, stack_bytes);
// Pass a pointer to the buffer with the arguments to the C function.
movp(arg_reg_1, rsp);
movq(arg_reg_1, rsp);
constexpr int kNumCCallArgs = 1;
......@@ -1539,7 +1539,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
subq(rsp, Immediate(size));
movp(addr, rsp);
movq(addr, rsp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
......
......@@ -28,21 +28,11 @@ void Assembler::emitl(uint32_t x) {
pc_ += sizeof(uint32_t);
}
void Assembler::emitp(Address x, RelocInfo::Mode rmode) {
WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
if (!RelocInfo::IsNone(rmode)) {
RecordRelocInfo(rmode, x);
}
pc_ += sizeof(uintptr_t);
}
void Assembler::emitq(uint64_t x) {
WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint64_t);
}
void Assembler::emitw(uint16_t x) {
WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint16_t);
......@@ -61,16 +51,21 @@ void Assembler::emit(Immediate x) {
emitl(x.value_);
}
// Emits a 64-bit immediate into the instruction stream.
// If the immediate carries relocation info, a relocation entry is recorded
// before the 8 value bytes are written.
// NOTE(review): the removed emitp() recorded relocation *after* writing the
// value; here it is recorded first — presumably RecordRelocInfo refers to the
// current pc_ so both orders tag the same position; confirm.
void Assembler::emit(Immediate64 x) {
if (!RelocInfo::IsNone(x.rmode_)) {
RecordRelocInfo(x.rmode_);
}
emitq(static_cast<uint64_t>(x.value_));
}
// Emits a REX prefix (layout 0b0100WRXB) with W=1 (64-bit operand size).
// 0x48 == 0100 1000b, i.e. REX with only the W bit set; the register code
// extension bits are OR-ed in below: bit 2 is REX.R (extends ModRM.reg) and
// bit 0 is REX.B (extends ModRM.rm).
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}
// Same as above for an XMM register in the ModRM.reg field: bit 3 of the
// register code is moved into REX.R (bit 2) / REX.B (bit 0) respectively.
void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
// Mirror overload: general-purpose register in ModRM.reg, XMM in ModRM.rm.
void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
......
......@@ -1648,27 +1648,15 @@ void Assembler::emit_lea(Register dst, Operand src, int size) {
void Assembler::load_rax(Address value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
if (kSystemPointerSize == kInt64Size) {
emit(0x48); // REX.W
emit(0xA1);
emitp(value, mode);
} else {
DCHECK_EQ(kSystemPointerSize, kInt32Size);
emit(0xA1);
emitp(value, mode);
// In 64-bit mode, need to zero extend the operand to 8 bytes.
// See 2.2.1.4 in Intel64 and IA32 Architectures Software
// Developer's Manual Volume 2.
emitl(0);
}
emit(0x48); // REX.W
emit(0xA1);
emit(Immediate64(value, mode));
}
// Loads rax from the memory location named by |ref|, recording
// EXTERNAL_REFERENCE relocation for the address embedded in the instruction.
void Assembler::load_rax(ExternalReference ref) {
const Address location = ref.address();
load_rax(location, RelocInfo::EXTERNAL_REFERENCE);
}
void Assembler::leave() {
EnsureSpace ensure_space(this);
emit(0xC9);
......@@ -1793,55 +1781,35 @@ void Assembler::emit_mov(Operand dst, Immediate value, int size) {
emit(value);
}
void Assembler::movp(Register dst, Address value, RelocInfo::Mode rmode) {
if (constpool_.TryRecordEntry(value, rmode)) {
void Assembler::emit_mov(Register dst, Immediate64 value, int size) {
DCHECK_EQ(size, kInt64Size);
if (constpool_.TryRecordEntry(value.value_, value.rmode_)) {
// Emit rip-relative move with offset = 0
Label label;
emit_mov(dst, Operand(&label, 0), kSystemPointerSize);
emit_mov(dst, Operand(&label, 0), size);
bind(&label);
} else {
EnsureSpace ensure_space(this);
emit_rex(dst, kSystemPointerSize);
emit_rex(dst, size);
emit(0xB8 | dst.low_bits());
emitp(value, rmode);
emit(value);
}
}
void Assembler::movp_heap_number(Register dst, double value) {
void Assembler::movq_heap_number(Register dst, double value) {
EnsureSpace ensure_space(this);
emit_rex(dst, kSystemPointerSize);
emit_rex(dst, kInt64Size);
emit(0xB8 | dst.low_bits());
RequestHeapObject(HeapObjectRequest(value));
emitp(0, RelocInfo::EMBEDDED_OBJECT);
emit(Immediate64(kNullAddress, RelocInfo::EMBEDDED_OBJECT));
}
void Assembler::movp_string(Register dst, const StringConstantBase* str) {
void Assembler::movq_string(Register dst, const StringConstantBase* str) {
EnsureSpace ensure_space(this);
emit_rex(dst, kSystemPointerSize);
emit_rex(dst, kInt64Size);
emit(0xB8 | dst.low_bits());
RequestHeapObject(HeapObjectRequest(str));
emitp(0, RelocInfo::EMBEDDED_OBJECT);
}
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
if (constpool_.TryRecordEntry(value, rmode)) {
// Emit rip-relative move with offset = 0
Label label;
emit_mov(dst, Operand(&label, 0), kInt64Size);
bind(&label);
} else {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
if (!RelocInfo::IsNone(rmode)) {
RecordRelocInfo(rmode, value);
}
emitq(value);
}
}
void Assembler::movq(Register dst, uint64_t value, RelocInfo::Mode rmode) {
movq(dst, static_cast<int64_t>(value), rmode);
emit(Immediate64(kNullAddress, RelocInfo::EMBEDDED_OBJECT));
}
// Loads the ip-relative location of the src label into the target location
......@@ -2321,22 +2289,11 @@ void Assembler::emit_xchg(Register dst, Operand src, int size) {
void Assembler::store_rax(Address dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
if (kSystemPointerSize == kInt64Size) {
emit(0x48); // REX.W
emit(0xA3);
emitp(dst, mode);
} else {
DCHECK_EQ(kSystemPointerSize, kInt32Size);
emit(0xA3);
emitp(dst, mode);
// In 64-bit mode, need to zero extend the operand to 8 bytes.
// See 2.2.1.4 in Intel64 and IA32 Architectures Software
// Developer's Manual Volume 2.
emitl(0);
}
emit(0x48); // REX.W
emit(0xA3);
emit(Immediate64(dst, mode));
}
// Stores rax to the memory location named by |ref|, recording
// EXTERNAL_REFERENCE relocation for the address embedded in the instruction.
void Assembler::store_rax(ExternalReference ref) {
const Address destination = ref.address();
store_rax(destination, RelocInfo::EXTERNAL_REFERENCE);
}
......@@ -4966,8 +4923,8 @@ void Assembler::dq(Label* label) {
EnsureSpace ensure_space(this);
if (label->is_bound()) {
internal_reference_positions_.push_back(pc_offset());
emitp(reinterpret_cast<Address>(buffer_start_) + label->pos(),
RelocInfo::INTERNAL_REFERENCE);
emit(Immediate64(reinterpret_cast<Address>(buffer_start_) + label->pos(),
RelocInfo::INTERNAL_REFERENCE));
} else {
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
emitl(0); // Zero for the first 32bit marks it as 64bit absolute address.
......
......@@ -131,6 +131,21 @@ ASSERT_TRIVIALLY_COPYABLE(Immediate);
static_assert(sizeof(Immediate) <= kSystemPointerSize,
"Immediate must be small enough to pass it by value");
// A 64-bit immediate operand, optionally tagged with relocation info for the
// embedded value (e.g. an Address recorded as EXTERNAL_REFERENCE or
// EMBEDDED_OBJECT). Trivially copyable value type, consumed by Assembler.
class Immediate64 {
public:
explicit constexpr Immediate64(int64_t value) : value_(value) {}
explicit constexpr Immediate64(int64_t value, RelocInfo::Mode rmode)
: value_(value), rmode_(rmode) {}
explicit constexpr Immediate64(Address value, RelocInfo::Mode rmode)
: value_(static_cast<int64_t>(value)), rmode_(rmode) {}
private:
// Raw 64-bit payload; Address values are stored via static_cast.
const int64_t value_;
// NONE means no relocation entry is recorded when this immediate is emitted.
const RelocInfo::Mode rmode_ = RelocInfo::NONE;
// Only the Assembler reads value_/rmode_ (see Assembler::emit(Immediate64)).
friend class Assembler;
};
// -----------------------------------------------------------------------------
// Machine instruction Operands
......@@ -414,11 +429,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#define DECLARE_INSTRUCTION(instruction) \
template <class P1> \
void instruction##p(P1 p1) { \
emit_##instruction(p1, kSystemPointerSize); \
} \
\
template <class P1> \
void instruction##_tagged(P1 p1) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
......@@ -436,11 +446,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
\
template <class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kSystemPointerSize); \
} \
\
template <class P1, class P2> \
void instruction##_tagged(P1 p1, P2 p2) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
......@@ -459,19 +464,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
\
template <class P1, class P2, class P3> \
void instruction##p(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kSystemPointerSize); \
} \
\
template <class P1, class P2, class P3> \
void instruction##_tagged(P1 p1, P2 p2, P3 p3) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
emit_##instruction(p1, p2, p3, \
COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
} \
\
template <class P1, class P2, class P3> \
void instruction##l(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt32Size); \
} \
......@@ -527,9 +519,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// position (after the move) to the destination.
void movl(Operand dst, Label* src);
// Loads a pointer into a register with a relocation mode.
void movp(Register dst, Address ptr, RelocInfo::Mode rmode);
// Load a heap number into a register.
// The heap number will not be allocated and embedded into the code right
// away. Instead, we emit the load of a dummy object. Later, when calling
......@@ -537,15 +526,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// patched by replacing the dummy with the actual object. The RelocInfo for
// the embedded object gets already recorded correctly when emitting the dummy
// move.
void movp_heap_number(Register dst, double value);
void movq_heap_number(Register dst, double value);
void movp_string(Register dst, const StringConstantBase* str);
void movq_string(Register dst, const StringConstantBase* str);
// Loads a 64-bit immediate into a register.
void movq(Register dst, int64_t value,
RelocInfo::Mode rmode = RelocInfo::NONE);
void movq(Register dst, uint64_t value,
RelocInfo::Mode rmode = RelocInfo::NONE);
// Loads a full 64-bit immediate into a register (no relocation info);
// both signednesses funnel into the Immediate64 overload.
void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); }
void movq(Register dst, uint64_t value) {
movq(dst, Immediate64(static_cast<int64_t>(value)));
}
void movsxbl(Register dst, Register src);
void movsxbl(Register dst, Operand src);
......@@ -1814,11 +1803,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit(byte x) { *pc_++ = x; }
inline void emitl(uint32_t x);
inline void emitp(Address x, RelocInfo::Mode rmode);
inline void emitq(uint64_t x);
inline void emitw(uint16_t x);
inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
inline void emit(Immediate x);
inline void emit(Immediate64 x);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of both register codes.
......@@ -2130,6 +2119,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit_mov(Operand dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(Operand dst, Immediate value, int size);
void emit_mov(Register dst, Immediate64 value, int size);
void emit_movzxb(Register dst, Operand src, int size);
void emit_movzxb(Register dst, Register src, int size);
......
......@@ -64,11 +64,11 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
Register arg5 = r11;
// The bailout id is passed using r13 on the stack.
__ movp(arg_reg_3, r13);
__ movq(arg_reg_3, r13);
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
__ movq(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
__ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ subq(arg5, rbp);
......@@ -76,13 +76,13 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
__ movp(rax, Immediate(0));
__ movq(rax, Immediate(0));
Label context_check;
__ movp(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(rdi, &context_check);
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ movp(arg_reg_1, rax);
__ movq(arg_reg_1, rax);
__ Set(arg_reg_2, static_cast<int>(deopt_kind));
// Args 3 and 4 are already in the right registers.
......@@ -93,7 +93,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
__ movq(Operand(rsp, 5 * kRegisterSize), arg5);
#else
__ movp(r8, arg5);
__ movq(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate));
#endif
......@@ -103,7 +103,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movp(rbx, Operand(rax, Deoptimizer::input_offset()));
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
......@@ -133,7 +133,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
......@@ -153,7 +153,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Compute the output frame in the deoptimizer.
__ pushq(rax);
__ PrepareCallCFunction(2);
__ movp(arg_reg_1, rax);
__ movq(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
{
AllowExternalCallThatCantCauseGC scope(masm);
......@@ -161,7 +161,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
__ popq(rax);
__ movp(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
__ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
......@@ -169,13 +169,13 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movp(rax, Operand(rax, Deoptimizer::output_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
__ leaq(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movp(rbx, Operand(rax, 0));
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
......
This diff is collapsed.
......@@ -189,7 +189,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRoot(Register destination, RootIndex index) override;
void LoadRoot(Operand destination, RootIndex index) {
LoadRoot(kScratchRegister, index);
movp(destination, kScratchRegister);
movq(destination, kScratchRegister);
}
void Push(Register src);
......@@ -295,7 +295,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Operand dst, Smi source) {
Register constant = GetSmiConstant(source);
movp(dst, constant);
movq(dst, constant);
}
void Move(Register dst, ExternalReference ext);
......@@ -318,7 +318,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
movp(dst, ptr, rmode);
movq(dst, Immediate64(ptr, rmode));
}
void MoveStringConstant(Register result, const StringConstantBase* string,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment