Commit de3f3cf0 authored by Georgia Kouveli, committed by Commit Bot

[arm64] Clean-up of move operations.

* Perform the lookups into the builtins constant table and the external
  reference table in the generic version of Mov that accepts an Operand
  source. This ensures we do not miss looking up constants and external
  references when the generic Mov is called.
* Remove Mov(ExternalReference), as an ExternalReference can be implicitly
  converted to an Operand.
* Remove two Move functions that are unused in architecture-independent code.
  Replace their uses in arm64-specific code with the generic Mov, which now
  does all the necessary work.

Change-Id: Ibbcee6ba22f661984cd830717e14c9b35a1fba0a
Reviewed-on: https://chromium-review.googlesource.com/1172351
Commit-Queue: Georgia Kouveli <georgia.kouveli@arm.com>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55121}
parent ae09fa83
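
The shape of the change is easiest to see at the call sites. A minimal
before/after sketch, using calls that appear in the diff below (illustrative
only, not itself part of the patch):

    // Before: several entry points, only some of which knew how to do the
    // isolate-independent (embedded builtins) lookups.
    __ Move(x2, x0);                 // register-register move
    __ Move(scratch, CodeObject());  // embedded heap object
    __ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));  // Smi immediate

    // After: everything funnels into the generic Mov, whose Operand overload
    // performs the builtins constant table and external reference table
    // lookups itself when generating isolate-independent code.
    __ Mov(x2, x0);
    __ Mov(scratch, CodeObject());
    __ Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));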
@@ -276,7 +276,7 @@ Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
       shift_amount_(shift_amount) {
   DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
   DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
-  DCHECK(!reg.IsSP());
+  DCHECK_IMPLIES(reg.IsSP(), shift_amount == 0);
 }
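
This assert is relaxed presumably because plain register moves now flow
through the Operand path: Mov(rd, rn) builds an Operand(rn) with a zero
shift, and rn may legitimately be sp. DCHECK_IMPLIES(a, b) asserts !a || b;
a self-contained model of the new invariant (names are illustrative, not
V8's):

    #include <cassert>

    // The constructor now tolerates sp, but only when no shift is applied
    // to it; shifted-register operand forms cannot encode sp.
    #define DCHECK_IMPLIES_SKETCH(a, b) assert(!(a) || (b))

    int main() {
      bool reg_is_sp = true;
      unsigned shift_amount = 0;
      DCHECK_IMPLIES_SKETCH(reg_is_sp, shift_amount == 0);  // passes
      // reg_is_sp with a non-zero shift_amount would trip the assert.
    }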
@@ -780,18 +780,6 @@ void TurboAssembler::Mneg(const Register& rd, const Register& rn,
   mneg(rd, rn, rm);
 }
 
-void TurboAssembler::Mov(const Register& rd, const Register& rn) {
-  DCHECK(allow_macro_instructions());
-  DCHECK(!rd.IsZero());
-  // Emit a register move only if the registers are distinct, or if they are
-  // not X registers. Note that mov(w0, w0) is not a no-op because it clears
-  // the top word of x0.
-  if (!rd.Is(rn) || !rd.Is64Bits()) {
-    Assembler::mov(rd, rn);
-  }
-}
-
 void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
   DCHECK(allow_macro_instructions());
   DCHECK(!rd.IsZero());
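
The comment in the deleted overload records an arm64 subtlety that the
generic Mov preserves in its final branch: any write to a W register
zero-extends into the enclosing X register, so mov(w0, w0) truncates rather
than doing nothing. A self-contained model of that rule:

    #include <cassert>
    #include <cstdint>

    // Writing a W register clears the upper 32 bits of the enclosing X
    // register, which is why mov(w0, w0) is not a no-op.
    uint64_t write_w_register(uint32_t w_value) {
      return static_cast<uint64_t>(w_value);  // upper 32 bits become zero
    }

    int main() {
      uint64_t x0 = 0xDEADBEEF00000001;
      x0 = write_w_register(static_cast<uint32_t>(x0));  // models mov w0, w0
      assert(x0 == 0x00000001);  // the top word of x0 was cleared
    }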
@@ -306,23 +306,35 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
   Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
 
   if (operand.NeedsRelocation(this)) {
+    if (FLAG_embedded_builtins) {
+      if (root_array_available_ && options().isolate_independent_code) {
+        if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
+          Address addr = static_cast<Address>(operand.ImmediateValue());
+          ExternalReference reference = bit_cast<ExternalReference>(addr);
+          IndirectLoadExternalReference(rd, reference);
+          return;
+        } else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
+          Handle<HeapObject> x(
+              reinterpret_cast<HeapObject**>(operand.ImmediateValue()));
+          IndirectLoadConstant(rd, x);
+          return;
+        }
+      }
+    }
     Ldr(dst, operand);
 
   } else if (operand.IsImmediate()) {
     // Call the macro assembler for generic immediates.
     Mov(dst, operand.ImmediateValue());
 
   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
     // Emit a shift instruction if moving a shifted register. This operation
     // could also be achieved using an orr instruction (like orn used by Mvn),
     // but using a shift instruction makes the disassembly clearer.
     EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
 
   } else if (operand.IsExtendedRegister()) {
     // Emit an extend instruction if moving an extended register. This handles
     // extend with post-shift operations, too.
     EmitExtendShift(dst, operand.reg(), operand.extend(),
                     operand.shift_amount());
 
   } else {
     // Otherwise, emit a register move only if the registers are distinct, or
     // if they are not X registers.
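
With the lookups folded in, this one overload now serves every source kind.
A sketch of call sites that all resolve through it, assuming the usual arm64
Operand constructors (illustrative, not from this patch):

    __ Mov(x0, x1);                   // register move (skipped entirely for
                                      // identical X registers)
    __ Mov(x0, Operand(x1, LSL, 4));  // shifted register  -> lsl
    __ Mov(x0, Operand(w1, UXTB));    // extended register -> uxtb
    __ Mov(x0, 0x1234567800000000);   // immediate synthesis (movz/movk)
    __ Mov(x0, ExternalReference::isolate_address(isolate()));
    // ^ relocatable: with FLAG_embedded_builtins and isolate-independent
    //   code, this becomes an indirect load from the external reference
    //   table instead of an address embedded in the instruction stream.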
@@ -348,17 +360,6 @@
   }
 }
 
-void TurboAssembler::Mov(const Register& rd, ExternalReference reference) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadExternalReference(rd, reference);
-      return;
-    }
-  }
-  // The Immediate in Operand sets the relocation mode.
-  Mov(rd, Operand(reference));
-}
-
 void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
   DCHECK(is_uint16(imm));
   int byte1 = (imm & 0xFF);
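
The overload deleted above is redundant because the conversion path already
carries the needed information: Operand's implicit ExternalReference
constructor tags the immediate with RelocInfo::EXTERNAL_REFERENCE, which is
exactly what the generic Mov dispatches on. A sketch of the equivalence:

    // Both spellings end up on the same path:
    __ Mov(x0, ExternalReference::isolate_address(isolate()));  // implicit
    __ Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
    // Either way operand.ImmediateRMode() is RelocInfo::EXTERNAL_REFERENCE,
    // so isolate-independent code takes IndirectLoadExternalReference and
    // everything else takes the Ldr of the relocatable immediate.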
@@ -1525,24 +1526,12 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
 void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
   AllowDeferredHandleDereference heap_object_check;
   if (object->IsHeapObject()) {
-    Move(result, Handle<HeapObject>::cast(object));
+    Mov(result, Handle<HeapObject>::cast(object));
   } else {
     Mov(result, Operand(Smi::cast(*object)));
   }
 }
 
 void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
-
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
-  if (FLAG_embedded_builtins) {
-    if (root_array_available_ && options().isolate_independent_code) {
-      IndirectLoadConstant(dst, value);
-      return;
-    }
-  }
-  Mov(dst, value);
-}
-
-void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
 
 void TurboAssembler::Swap(Register lhs, Register rhs) {
@@ -1831,9 +1820,9 @@ void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
 void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                             intptr_t offset) {
   if (offset == 0) {
-    Move(destination, kRootRegister);
+    Mov(destination, kRootRegister);
   } else {
-    Add(destination, kRootRegister, Operand(offset));
+    Add(destination, kRootRegister, offset);
   }
 }
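
The Add change is the same cleanup in miniature: integral immediates convert
implicitly to Operand, so the explicit wrapper adds nothing. Both spellings
are equivalent:

    Add(destination, kRootRegister, Operand(offset));  // before: explicit
    Add(destination, kRootRegister, offset);           // after: implicit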
@@ -2452,7 +2441,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
   Mov(fp, sp);
   Mov(scratch, StackFrame::TypeToMarker(frame_type));
   Push(scratch, xzr);
-  Move(scratch, CodeObject());
+  Mov(scratch, CodeObject());
   Push(scratch, padreg);
 
   //          fp[8]: CallerPC (lr)
   //    fp -> fp[0]: CallerFP (old fp)
@@ -2846,8 +2835,8 @@ void TurboAssembler::CallRecordWriteStub(
   Pop(slot_parameter, object_parameter);
 
   Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
-  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
-  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+  Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+  Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
   Call(callable.code(), RelocInfo::CODE_TARGET);
 
   RestoreRegisters(registers);
@@ -2971,7 +2960,7 @@ void TurboAssembler::Abort(AbortReason reason) {
   // Avoid infinite recursion; Push contains some assertions that use Abort.
   HardAbortScope hard_aborts(this);
 
-  Move(x1, Smi::FromInt(static_cast<int>(reason)));
+  Mov(x1, Smi::FromInt(static_cast<int>(reason)));
 
   if (!has_frame_) {
     // We don't actually want to generate a pile of code for this, so just
@@ -212,9 +212,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void Mov(const Register& rd, const Operand& operand,
            DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
-  void Mov(const Register& rd, ExternalReference reference);
   void Mov(const Register& rd, uint64_t imm);
-  inline void Mov(const Register& rd, const Register& rm);
   void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
            int vn_index) {
     DCHECK(allow_macro_instructions());
@@ -235,8 +233,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // This is required for compatibility with architecture independent code.
   // Remove if not needed.
   void Move(Register dst, Register src);
-  void Move(Register dst, Handle<HeapObject> value);
-  void Move(Register dst, Smi* src);
 
   // Register swap. Note that the register operands should be distinct.
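
After the two deletions, the scalar move surface of the arm64 TurboAssembler
reduces to roughly the following (a paraphrase of the post-change header,
not a verbatim excerpt):

    // Generic move: immediates, relocatable constants (including the
    // embedded-builtins indirections), shifted/extended registers, and
    // plain register moves.
    void Mov(const Register& rd, const Operand& operand,
             DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
    void Mov(const Register& rd, uint64_t imm);

    // Kept only because architecture-independent code expects a Move()
    // entry point on every platform; on arm64 it forwards to Mov.
    void Move(Register dst, Register src);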
@@ -76,7 +76,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
   __ PushArgument(x1);
 
   __ CallRuntime(function_id, 1);
-  __ Move(x2, x0);
+  __ Mov(x2, x0);
 
   // Restore target function and new target.
   __ Pop(padreg, x3, x1, x0);
@@ -496,8 +496,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // We abuse new.target both to indicate that this is a resume call and to
   // pass in the generator object. In ordinary calls, new.target is always
   // undefined because generator functions are non-constructable.
-  __ Move(x3, x1);
-  __ Move(x1, x4);
+  __ Mov(x3, x1);
+  __ Mov(x1, x4);
   static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
   __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
   __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
@@ -2698,7 +2698,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
                                        WasmInstanceObject::kCEntryStubOffset));
     // Initialize the JavaScript context with 0. CEntry will use it to
     // set the current context on the isolate.
-    __ Move(cp, Smi::kZero);
+    __ Mov(cp, Smi::kZero);
     __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, x2);
     // The entrypoint address is the return value.
     __ mov(x8, kReturnRegister0);
@@ -2582,10 +2582,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
         if (IsMaterializableFromRoot(src_object, &index)) {
           __ LoadRoot(dst, index);
         } else {
-          __ Move(dst, src_object);
+          __ Mov(dst, src_object);
         }
-      } else if (src.type() == Constant::kExternalReference) {
-        __ Mov(dst, src.ToExternalReference());
       } else {
         __ Mov(dst, g.ToImmediate(source));
       }
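
The deleted kExternalReference arm is the same consolidation seen earlier:
on arm64, g.ToImmediate(source) presumably wraps an external reference in a
relocatable Operand, so the fall-through Mov now routes it correctly. The
resulting materialization order for a constant source, paraphrased from the
hunk above:

    if (IsMaterializableFromRoot(src_object, &index)) {
      __ LoadRoot(dst, index);  // cheapest: one ldr off the root register
    } else {
      __ Mov(dst, src_object);  // constant-table lookup happens inside Mov
    }
    // External references and plain immediates both reach
    // __ Mov(dst, g.ToImmediate(source)); the relocation mode on the
    // resulting Operand tells Mov which lookup, if any, to perform.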