Commit 16f2bcdb authored by Pierre Langlois, committed by Commit Bot

[turbofan] Refactor AssembleMove and AssembleSwap

The way the code generator's AssembleMove and AssembleSwap methods are written
makes it easy to forget which sort of move is being implemented when looking at
a sequence of instructions. This patch is an attempt to address this by
rewriting those methods using switch/case instead of a string of if/else.

To do this, introduce new utility functions to detect what type of move to
perform given a pair of InstructionOperands.

Bug: 
Change-Id: I32b146c86409e595b7b59a66bf43220899024fdd
Reviewed-on: https://chromium-review.googlesource.com/749201
Commit-Queue: Pierre Langlois <pierre.langlois@arm.com>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50966}
parent 396e7bc8
......@@ -241,22 +241,6 @@ void TurboAssembler::Ret(int drop, Condition cond) {
Ret(cond);
}
// Swaps the contents of reg1 and reg2, optionally predicated on |cond|.
// If |scratch| is no_reg the swap is done with the three-EOR trick (no
// temporary needed); otherwise three conditional moves through |scratch|
// are emitted. NOTE: the EOR variant zeroes the register if reg1 == reg2
// (x ^ x == 0), so callers must pass distinct registers in that mode.
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch,
Condition cond) {
if (scratch == no_reg) {
// XOR-swap: reg1 ^= reg2; reg2 ^= reg1; reg1 ^= reg2.
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
} else {
// Three-move swap through the caller-provided scratch register.
mov(scratch, reg1, LeaveCC, cond);
mov(reg1, reg2, LeaveCC, cond);
mov(reg2, scratch, LeaveCC, cond);
}
}
// Calls code at |target| with a branch-with-link (bl), so lr gets the
// return address.
void TurboAssembler::Call(Label* target) { bl(target); }
void TurboAssembler::Push(Handle<HeapObject> handle) {
......@@ -305,9 +289,17 @@ void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
}
}
void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (srcdst0 == srcdst1) return; // Swapping aliased registers emits nothing.
// Swaps the contents of the two (distinct) general-purpose registers by
// emitting three moves through a scratch register acquired from the
// assembler's scratch-register pool.
void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
// Aliased operands would make the three-move sequence lose a value;
// the distinct-register contract is documented on the declaration.
DCHECK(srcdst0 != srcdst1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, srcdst0);
mov(srcdst0, srcdst1);
mov(srcdst1, scratch);
}
void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
DCHECK(srcdst0 != srcdst1);
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
......@@ -323,9 +315,8 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
}
void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
if (srcdst0 != srcdst1) {
DCHECK(srcdst0 != srcdst1);
vswp(srcdst0, srcdst1);
}
}
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
......
......@@ -482,7 +482,8 @@ class TurboAssembler : public Assembler {
void VmovExtended(int dst_code, const MemOperand& src);
void VmovExtended(const MemOperand& dst, int src_code);
// Register swap.
// Register swap. Note that the register operands should be distinct.
void Swap(Register srcdst0, Register srcdst1);
void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
......@@ -580,11 +581,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg,
Condition cond = al);
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
......
......@@ -1571,6 +1571,34 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
// Swaps two (distinct) general-purpose registers of the same size/type via
// three Movs through an X-sized scratch register from the scratch pool.
void TurboAssembler::Swap(Register lhs, Register rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
// Distinct registers required; see the declaration's comment.
DCHECK(!lhs.Is(rhs));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Mov(temp, rhs);
Mov(rhs, lhs);
Mov(lhs, temp);
}
// Swaps two (distinct) FP/SIMD registers of the same size/type. A scratch
// VRegister matching lhs's width (S, D, or Q) is acquired and the swap is
// done with three Movs.
void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
// Distinct registers required; see the declaration's comment.
DCHECK(!lhs.Is(rhs));
UseScratchRegisterScope temps(this);
VRegister temp = VRegister::no_reg();
// Pick a scratch register of the same width as the operands.
if (lhs.IsS()) {
temp = temps.AcquireS();
} else if (lhs.IsD()) {
temp = temps.AcquireD();
} else {
DCHECK(lhs.IsQ());
temp = temps.AcquireQ();
}
Mov(temp, rhs);
Mov(rhs, lhs);
Mov(lhs, temp);
}
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
......
......@@ -255,6 +255,10 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Handle<HeapObject> x);
void Move(Register dst, Smi* src);
// Register swap. Note that the register operands should be distinct.
void Swap(Register lhs, Register rhs);
void Swap(VRegister lhs, VRegister rhs);
// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
V(fmla, Fmla) \
......@@ -2104,6 +2108,7 @@ class UseScratchRegisterScope {
Register AcquireX() { return AcquireNextAvailable(available_).X(); }
VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
VRegister AcquireV(VectorFormat format) {
return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
}
......
......@@ -44,21 +44,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
}
Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
}
UNREACHABLE();
return ToImmediate(instr_->InputAt(index));
}
Operand InputOperand2(size_t first_index) {
......@@ -124,6 +110,30 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return InputOffset(&first_index);
}
// Converts a constant instruction operand into an assembler Operand
// immediate. Only constant kinds representable as an arm Operand are
// handled; the remaining kinds (kInt64, kHeapObject, kRpoNumber) fall
// through to UNREACHABLE().
Operand ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
// Wasm references must carry their relocation mode so the value can
// be patched later.
if (RelocInfo::IsWasmReference(constant.rmode())) {
return Operand(constant.ToInt32(), constant.rmode());
} else {
return Operand(constant.ToInt32());
}
case Constant::kFloat32:
return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
case Constant::kInt64:
case Constant::kHeapObject:
// TODO(dcarney): loading RPO constants on arm.
case Constant::kRpoNumber:
break;
}
UNREACHABLE();
}
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
......@@ -2927,52 +2937,9 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
__ mov(g.ToRegister(destination), src);
} else {
__ str(src, g.ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsRegister()) {
__ ldr(g.ToRegister(destination), src);
} else {
Register temp = kScratchReg;
__ ldr(temp, src);
__ str(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst =
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
if (RelocInfo::IsWasmReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
__ mov(dst, Operand(src.ToInt32()));
}
break;
case Constant::kInt64:
UNREACHABLE();
break;
case Constant::kFloat32:
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
__ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
break;
case Constant::kHeapObject: {
// Helper function to write the given constant to the dst register.
auto MoveConstantToRegister = [&](Register dst, Constant src) {
if (src.type() == Constant::kHeapObject) {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
......@@ -2980,101 +2947,77 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
__ Move(dst, src_object);
}
break;
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on arm.
break;
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFloatStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
Register temp = kScratchReg;
__ mov(temp, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(temp, dst);
} else {
SwVfpRegister dst = g.ToFloatRegister(destination);
__ vmov(dst, Float32::FromBits(src.ToFloat32AsInt()));
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
DwVfpRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64(), kScratchReg);
if (destination->IsDoubleStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsDoubleStackSlot());
__ vstr(src, g.ToMemOperand(destination));
__ mov(dst, g.ToImmediate(source));
}
} else if (rep == MachineRepresentation::kFloat32) {
// GapResolver may give us reg codes that don't map to actual s-registers.
// Generate code to work around those cases.
};
switch (MoveType::InferMove(source, destination)) {
case MoveType::kRegisterToRegister:
if (source->IsRegister()) {
__ mov(g.ToRegister(destination), g.ToRegister(source));
} else if (source->IsFloatRegister()) {
DCHECK(destination->IsFloatRegister());
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int src_code = LocationOperand::cast(source)->register_code();
if (destination->IsFloatRegister()) {
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(dst_code, src_code);
} else if (source->IsDoubleRegister()) {
__ Move(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
} else {
DCHECK(destination->IsFloatStackSlot());
__ VmovExtended(g.ToMemOperand(destination), src_code);
__ Move(g.ToSimd128Register(destination), g.ToSimd128Register(source));
}
return;
case MoveType::kRegisterToStack: {
MemOperand dst = g.ToMemOperand(destination);
if (source->IsRegister()) {
__ str(g.ToRegister(source), dst);
} else if (source->IsFloatRegister()) {
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int src_code = LocationOperand::cast(source)->register_code();
__ VmovExtended(dst, src_code);
} else if (source->IsDoubleRegister()) {
__ vstr(g.ToDoubleRegister(source), dst);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
QwNeonRegister src = g.ToSimd128Register(source);
if (destination->IsSimd128Register()) {
QwNeonRegister dst = g.ToSimd128Register(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsSimd128StackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ add(kScratchReg, dst.rn(), Operand(dst.offset()));
__ vst1(Neon8, NeonListOperand(src.low(), 2),
NeonMemOperand(kScratchReg));
}
return;
}
} else if (source->IsFPStackSlot()) {
case MoveType::kStackToRegister: {
MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep =
LocationOperand::cast(destination)->representation();
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat64) {
__ vldr(g.ToDoubleRegister(destination), src);
} else if (rep == MachineRepresentation::kFloat32) {
if (source->IsStackSlot()) {
__ ldr(g.ToRegister(destination), src);
} else if (source->IsFloatStackSlot()) {
DCHECK(destination->IsFloatRegister());
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(dst_code, src);
} else if (source->IsDoubleStackSlot()) {
__ vldr(g.ToDoubleRegister(destination), src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
QwNeonRegister dst = g.ToSimd128Register(destination);
__ add(kScratchReg, src.rn(), Operand(src.offset()));
__ vld1(Neon8, NeonListOperand(dst.low(), 2),
NeonMemOperand(kScratchReg));
}
} else {
DCHECK(destination->IsFPStackSlot());
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister temp = kScratchDoubleReg;
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
} else if (rep == MachineRepresentation::kFloat32) {
SwVfpRegister temp = kScratchDoubleReg.low();
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
return;
}
case MoveType::kStackToStack: {
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
if (source->IsStackSlot() || source->IsFloatStackSlot()) {
__ ldr(kScratchReg, src);
__ str(kScratchReg, dst);
} else if (source->IsDoubleStackSlot()) {
__ vldr(kScratchDoubleReg, src);
__ vstr(kScratchDoubleReg, dst);
} else {
DCHECK(source->IsSimd128StackSlot());
__ add(kScratchReg, src.rn(), Operand(src.offset()));
__ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
NeonMemOperand(kScratchReg));
......@@ -3082,81 +3025,83 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
NeonMemOperand(kScratchReg));
}
return;
}
case MoveType::kConstantToRegister: {
Constant src = g.ToConstant(source);
if (destination->IsRegister()) {
MoveConstantToRegister(g.ToRegister(destination), src);
} else if (destination->IsFloatRegister()) {
__ vmov(g.ToFloatRegister(destination),
Float32::FromBits(src.ToFloat32AsInt()));
} else {
UNREACHABLE();
__ vmov(g.ToDoubleRegister(destination), src.ToFloat64(), kScratchReg);
}
return;
}
case MoveType::kConstantToStack: {
Constant src = g.ToConstant(source);
MemOperand dst = g.ToMemOperand(destination);
if (destination->IsStackSlot()) {
MoveConstantToRegister(kScratchReg, src);
__ str(kScratchReg, dst);
} else if (destination->IsFloatStackSlot()) {
__ mov(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(kScratchReg, dst);
} else {
DCHECK(destination->IsDoubleStackSlot());
__ vmov(kScratchDoubleReg, src.ToFloat64(), kScratchReg);
__ vstr(kScratchDoubleReg, g.ToMemOperand(destination));
}
return;
}
}
UNREACHABLE();
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
switch (MoveType::InferSwap(source, destination)) {
case MoveType::kRegisterToRegister:
if (source->IsRegister()) {
// Register-register.
Register temp = kScratchReg;
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
DCHECK(destination->IsStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ mov(temp, src);
__ ldr(src, dst);
__ str(temp, dst);
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
Register temp_0 = kScratchReg;
SwVfpRegister temp_1 = kScratchDoubleReg.low();
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
__ ldr(temp_0, src);
__ vldr(temp_1, dst);
__ str(temp_0, dst);
__ vstr(temp_1, src);
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
__ Swap(g.ToRegister(source), g.ToRegister(destination));
} else if (source->IsFloatRegister()) {
DCHECK(destination->IsFloatRegister());
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
LowDwVfpRegister temp = kScratchDoubleReg;
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Swap(src, dst);
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ vldr(src, dst);
__ vstr(temp, dst);
}
} else if (rep == MachineRepresentation::kFloat32) {
int src_code = LocationOperand::cast(source)->register_code();
if (destination->IsFPRegister()) {
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(temp.low().code(), src_code);
__ VmovExtended(src_code, dst_code);
__ VmovExtended(dst_code, temp.low().code());
} else if (source->IsDoubleRegister()) {
__ Swap(g.ToDoubleRegister(source), g.ToDoubleRegister(destination));
} else {
DCHECK(destination->IsFPStackSlot());
__ Swap(g.ToSimd128Register(source), g.ToSimd128Register(destination));
}
return;
case MoveType::kRegisterToStack: {
MemOperand dst = g.ToMemOperand(destination);
if (source->IsRegister()) {
Register src = g.ToRegister(source);
__ mov(kScratchReg, src);
__ ldr(src, dst);
__ str(kScratchReg, dst);
} else if (source->IsFloatRegister()) {
int src_code = LocationOperand::cast(source)->register_code();
LowDwVfpRegister temp = kScratchDoubleReg;
__ VmovExtended(temp.low().code(), src_code);
__ VmovExtended(src_code, dst);
__ vstr(temp.low(), dst);
}
} else if (source->IsDoubleRegister()) {
DwVfpRegister src = g.ToDoubleRegister(source);
__ Move(kScratchDoubleReg, src);
__ vldr(src, dst);
__ vstr(kScratchDoubleReg, dst);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
QwNeonRegister src = g.ToSimd128Register(source);
if (destination->IsFPRegister()) {
QwNeonRegister dst = g.ToSimd128Register(destination);
__ Swap(src, dst);
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(kScratchQuadReg, src);
__ add(kScratchReg, dst.rn(), Operand(dst.offset()));
__ vld1(Neon8, NeonListOperand(src.low(), 2),
......@@ -3164,32 +3109,35 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
NeonMemOperand(kScratchReg));
}
return;
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
case MoveType::kStackToStack: {
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
if (source->IsStackSlot() || source->IsFloatStackSlot()) {
Register temp_0 = kScratchReg;
SwVfpRegister temp_1 = kScratchDoubleReg.low();
__ ldr(temp_0, src);
__ vldr(temp_1, dst);
__ str(temp_0, dst);
__ vstr(temp_1, src);
} else if (source->IsDoubleStackSlot()) {
Register temp_0 = kScratchReg;
LowDwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
MemOperand dst0 = g.ToMemOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
__ vldr(temp_1, dst0); // Save destination in temp_1.
__ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
__ str(temp_0, dst0);
__ ldr(temp_0, src1);
__ str(temp_0, dst1);
__ vstr(temp_1, src0);
} else if (rep == MachineRepresentation::kFloat32) {
__ vldr(temp_1.low(), dst0); // Save destination in temp_1.
__ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
__ str(temp_0, dst0);
__ vstr(temp_1.low(), src0);
// Save destination in temp_1.
__ vldr(temp_1, dst);
// Then use temp_0 to copy source to destination.
__ ldr(temp_0, src);
__ str(temp_0, dst);
__ ldr(temp_0, MemOperand(src.rn(), src.offset() + kPointerSize));
__ str(temp_0, MemOperand(dst.rn(), dst.offset() + kPointerSize));
__ vstr(temp_1, src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
MemOperand src1(src0.rn(), src0.offset() + kDoubleSize);
MemOperand dst1(dst0.rn(), dst0.offset() + kDoubleSize);
DCHECK(source->IsSimd128StackSlot());
MemOperand src0 = src;
MemOperand dst0 = dst;
MemOperand src1(src.rn(), src.offset() + kDoubleSize);
MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
__ vldr(kScratchQuadReg.low(), dst0);
__ vldr(kScratchQuadReg.high(), src0);
__ vstr(kScratchQuadReg.low(), src0);
......@@ -3199,9 +3147,11 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vstr(kScratchQuadReg.low(), src1);
__ vstr(kScratchQuadReg.high(), dst1);
}
} else {
// No other combinations are possible.
return;
}
default:
UNREACHABLE();
break;
}
}
......
......@@ -2486,33 +2486,8 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
Arm64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
__ Mov(g.ToRegister(destination), src);
} else {
__ Str(src, g.ToMemOperand(destination, tasm()));
}
} else if (source->IsStackSlot()) {
MemOperand src = g.ToMemOperand(source, tasm());
DCHECK(destination->IsRegister() || destination->IsStackSlot());
if (destination->IsRegister()) {
__ Ldr(g.ToRegister(destination), src);
} else {
UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Ldr(temp, src);
__ Str(temp, g.ToMemOperand(destination, tasm()));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(ConstantOperand::cast(source));
if (destination->IsRegister() || destination->IsStackSlot()) {
UseScratchRegisterScope scope(tasm());
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: scope.AcquireX();
// Helper function to write the given constant to the dst register.
auto MoveConstantToRegister = [&](Register dst, Constant src) {
if (src.type() == Constant::kHeapObject) {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
......@@ -2524,115 +2499,166 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
__ Mov(dst, g.ToImmediate(source));
}
if (destination->IsStackSlot()) {
__ Str(dst, g.ToMemOperand(destination, tasm()));
};
switch (MoveType::InferMove(source, destination)) {
case MoveType::kRegisterToRegister:
if (source->IsRegister()) {
__ Mov(g.ToRegister(destination), g.ToRegister(source));
} else if (source->IsFloatRegister() || source->IsDoubleRegister()) {
__ Mov(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
} else {
DCHECK(source->IsSimd128Register());
__ Mov(g.ToDoubleRegister(destination).Q(),
g.ToDoubleRegister(source).Q());
}
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination).S();
__ Fmov(dst, src.ToFloat32());
return;
case MoveType::kRegisterToStack: {
MemOperand dst = g.ToMemOperand(destination, tasm());
if (source->IsRegister()) {
__ Str(g.ToRegister(source), dst);
} else {
DCHECK(destination->IsFPStackSlot());
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ Str(wzr, g.ToMemOperand(destination, tasm()));
VRegister src = g.ToDoubleRegister(source);
if (source->IsFloatRegister() || source->IsDoubleRegister()) {
__ Str(src, dst);
} else {
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
__ Str(temp, g.ToMemOperand(destination, tasm()));
DCHECK(source->IsSimd128Register());
__ Str(src.Q(), dst);
}
}
return;
}
case MoveType::kStackToRegister: {
MemOperand src = g.ToMemOperand(source, tasm());
if (destination->IsRegister()) {
__ Ldr(g.ToRegister(destination), src);
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src.ToFloat64().value());
} else {
DCHECK(destination->IsFPStackSlot());
if (src.ToFloat64().AsUint64() == 0) {
__ Str(xzr, g.ToMemOperand(destination, tasm()));
if (destination->IsFloatRegister() || destination->IsDoubleRegister()) {
__ Ldr(dst, src);
} else {
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireD();
__ Fmov(temp, src.ToFloat64().value());
__ Str(temp, g.ToMemOperand(destination, tasm()));
DCHECK(destination->IsSimd128Register());
__ Ldr(dst.Q(), src);
}
}
return;
}
} else if (source->IsFPRegister()) {
VRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination);
if (destination->IsSimd128Register()) {
__ Mov(dst.Q(), src.Q());
case MoveType::kStackToStack: {
MemOperand src = g.ToMemOperand(source, tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
if (source->IsSimd128StackSlot()) {
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireQ();
__ Ldr(temp, src);
__ Str(temp, dst);
} else {
__ Mov(dst, src);
UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Ldr(temp, src);
__ Str(temp, dst);
}
return;
}
case MoveType::kConstantToRegister: {
Constant src = g.ToConstant(source);
if (destination->IsRegister()) {
MoveConstantToRegister(g.ToRegister(destination), src);
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination, tasm());
if (destination->IsSimd128StackSlot()) {
__ Str(src.Q(), dst);
VRegister dst = g.ToDoubleRegister(destination);
if (destination->IsFloatRegister()) {
__ Fmov(dst.S(), src.ToFloat32());
} else {
__ Str(src, dst);
DCHECK(destination->IsDoubleRegister());
__ Fmov(dst, src.ToFloat64().value());
}
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source, tasm());
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination);
if (destination->IsSimd128Register()) {
__ Ldr(dst.Q(), src);
return;
}
case MoveType::kConstantToStack: {
Constant src = g.ToConstant(source);
MemOperand dst = g.ToMemOperand(destination, tasm());
if (destination->IsStackSlot()) {
UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
MoveConstantToRegister(temp, src);
__ Str(temp, dst);
} else if (destination->IsFloatStackSlot()) {
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ Str(wzr, dst);
} else {
__ Ldr(dst, src);
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
__ Str(temp, dst);
}
} else {
DCHECK(destination->IsDoubleStackSlot());
if (src.ToFloat64().AsUint64() == 0) {
__ Str(xzr, dst);
} else {
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireD();
MemOperand dst = g.ToMemOperand(destination, tasm());
if (destination->IsSimd128StackSlot()) {
__ Ldr(temp.Q(), src);
__ Str(temp.Q(), dst);
} else {
__ Ldr(temp, src);
__ Fmov(temp, src.ToFloat64().value());
__ Str(temp, dst);
}
}
} else {
UNREACHABLE();
return;
}
}
UNREACHABLE();
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
Arm64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
switch (MoveType::InferSwap(source, destination)) {
case MoveType::kRegisterToRegister:
if (source->IsRegister()) {
// Register-register.
__ Swap(g.ToRegister(source), g.ToRegister(destination));
} else {
VRegister src = g.ToDoubleRegister(source);
VRegister dst = g.ToDoubleRegister(destination);
if (source->IsFloatRegister() || source->IsDoubleRegister()) {
__ Swap(src, dst);
} else {
DCHECK(source->IsSimd128Register());
__ Swap(src.Q(), dst.Q());
}
}
return;
case MoveType::kRegisterToStack: {
UseScratchRegisterScope scope(tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
if (source->IsRegister()) {
Register temp = scope.AcquireX();
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ Mov(temp, src);
__ Mov(src, dst);
__ Mov(dst, temp);
__ Ldr(src, dst);
__ Str(temp, dst);
} else {
DCHECK(destination->IsStackSlot());
MemOperand dst = g.ToMemOperand(destination, tasm());
UseScratchRegisterScope scope(tasm());
VRegister src = g.ToDoubleRegister(source);
if (source->IsFloatRegister() || source->IsDoubleRegister()) {
VRegister temp = scope.AcquireD();
__ Mov(temp, src);
__ Ldr(src, dst);
__ Str(temp, dst);
} else {
DCHECK(source->IsSimd128Register());
VRegister temp = scope.AcquireQ();
__ Mov(temp, src.Q());
__ Ldr(src.Q(), dst);
__ Str(temp, dst);
}
} else if (source->IsStackSlot() || source->IsFPStackSlot()) {
}
return;
}
case MoveType::kStackToStack: {
UseScratchRegisterScope scope(tasm());
VRegister temp_0 = scope.AcquireD();
VRegister temp_1 = scope.AcquireD();
MemOperand src = g.ToMemOperand(source, tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
VRegister temp_0 = scope.AcquireD();
VRegister temp_1 = scope.AcquireD();
if (source->IsSimd128StackSlot()) {
__ Ldr(temp_0.Q(), src);
__ Ldr(temp_1.Q(), dst);
......@@ -2644,37 +2670,11 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Str(temp_0, dst);
__ Str(temp_1, src);
}
} else if (source->IsFPRegister()) {
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireD();
VRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
VRegister dst = g.ToDoubleRegister(destination);
if (source->IsSimd128Register()) {
__ Mov(temp.Q(), src.Q());
__ Mov(src.Q(), dst.Q());
__ Mov(dst.Q(), temp.Q());
} else {
__ Mov(temp, src);
__ Mov(src, dst);
__ Mov(dst, temp);
}
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination, tasm());
if (source->IsSimd128Register()) {
__ Mov(temp.Q(), src.Q());
__ Ldr(src.Q(), dst);
__ Str(temp.Q(), dst);
} else {
__ Mov(temp, src);
__ Ldr(src, dst);
__ Str(temp, dst);
}
return;
}
} else {
// No other combinations are possible.
default:
UNREACHABLE();
break;
}
}
......
......@@ -488,6 +488,54 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
pushes->resize(push_count);
}
// Classifies a gap move by the kinds of its source and destination
// operands (constant/register/stack). Representation (tagged, FP, ...)
// is deliberately ignored here; callers dispatch on it separately.
CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
    InstructionOperand* source, InstructionOperand* destination) {
  // Constants have no location of their own; classify by destination only.
  if (source->IsConstant()) {
    if (destination->IsAnyRegister()) return MoveType::kConstantToRegister;
    DCHECK(destination->IsAnyStackSlot());
    return MoveType::kConstantToStack;
  }
  // Both ends are locations; they must agree on representation.
  DCHECK(LocationOperand::cast(source)->IsCompatible(
      LocationOperand::cast(destination)));
  if (source->IsAnyRegister()) {
    if (destination->IsAnyRegister()) return MoveType::kRegisterToRegister;
    DCHECK(destination->IsAnyStackSlot());
    return MoveType::kRegisterToStack;
  }
  DCHECK(source->IsAnyStackSlot());
  if (destination->IsAnyRegister()) return MoveType::kStackToRegister;
  DCHECK(destination->IsAnyStackSlot());
  return MoveType::kStackToStack;
}
// Classifies a gap swap by its operand kinds. Unlike InferMove, constants
// and stack->register cases cannot occur: swaps are always between two
// locations, and a register<->stack swap is reported as kRegisterToStack.
CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
    InstructionOperand* source, InstructionOperand* destination) {
  DCHECK(LocationOperand::cast(source)->IsCompatible(
      LocationOperand::cast(destination)));
  if (!source->IsAnyRegister()) {
    // Neither side in a register: must be a stack<->stack swap.
    DCHECK(source->IsAnyStackSlot());
    DCHECK(destination->IsAnyStackSlot());
    return MoveType::kStackToStack;
  }
  if (destination->IsAnyRegister()) return MoveType::kRegisterToRegister;
  DCHECK(destination->IsAnyStackSlot());
  return MoveType::kRegisterToStack;
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
Instruction* instr, const InstructionBlock* block) {
int first_unused_stack_slot;
......
......@@ -224,6 +224,26 @@ class CodeGenerator final : public GapResolver::Assembler {
PushTypeFlags push_type,
ZoneVector<MoveOperands*>* pushes);
  // Classifies a gap move (or swap) by the kinds of its two operands so that
  // backend AssembleMove/AssembleSwap implementations can dispatch with a
  // single switch instead of a chain of if/else tests.
  class MoveType {
   public:
    enum Type {
      kRegisterToRegister,
      kRegisterToStack,
      kStackToRegister,
      kStackToStack,
      kConstantToRegister,
      kConstantToStack
    };
    // Detect what type of move or swap needs to be performed. Note that these
    // functions do not take into account the representation (Tagged, FP,
    // ...etc).
    static Type InferMove(InstructionOperand* source,
                          InstructionOperand* destination);
    // Same classification for swaps; a swap never involves a constant, and a
    // stack<->register swap is reported as kRegisterToStack.
    static Type InferSwap(InstructionOperand* source,
                          InstructionOperand* destination);
  };
// Called before a tail call |instr|'s gap moves are assembled and allows
// gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
// need it before gap moves or conversion of certain gap moves into pushes.
......
......@@ -3510,119 +3510,129 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
// Dispatch on the source and destination operand kinds.
switch (MoveType::InferMove(source, destination)) {
case MoveType::kRegisterToRegister:
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = g.ToRegister(source);
Operand dst = g.ToOperand(destination);
__ mov(dst, src);
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Operand src = g.ToOperand(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ mov(dst, src);
} else {
Operand dst = g.ToOperand(destination);
__ push(src);
__ pop(dst);
}
} else if (source->IsConstant()) {
Constant src_constant = g.ToConstant(source);
if (src_constant.type() == Constant::kHeapObject) {
Handle<HeapObject> src = src_constant.ToHeapObject();
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ Move(dst, src);
__ mov(g.ToRegister(destination), g.ToRegister(source));
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
__ mov(dst, src);
DCHECK(source->IsFPRegister());
__ movaps(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
}
} else if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ Move(dst, g.ToImmediate(source));
} else if (destination->IsStackSlot()) {
Operand dst = g.ToOperand(destination);
__ Move(dst, g.ToImmediate(source));
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src = src_constant.ToFloat32AsInt();
if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
return;
case MoveType::kRegisterToStack: {
Operand dst = g.ToOperand(destination);
__ Move(dst, Immediate(src));
}
} else {
DCHECK_EQ(Constant::kFloat64, src_constant.type());
uint64_t src = src_constant.ToFloat64().AsUint64();
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
if (source->IsRegister()) {
__ mov(dst, g.ToRegister(source));
} else {
DCHECK(destination->IsFPStackSlot());
Operand dst0 = g.ToOperand(destination);
Operand dst1 = g.ToOperand(destination, kPointerSize);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
}
} else if (source->IsFPRegister()) {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ movaps(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
__ movsd(dst, src);
} else if (rep == MachineRepresentation::kFloat32) {
if (rep == MachineRepresentation::kFloat32) {
__ movss(dst, src);
} else if (rep == MachineRepresentation::kFloat64) {
__ movsd(dst, src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
__ movups(dst, src);
}
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
return;
}
case MoveType::kStackToRegister: {
Operand src = g.ToOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (destination->IsFPRegister()) {
if (source->IsStackSlot()) {
__ mov(g.ToRegister(destination), src);
} else {
DCHECK(source->IsFPStackSlot());
XMMRegister dst = g.ToDoubleRegister(destination);
if (rep == MachineRepresentation::kFloat64) {
__ movsd(dst, src);
} else if (rep == MachineRepresentation::kFloat32) {
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat32) {
__ movss(dst, src);
} else if (rep == MachineRepresentation::kFloat64) {
__ movsd(dst, src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
__ movups(dst, src);
}
} else {
}
return;
}
case MoveType::kStackToStack: {
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
if (rep == MachineRepresentation::kFloat64) {
__ movsd(kScratchDoubleReg, src);
__ movsd(dst, kScratchDoubleReg);
} else if (rep == MachineRepresentation::kFloat32) {
if (source->IsStackSlot()) {
__ push(src);
__ pop(dst);
} else {
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat32) {
__ movss(kScratchDoubleReg, src);
__ movss(dst, kScratchDoubleReg);
} else if (rep == MachineRepresentation::kFloat64) {
__ movsd(kScratchDoubleReg, src);
__ movsd(dst, kScratchDoubleReg);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
__ movups(kScratchDoubleReg, src);
__ movups(dst, kScratchDoubleReg);
}
}
return;
}
case MoveType::kConstantToRegister: {
Constant src = g.ToConstant(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
if (src.type() == Constant::kHeapObject) {
__ Move(dst, src.ToHeapObject());
} else {
UNREACHABLE();
__ Move(dst, g.ToImmediate(source));
}
} else {
DCHECK(destination->IsFPRegister());
XMMRegister dst = g.ToDoubleRegister(destination);
if (src.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
__ Move(dst, src.ToFloat32AsInt());
} else {
DCHECK_EQ(src.type(), Constant::kFloat64);
__ Move(dst, src.ToFloat64().AsUint64());
}
}
return;
}
case MoveType::kConstantToStack: {
Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
if (destination->IsStackSlot()) {
if (src.type() == Constant::kHeapObject) {
__ mov(dst, src.ToHeapObject());
} else {
__ Move(dst, g.ToImmediate(source));
}
} else {
DCHECK(destination->IsFPStackSlot());
if (src.type() == Constant::kFloat32) {
__ Move(dst, Immediate(src.ToFloat32AsInt()));
} else {
DCHECK_EQ(src.type(), Constant::kFloat64);
uint64_t constant_value = src.ToFloat64().AsUint64();
uint32_t lower = static_cast<uint32_t>(constant_value);
uint32_t upper = static_cast<uint32_t>(constant_value >> 32);
Operand dst0 = dst;
Operand dst1 = g.ToOperand(destination, kPointerSize);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
}
return;
}
}
UNREACHABLE();
}
......@@ -3631,15 +3641,26 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
switch (MoveType::InferSwap(source, destination)) {
case MoveType::kRegisterToRegister: {
if (source->IsRegister()) {
Register src = g.ToRegister(source);
Register dst = g.ToRegister(destination);
__ push(src);
__ mov(src, dst);
__ pop(dst);
} else if (source->IsRegister() && destination->IsStackSlot()) {
// Register-memory.
} else {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
__ movaps(kScratchDoubleReg, src);
__ movaps(src, dst);
__ movaps(dst, kScratchDoubleReg);
}
return;
}
case MoveType::kRegisterToStack: {
if (source->IsRegister()) {
Register src = g.ToRegister(source);
__ push(src);
frame_access_state()->IncreaseSPDelta(1);
......@@ -3648,8 +3669,31 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
frame_access_state()->IncreaseSPDelta(-1);
dst = g.ToOperand(destination);
__ pop(dst);
} else if (source->IsStackSlot() && destination->IsStackSlot()) {
// Memory-memory.
} else {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat32) {
__ movss(kScratchDoubleReg, dst);
__ movss(dst, src);
__ movaps(src, kScratchDoubleReg);
} else if (rep == MachineRepresentation::kFloat64) {
__ movsd(kScratchDoubleReg, dst);
__ movsd(dst, src);
__ movaps(src, kScratchDoubleReg);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
__ movups(kScratchDoubleReg, dst);
__ movups(dst, src);
__ movups(src, kScratchDoubleReg);
}
}
return;
}
case MoveType::kStackToStack: {
if (source->IsStackSlot()) {
Operand dst1 = g.ToOperand(destination);
__ push(dst1);
frame_access_state()->IncreaseSPDelta(1);
......@@ -3660,49 +3704,24 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
frame_access_state()->IncreaseSPDelta(-1);
Operand src2 = g.ToOperand(source);
__ pop(src2);
} else if (source->IsFPRegister() && destination->IsFPRegister()) {
// XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
__ movaps(kScratchDoubleReg, src);
__ movaps(src, dst);
__ movaps(dst, kScratchDoubleReg);
} else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
// XMM register-memory swap.
XMMRegister reg = g.ToDoubleRegister(source);
Operand other = g.ToOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
__ movsd(kScratchDoubleReg, other);
__ movsd(other, reg);
__ movaps(reg, kScratchDoubleReg);
} else if (rep == MachineRepresentation::kFloat32) {
__ movss(kScratchDoubleReg, other);
__ movss(other, reg);
__ movaps(reg, kScratchDoubleReg);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
__ movups(kScratchDoubleReg, other);
__ movups(other, reg);
__ movups(reg, kScratchDoubleReg);
}
} else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
// Double-width memory-to-memory.
DCHECK(source->IsFPStackSlot());
Operand src0 = g.ToOperand(source);
Operand dst0 = g.ToOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat32) {
__ movss(kScratchDoubleReg, dst0); // Save dst in scratch register.
__ push(src0); // Then use stack to copy src to destination.
__ pop(dst0);
__ movss(src0, kScratchDoubleReg);
} else if (rep == MachineRepresentation::kFloat64) {
__ movsd(kScratchDoubleReg, dst0); // Save dst in scratch register.
__ push(src0); // Then use stack to copy src to destination.
__ pop(dst0);
__ push(g.ToOperand(source, kPointerSize));
__ pop(g.ToOperand(destination, kPointerSize));
__ movsd(src0, kScratchDoubleReg);
} else if (rep == MachineRepresentation::kFloat32) {
__ movss(kScratchDoubleReg, dst0); // Save dst in scratch register.
__ push(src0); // Then use stack to copy src to destination.
__ pop(dst0);
__ movss(src0, kScratchDoubleReg);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
__ movups(kScratchDoubleReg, dst0); // Save dst in scratch register.
......@@ -3716,9 +3735,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ pop(g.ToOperand(destination, 3 * kPointerSize));
__ movups(src0, kScratchDoubleReg);
}
} else {
// No other combinations are possible.
}
return;
}
default:
UNREACHABLE();
break;
}
}
......
......@@ -96,6 +96,26 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
return false;
}
bool LocationOperand::IsCompatible(LocationOperand* op) {
  // Two locations are compatible when a value can be moved between them.
  // General-purpose registers and plain stack slots are interchangeable.
  if (IsRegister() || IsStackSlot()) {
    return op->IsRegister() || op->IsStackSlot();
  }
  if (kSimpleFPAliasing) {
    // A backend may choose to generate the same instruction sequence regardless
    // of the FP representation. As a result, we can relax the compatibility and
    // allow a Double to be moved in a Float for example. However, this is only
    // allowed if registers do not overlap.
    return (IsFPRegister() || IsFPStackSlot()) &&
           (op->IsFPRegister() || op->IsFPStackSlot());
  }
  // With complex aliasing, each FP representation only pairs with itself.
  if (IsFloatRegister() || IsFloatStackSlot()) {
    return op->IsFloatRegister() || op->IsFloatStackSlot();
  }
  if (IsDoubleRegister() || IsDoubleStackSlot()) {
    return op->IsDoubleRegister() || op->IsDoubleStackSlot();
  }
  return (IsSimd128Register() || IsSimd128StackSlot()) &&
         (op->IsSimd128Register() || op->IsSimd128StackSlot());
}
void InstructionOperand::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
PrintableInstructionOperand wrapper;
......
......@@ -491,6 +491,9 @@ class LocationOperand : public InstructionOperand {
UNREACHABLE();
}
// Return true if the locations can be moved to one another.
bool IsCompatible(LocationOperand* op);
static LocationOperand* cast(InstructionOperand* op) {
DCHECK(op->IsAnyLocationOperand());
return static_cast<LocationOperand*>(op);
......
......@@ -3092,36 +3092,8 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
__ movq(g.ToRegister(destination), src);
} else {
__ movq(g.ToOperand(destination), src);
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Operand src = g.ToOperand(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ movq(dst, src);
} else {
// Spill on demand to use a temporary register for memory-to-memory
// moves.
Register tmp = kScratchRegister;
Operand dst = g.ToOperand(destination);
__ movq(tmp, src);
__ movq(dst, tmp);
}
} else if (source->IsConstant()) {
ConstantOperand* constant_source = ConstantOperand::cast(source);
Constant src = g.ToConstant(constant_source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: kScratchRegister;
// Helper function to write the given constant to the dst register.
auto MoveConstantToRegister = [&](Register dst, Constant src) {
switch (src.type()) {
case Constant::kInt32: {
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
......@@ -3169,38 +3141,24 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE(); // TODO(dcarney): load of labels on x64.
break;
}
if (destination->IsStackSlot()) {
__ movq(g.ToOperand(destination), kScratchRegister);
}
} else if (src.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
};
// Dispatch on the source and destination operand kinds.
switch (MoveType::InferMove(source, destination)) {
case MoveType::kRegisterToRegister:
if (source->IsRegister()) {
__ movq(g.ToRegister(destination), g.ToRegister(source));
} else {
DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ movl(dst, Immediate(src_const));
DCHECK(source->IsFPRegister());
__ Movapd(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
}
return;
case MoveType::kRegisterToStack: {
Operand dst = g.ToOperand(destination);
if (source->IsRegister()) {
__ movq(dst, g.ToRegister(source));
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
uint64_t src_const = src.ToFloat64().AsUint64();
if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
} else {
DCHECK(destination->IsFPStackSlot());
__ movq(kScratchRegister, src_const);
__ movq(g.ToOperand(destination), kScratchRegister);
}
}
} else if (source->IsFPRegister()) {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movapd(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
......@@ -3209,30 +3167,84 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Movups(dst, src);
}
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
return;
}
case MoveType::kStackToRegister: {
Operand src = g.ToOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (destination->IsFPRegister()) {
if (source->IsStackSlot()) {
__ movq(g.ToRegister(destination), src);
} else {
DCHECK(source->IsFPStackSlot());
XMMRegister dst = g.ToDoubleRegister(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
__ Movsd(dst, src);
} else {
__ Movups(dst, src);
}
} else {
}
return;
}
case MoveType::kStackToStack: {
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
if (source->IsStackSlot()) {
// Spill on demand to use a temporary register for memory-to-memory
// moves.
__ movq(kScratchRegister, src);
__ movq(dst, kScratchRegister);
} else {
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
__ Movsd(kScratchDoubleReg, src);
__ Movsd(dst, kScratchDoubleReg);
} else {
DCHECK(source->IsSimd128StackSlot());
__ Movups(kScratchDoubleReg, src);
__ Movups(dst, kScratchDoubleReg);
}
}
return;
}
case MoveType::kConstantToRegister: {
Constant src = g.ToConstant(source);
if (destination->IsRegister()) {
MoveConstantToRegister(g.ToRegister(destination), src);
} else {
UNREACHABLE();
DCHECK(destination->IsFPRegister());
XMMRegister dst = g.ToDoubleRegister(destination);
if (src.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
__ Move(dst, bit_cast<uint32_t>(src.ToFloat32()));
} else {
DCHECK_EQ(src.type(), Constant::kFloat64);
__ Move(dst, src.ToFloat64().AsUint64());
}
}
return;
}
case MoveType::kConstantToStack: {
Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
if (destination->IsStackSlot()) {
MoveConstantToRegister(kScratchRegister, src);
__ movq(dst, kScratchRegister);
} else {
DCHECK(destination->IsFPStackSlot());
if (src.type() == Constant::kFloat32) {
__ movl(dst, Immediate(bit_cast<uint32_t>(src.ToFloat32())));
} else {
DCHECK_EQ(src.type(), Constant::kFloat64);
__ movq(kScratchRegister, src.ToFloat64().AsUint64());
__ movq(dst, kScratchRegister);
}
}
return;
}
}
UNREACHABLE();
}
......@@ -3241,14 +3253,26 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
switch (MoveType::InferSwap(source, destination)) {
case MoveType::kRegisterToRegister: {
if (source->IsRegister()) {
Register src = g.ToRegister(source);
Register dst = g.ToRegister(destination);
__ movq(kScratchRegister, src);
__ movq(src, dst);
__ movq(dst, kScratchRegister);
} else if (source->IsRegister() && destination->IsStackSlot()) {
} else {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movapd(kScratchDoubleReg, src);
__ Movapd(src, dst);
__ Movapd(dst, kScratchDoubleReg);
}
return;
}
case MoveType::kRegisterToStack: {
if (source->IsRegister()) {
Register src = g.ToRegister(source);
__ pushq(src);
frame_access_state()->IncreaseSPDelta(1);
......@@ -3261,12 +3285,29 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ popq(dst);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kPointerSize);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsFPStackSlot() && destination->IsFPStackSlot())) {
// Memory-memory.
} else {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
__ Movsd(kScratchDoubleReg, src);
__ Movsd(src, dst);
__ Movsd(dst, kScratchDoubleReg);
} else {
__ Movups(kScratchDoubleReg, src);
__ Movups(src, dst);
__ Movups(dst, kScratchDoubleReg);
}
}
return;
}
case MoveType::kStackToStack: {
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
Register tmp = kScratchRegister;
__ movq(tmp, dst);
......@@ -3295,30 +3336,11 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
-kPointerSize);
__ movups(src, kScratchDoubleReg);
}
} else if (source->IsFPRegister() && destination->IsFPRegister()) {
// XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movapd(kScratchDoubleReg, src);
__ Movapd(src, dst);
__ Movapd(dst, kScratchDoubleReg);
} else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
// XMM register-memory swap.
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
__ Movsd(kScratchDoubleReg, src);
__ Movsd(src, dst);
__ Movsd(dst, kScratchDoubleReg);
} else {
__ Movups(kScratchDoubleReg, src);
__ Movups(src, dst);
__ Movups(dst, kScratchDoubleReg);
return;
}
} else {
// No other combinations are possible.
default:
UNREACHABLE();
break;
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment