Commit 16f2bcdb authored by Pierre Langlois, committed by Commit Bot

[turbofan] Refactor AssembleMove and AssembleSwap

The way the code generator's AssembleMove and AssembleSwap methods are written
makes it easy to forget which kind of move is being implemented when reading a
sequence of instructions. This patch addresses this by rewriting those methods
as a switch/case over the kind of move instead of a chain of if/else blocks.

To do this, introduce new utility functions that detect which type of move to
perform for a given pair of InstructionOperands; a sketch of the resulting
dispatch shape follows.
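
The dispatch now reads roughly as follows (an illustrative sketch only,
simplified from the arm changes below, with the case bodies elided):

  void CodeGenerator::AssembleMove(InstructionOperand* source,
                                   InstructionOperand* destination) {
    switch (MoveType::InferMove(source, destination)) {
      case MoveType::kRegisterToRegister:
        // Emit a register-to-register move.
        return;
      case MoveType::kRegisterToStack:
        // Emit a store to the destination stack slot.
        return;
      case MoveType::kStackToRegister:
        // Emit a load from the source stack slot.
        return;
      case MoveType::kStackToStack:
        // Copy between slots via a scratch register.
        return;
      case MoveType::kConstantToRegister:
        // Materialize the constant into the destination register.
        return;
      case MoveType::kConstantToStack:
        // Materialize the constant and spill it to the slot.
        return;
    }
    UNREACHABLE();
  }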

Bug: 
Change-Id: I32b146c86409e595b7b59a66bf43220899024fdd
Reviewed-on: https://chromium-review.googlesource.com/749201
Commit-Queue: Pierre Langlois <pierre.langlois@arm.com>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50966}
parent 396e7bc8
@@ -241,22 +241,6 @@ void TurboAssembler::Ret(int drop, Condition cond) {
   Ret(cond);
 }
 
-void MacroAssembler::Swap(Register reg1,
-                          Register reg2,
-                          Register scratch,
-                          Condition cond) {
-  if (scratch == no_reg) {
-    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
-    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
-    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
-  } else {
-    mov(scratch, reg1, LeaveCC, cond);
-    mov(reg1, reg2, LeaveCC, cond);
-    mov(reg2, scratch, LeaveCC, cond);
-  }
-}
-
 void TurboAssembler::Call(Label* target) { bl(target); }
 
 void TurboAssembler::Push(Handle<HeapObject> handle) {
@@ -305,9 +289,17 @@ void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
   }
 }
 
-void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
-  if (srcdst0 == srcdst1) return;  // Swapping aliased registers emits nothing.
+void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
+  DCHECK(srcdst0 != srcdst1);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  mov(scratch, srcdst0);
+  mov(srcdst0, srcdst1);
+  mov(srcdst1, scratch);
+}
+
+void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+  DCHECK(srcdst0 != srcdst1);
   DCHECK(VfpRegisterIsAvailable(srcdst0));
   DCHECK(VfpRegisterIsAvailable(srcdst1));
@@ -323,9 +315,8 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
 }
 
 void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
-  if (srcdst0 != srcdst1) {
-    vswp(srcdst0, srcdst1);
-  }
+  DCHECK(srcdst0 != srcdst1);
+  vswp(srcdst0, srcdst1);
 }
 
 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
......
@@ -482,7 +482,8 @@ class TurboAssembler : public Assembler {
   void VmovExtended(int dst_code, const MemOperand& src);
   void VmovExtended(const MemOperand& dst, int src_code);
 
-  // Register swap.
+  // Register swap. Note that the register operands should be distinct.
+  void Swap(Register srcdst0, Register srcdst1);
   void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
   void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
@@ -580,11 +581,6 @@ class MacroAssembler : public TurboAssembler {
   MacroAssembler(Isolate* isolate, void* buffer, int size,
                  CodeObjectRequired create_code_object);
 
-  // Swap two registers. If the scratch register is omitted then a slightly
-  // less efficient form using xor instead of mov is emitted.
-  void Swap(Register reg1, Register reg2, Register scratch = no_reg,
-            Condition cond = al);
-
   void Mls(Register dst, Register src1, Register src2, Register srcA,
            Condition cond = al);
   void And(Register dst, Register src1, const Operand& src2,
......
@@ -1571,6 +1571,34 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
 void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
 void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
 
+void TurboAssembler::Swap(Register lhs, Register rhs) {
+  DCHECK(lhs.IsSameSizeAndType(rhs));
+  DCHECK(!lhs.Is(rhs));
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  Mov(temp, rhs);
+  Mov(rhs, lhs);
+  Mov(lhs, temp);
+}
+
+void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
+  DCHECK(lhs.IsSameSizeAndType(rhs));
+  DCHECK(!lhs.Is(rhs));
+  UseScratchRegisterScope temps(this);
+  VRegister temp = VRegister::no_reg();
+  if (lhs.IsS()) {
+    temp = temps.AcquireS();
+  } else if (lhs.IsD()) {
+    temp = temps.AcquireD();
+  } else {
+    DCHECK(lhs.IsQ());
+    temp = temps.AcquireQ();
+  }
+  Mov(temp, rhs);
+  Mov(rhs, lhs);
+  Mov(lhs, temp);
+}
+
 void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
......
@@ -255,6 +255,10 @@ class TurboAssembler : public Assembler {
   void Move(Register dst, Handle<HeapObject> x);
   void Move(Register dst, Smi* src);
 
+  // Register swap. Note that the register operands should be distinct.
+  void Swap(Register lhs, Register rhs);
+  void Swap(VRegister lhs, VRegister rhs);
+
   // NEON by element instructions.
 #define NEON_BYELEMENT_MACRO_LIST(V) \
   V(fmla, Fmla)                      \
@@ -2104,6 +2108,7 @@ class UseScratchRegisterScope {
   Register AcquireX() { return AcquireNextAvailable(available_).X(); }
   VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
   VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+  VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
   VRegister AcquireV(VectorFormat format) {
     return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
   }
......
@@ -44,21 +44,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
   }
 
   Operand InputImmediate(size_t index) {
-    Constant constant = ToConstant(instr_->InputAt(index));
-    switch (constant.type()) {
-      case Constant::kInt32:
-        return Operand(constant.ToInt32());
-      case Constant::kFloat32:
-        return Operand::EmbeddedNumber(constant.ToFloat32());
-      case Constant::kFloat64:
-        return Operand::EmbeddedNumber(constant.ToFloat64().value());
-      case Constant::kInt64:
-      case Constant::kExternalReference:
-      case Constant::kHeapObject:
-      case Constant::kRpoNumber:
-        break;
-    }
-    UNREACHABLE();
+    return ToImmediate(instr_->InputAt(index));
   }
 
   Operand InputOperand2(size_t first_index) {
@@ -124,6 +110,30 @@ class ArmOperandConverter final : public InstructionOperandConverter {
     return InputOffset(&first_index);
   }
 
+  Operand ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        if (RelocInfo::IsWasmReference(constant.rmode())) {
+          return Operand(constant.ToInt32(), constant.rmode());
+        } else {
+          return Operand(constant.ToInt32());
+        }
+      case Constant::kFloat32:
+        return Operand::EmbeddedNumber(constant.ToFloat32());
+      case Constant::kFloat64:
+        return Operand::EmbeddedNumber(constant.ToFloat64().value());
+      case Constant::kExternalReference:
+        return Operand(constant.ToExternalReference());
+      case Constant::kInt64:
+      case Constant::kHeapObject:
+      // TODO(dcarney): loading RPO constants on arm.
+      case Constant::kRpoNumber:
+        break;
+    }
+    UNREACHABLE();
+  }
+
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
@@ -2927,154 +2937,87 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      __ mov(g.ToRegister(destination), src);
-    } else {
-      __ str(src, g.ToMemOperand(destination));
-    }
-  } else if (source->IsStackSlot()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    MemOperand src = g.ToMemOperand(source);
-    if (destination->IsRegister()) {
-      __ ldr(g.ToRegister(destination), src);
-    } else {
-      Register temp = kScratchReg;
-      __ ldr(temp, src);
-      __ str(temp, g.ToMemOperand(destination));
-    }
-  } else if (source->IsConstant()) {
-    Constant src = g.ToConstant(source);
-    if (destination->IsRegister() || destination->IsStackSlot()) {
-      Register dst =
-          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
-      switch (src.type()) {
-        case Constant::kInt32:
-          if (RelocInfo::IsWasmReference(src.rmode())) {
-            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
-          } else {
-            __ mov(dst, Operand(src.ToInt32()));
-          }
-          break;
-        case Constant::kInt64:
-          UNREACHABLE();
-          break;
-        case Constant::kFloat32:
-          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
-          break;
-        case Constant::kFloat64:
-          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
-          break;
-        case Constant::kExternalReference:
-          __ mov(dst, Operand(src.ToExternalReference()));
-          break;
-        case Constant::kHeapObject: {
-          Handle<HeapObject> src_object = src.ToHeapObject();
-          Heap::RootListIndex index;
-          if (IsMaterializableFromRoot(src_object, &index)) {
-            __ LoadRoot(dst, index);
-          } else {
-            __ Move(dst, src_object);
-          }
-          break;
-        }
-        case Constant::kRpoNumber:
-          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
-          break;
-      }
-      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
-    } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsFloatStackSlot()) {
-        MemOperand dst = g.ToMemOperand(destination);
-        Register temp = kScratchReg;
-        __ mov(temp, Operand(bit_cast<int32_t>(src.ToFloat32())));
-        __ str(temp, dst);
-      } else {
-        SwVfpRegister dst = g.ToFloatRegister(destination);
-        __ vmov(dst, Float32::FromBits(src.ToFloat32AsInt()));
-      }
-    } else {
-      DCHECK_EQ(Constant::kFloat64, src.type());
-      DwVfpRegister dst = destination->IsFPRegister()
-                              ? g.ToDoubleRegister(destination)
-                              : kScratchDoubleReg;
-      __ vmov(dst, src.ToFloat64(), kScratchReg);
-      if (destination->IsDoubleStackSlot()) {
-        __ vstr(dst, g.ToMemOperand(destination));
-      }
-    }
-  } else if (source->IsFPRegister()) {
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
-      DwVfpRegister src = g.ToDoubleRegister(source);
-      if (destination->IsDoubleRegister()) {
-        DwVfpRegister dst = g.ToDoubleRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsDoubleStackSlot());
-        __ vstr(src, g.ToMemOperand(destination));
-      }
-    } else if (rep == MachineRepresentation::kFloat32) {
-      // GapResolver may give us reg codes that don't map to actual s-registers.
-      // Generate code to work around those cases.
-      int src_code = LocationOperand::cast(source)->register_code();
-      if (destination->IsFloatRegister()) {
-        int dst_code = LocationOperand::cast(destination)->register_code();
-        __ VmovExtended(dst_code, src_code);
-      } else {
-        DCHECK(destination->IsFloatStackSlot());
-        __ VmovExtended(g.ToMemOperand(destination), src_code);
-      }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      QwNeonRegister src = g.ToSimd128Register(source);
-      if (destination->IsSimd128Register()) {
-        QwNeonRegister dst = g.ToSimd128Register(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsSimd128StackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
-        __ vst1(Neon8, NeonListOperand(src.low(), 2),
-                NeonMemOperand(kScratchReg));
-      }
-    }
-  } else if (source->IsFPStackSlot()) {
-    MemOperand src = g.ToMemOperand(source);
-    MachineRepresentation rep =
-        LocationOperand::cast(destination)->representation();
-    if (destination->IsFPRegister()) {
-      if (rep == MachineRepresentation::kFloat64) {
-        __ vldr(g.ToDoubleRegister(destination), src);
-      } else if (rep == MachineRepresentation::kFloat32) {
-        // GapResolver may give us reg codes that don't map to actual
-        // s-registers. Generate code to work around those cases.
-        int dst_code = LocationOperand::cast(destination)->register_code();
-        __ VmovExtended(dst_code, src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        QwNeonRegister dst = g.ToSimd128Register(destination);
-        __ add(kScratchReg, src.rn(), Operand(src.offset()));
-        __ vld1(Neon8, NeonListOperand(dst.low(), 2),
-                NeonMemOperand(kScratchReg));
-      }
-    } else {
-      DCHECK(destination->IsFPStackSlot());
-      if (rep == MachineRepresentation::kFloat64) {
-        DwVfpRegister temp = kScratchDoubleReg;
-        __ vldr(temp, src);
-        __ vstr(temp, g.ToMemOperand(destination));
-      } else if (rep == MachineRepresentation::kFloat32) {
-        SwVfpRegister temp = kScratchDoubleReg.low();
-        __ vldr(temp, src);
-        __ vstr(temp, g.ToMemOperand(destination));
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        MemOperand dst = g.ToMemOperand(destination);
-        __ add(kScratchReg, src.rn(), Operand(src.offset()));
-        __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
-                NeonMemOperand(kScratchReg));
+  // Helper function to write the given constant to the dst register.
+  auto MoveConstantToRegister = [&](Register dst, Constant src) {
+    if (src.type() == Constant::kHeapObject) {
+      Handle<HeapObject> src_object = src.ToHeapObject();
+      Heap::RootListIndex index;
+      if (IsMaterializableFromRoot(src_object, &index)) {
+        __ LoadRoot(dst, index);
+      } else {
+        __ Move(dst, src_object);
+      }
+    } else {
+      __ mov(dst, g.ToImmediate(source));
+    }
+  };
+  switch (MoveType::InferMove(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ mov(g.ToRegister(destination), g.ToRegister(source));
+      } else if (source->IsFloatRegister()) {
+        DCHECK(destination->IsFloatRegister());
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        int src_code = LocationOperand::cast(source)->register_code();
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(dst_code, src_code);
+      } else if (source->IsDoubleRegister()) {
+        __ Move(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
+      } else {
+        __ Move(g.ToSimd128Register(destination), g.ToSimd128Register(source));
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      MemOperand dst = g.ToMemOperand(destination);
+      if (source->IsRegister()) {
+        __ str(g.ToRegister(source), dst);
+      } else if (source->IsFloatRegister()) {
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        int src_code = LocationOperand::cast(source)->register_code();
+        __ VmovExtended(dst, src_code);
+      } else if (source->IsDoubleRegister()) {
+        __ vstr(g.ToDoubleRegister(source), dst);
+      } else {
+        QwNeonRegister src = g.ToSimd128Register(source);
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vst1(Neon8, NeonListOperand(src.low(), 2),
+                NeonMemOperand(kScratchReg));
+      }
+      return;
+    }
+    case MoveType::kStackToRegister: {
+      MemOperand src = g.ToMemOperand(source);
+      if (source->IsStackSlot()) {
+        __ ldr(g.ToRegister(destination), src);
+      } else if (source->IsFloatStackSlot()) {
+        DCHECK(destination->IsFloatRegister());
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(dst_code, src);
+      } else if (source->IsDoubleStackSlot()) {
+        __ vldr(g.ToDoubleRegister(destination), src);
+      } else {
+        QwNeonRegister dst = g.ToSimd128Register(destination);
+        __ add(kScratchReg, src.rn(), Operand(src.offset()));
+        __ vld1(Neon8, NeonListOperand(dst.low(), 2),
+                NeonMemOperand(kScratchReg));
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      MemOperand src = g.ToMemOperand(source);
+      MemOperand dst = g.ToMemOperand(destination);
+      if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+        __ ldr(kScratchReg, src);
+        __ str(kScratchReg, dst);
+      } else if (source->IsDoubleStackSlot()) {
+        __ vldr(kScratchDoubleReg, src);
+        __ vstr(kScratchDoubleReg, dst);
+      } else {
+        DCHECK(source->IsSimd128StackSlot());
+        __ add(kScratchReg, src.rn(), Operand(src.offset()));
+        __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
@@ -3082,81 +3025,83 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
-        __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
-                NeonMemOperand(kScratchReg));
-      }
-    }
-  } else {
-    UNREACHABLE();
-  }
+        __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
+      }
+      return;
+    }
+    case MoveType::kConstantToRegister: {
+      Constant src = g.ToConstant(source);
+      if (destination->IsRegister()) {
+        MoveConstantToRegister(g.ToRegister(destination), src);
+      } else if (destination->IsFloatRegister()) {
+        __ vmov(g.ToFloatRegister(destination),
+                Float32::FromBits(src.ToFloat32AsInt()));
+      } else {
+        __ vmov(g.ToDoubleRegister(destination), src.ToFloat64(), kScratchReg);
+      }
+      return;
+    }
+    case MoveType::kConstantToStack: {
+      Constant src = g.ToConstant(source);
+      MemOperand dst = g.ToMemOperand(destination);
+      if (destination->IsStackSlot()) {
+        MoveConstantToRegister(kScratchReg, src);
+        __ str(kScratchReg, dst);
+      } else if (destination->IsFloatStackSlot()) {
+        __ mov(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
+        __ str(kScratchReg, dst);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        __ vmov(kScratchDoubleReg, src.ToFloat64(), kScratchReg);
+        __ vstr(kScratchDoubleReg, g.ToMemOperand(destination));
+      }
+      return;
+    }
+  }
+  UNREACHABLE();
 }
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    // Register-register.
-    Register temp = kScratchReg;
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      Register dst = g.ToRegister(destination);
-      __ Move(temp, src);
-      __ Move(src, dst);
-      __ Move(dst, temp);
-    } else {
-      DCHECK(destination->IsStackSlot());
-      MemOperand dst = g.ToMemOperand(destination);
-      __ mov(temp, src);
-      __ ldr(src, dst);
-      __ str(temp, dst);
-    }
-  } else if (source->IsStackSlot()) {
-    DCHECK(destination->IsStackSlot());
-    Register temp_0 = kScratchReg;
-    SwVfpRegister temp_1 = kScratchDoubleReg.low();
-    MemOperand src = g.ToMemOperand(source);
-    MemOperand dst = g.ToMemOperand(destination);
-    __ ldr(temp_0, src);
-    __ vldr(temp_1, dst);
-    __ str(temp_0, dst);
-    __ vstr(temp_1, src);
-  } else if (source->IsFPRegister()) {
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    LowDwVfpRegister temp = kScratchDoubleReg;
-    if (rep == MachineRepresentation::kFloat64) {
-      DwVfpRegister src = g.ToDoubleRegister(source);
-      if (destination->IsFPRegister()) {
-        DwVfpRegister dst = g.ToDoubleRegister(destination);
-        __ Swap(src, dst);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ Move(temp, src);
-        __ vldr(src, dst);
-        __ vstr(temp, dst);
-      }
-    } else if (rep == MachineRepresentation::kFloat32) {
-      int src_code = LocationOperand::cast(source)->register_code();
-      if (destination->IsFPRegister()) {
-        int dst_code = LocationOperand::cast(destination)->register_code();
-        __ VmovExtended(temp.low().code(), src_code);
-        __ VmovExtended(src_code, dst_code);
-        __ VmovExtended(dst_code, temp.low().code());
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ VmovExtended(temp.low().code(), src_code);
-        __ VmovExtended(src_code, dst);
-        __ vstr(temp.low(), dst);
-      }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      QwNeonRegister src = g.ToSimd128Register(source);
-      if (destination->IsFPRegister()) {
-        QwNeonRegister dst = g.ToSimd128Register(destination);
-        __ Swap(src, dst);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ Move(kScratchQuadReg, src);
-        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
-        __ vld1(Neon8, NeonListOperand(src.low(), 2),
+  switch (MoveType::InferSwap(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ Swap(g.ToRegister(source), g.ToRegister(destination));
+      } else if (source->IsFloatRegister()) {
+        DCHECK(destination->IsFloatRegister());
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        LowDwVfpRegister temp = kScratchDoubleReg;
+        int src_code = LocationOperand::cast(source)->register_code();
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(temp.low().code(), src_code);
+        __ VmovExtended(src_code, dst_code);
+        __ VmovExtended(dst_code, temp.low().code());
+      } else if (source->IsDoubleRegister()) {
+        __ Swap(g.ToDoubleRegister(source), g.ToDoubleRegister(destination));
+      } else {
+        __ Swap(g.ToSimd128Register(source), g.ToSimd128Register(destination));
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      MemOperand dst = g.ToMemOperand(destination);
+      if (source->IsRegister()) {
+        Register src = g.ToRegister(source);
+        __ mov(kScratchReg, src);
+        __ ldr(src, dst);
+        __ str(kScratchReg, dst);
+      } else if (source->IsFloatRegister()) {
+        int src_code = LocationOperand::cast(source)->register_code();
+        LowDwVfpRegister temp = kScratchDoubleReg;
+        __ VmovExtended(temp.low().code(), src_code);
+        __ VmovExtended(src_code, dst);
+        __ vstr(temp.low(), dst);
+      } else if (source->IsDoubleRegister()) {
+        DwVfpRegister src = g.ToDoubleRegister(source);
+        __ Move(kScratchDoubleReg, src);
+        __ vldr(src, dst);
+        __ vstr(kScratchDoubleReg, dst);
+      } else {
+        QwNeonRegister src = g.ToSimd128Register(source);
+        __ Move(kScratchQuadReg, src);
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vld1(Neon8, NeonListOperand(src.low(), 2),
@@ -3164,44 +3109,49 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
-        __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
-                NeonMemOperand(kScratchReg));
-      }
-    }
-  } else if (source->IsFPStackSlot()) {
-    DCHECK(destination->IsFPStackSlot());
-    Register temp_0 = kScratchReg;
-    LowDwVfpRegister temp_1 = kScratchDoubleReg;
-    MemOperand src0 = g.ToMemOperand(source);
-    MemOperand dst0 = g.ToMemOperand(destination);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
-      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
-      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
-      __ vldr(temp_1, dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ ldr(temp_0, src1);
-      __ str(temp_0, dst1);
-      __ vstr(temp_1, src0);
-    } else if (rep == MachineRepresentation::kFloat32) {
-      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ vstr(temp_1.low(), src0);
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      MemOperand src1(src0.rn(), src0.offset() + kDoubleSize);
-      MemOperand dst1(dst0.rn(), dst0.offset() + kDoubleSize);
-      __ vldr(kScratchQuadReg.low(), dst0);
-      __ vldr(kScratchQuadReg.high(), src0);
-      __ vstr(kScratchQuadReg.low(), src0);
-      __ vstr(kScratchQuadReg.high(), dst0);
-      __ vldr(kScratchQuadReg.low(), dst1);
-      __ vldr(kScratchQuadReg.high(), src1);
-      __ vstr(kScratchQuadReg.low(), src1);
-      __ vstr(kScratchQuadReg.high(), dst1);
-    }
-  } else {
-    // No other combinations are possible.
-    UNREACHABLE();
-  }
+        __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      MemOperand src = g.ToMemOperand(source);
+      MemOperand dst = g.ToMemOperand(destination);
+      if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+        Register temp_0 = kScratchReg;
+        SwVfpRegister temp_1 = kScratchDoubleReg.low();
+        __ ldr(temp_0, src);
+        __ vldr(temp_1, dst);
+        __ str(temp_0, dst);
+        __ vstr(temp_1, src);
+      } else if (source->IsDoubleStackSlot()) {
+        Register temp_0 = kScratchReg;
+        LowDwVfpRegister temp_1 = kScratchDoubleReg;
+        // Save destination in temp_1.
+        __ vldr(temp_1, dst);
+        // Then use temp_0 to copy source to destination.
+        __ ldr(temp_0, src);
+        __ str(temp_0, dst);
+        __ ldr(temp_0, MemOperand(src.rn(), src.offset() + kPointerSize));
+        __ str(temp_0, MemOperand(dst.rn(), dst.offset() + kPointerSize));
+        __ vstr(temp_1, src);
+      } else {
+        DCHECK(source->IsSimd128StackSlot());
+        MemOperand src0 = src;
+        MemOperand dst0 = dst;
+        MemOperand src1(src.rn(), src.offset() + kDoubleSize);
+        MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
+        __ vldr(kScratchQuadReg.low(), dst0);
+        __ vldr(kScratchQuadReg.high(), src0);
+        __ vstr(kScratchQuadReg.low(), src0);
+        __ vstr(kScratchQuadReg.high(), dst0);
+        __ vldr(kScratchQuadReg.low(), dst1);
+        __ vldr(kScratchQuadReg.high(), src1);
+        __ vstr(kScratchQuadReg.low(), src1);
+        __ vstr(kScratchQuadReg.high(), dst1);
+      }
+      return;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
 }
......
@@ -2486,195 +2486,195 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   Arm64OperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      __ Mov(g.ToRegister(destination), src);
-    } else {
-      __ Str(src, g.ToMemOperand(destination, tasm()));
-    }
-  } else if (source->IsStackSlot()) {
-    MemOperand src = g.ToMemOperand(source, tasm());
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    if (destination->IsRegister()) {
-      __ Ldr(g.ToRegister(destination), src);
-    } else {
-      UseScratchRegisterScope scope(tasm());
-      Register temp = scope.AcquireX();
-      __ Ldr(temp, src);
-      __ Str(temp, g.ToMemOperand(destination, tasm()));
-    }
-  } else if (source->IsConstant()) {
-    Constant src = g.ToConstant(ConstantOperand::cast(source));
-    if (destination->IsRegister() || destination->IsStackSlot()) {
-      UseScratchRegisterScope scope(tasm());
-      Register dst = destination->IsRegister() ? g.ToRegister(destination)
-                                               : scope.AcquireX();
-      if (src.type() == Constant::kHeapObject) {
-        Handle<HeapObject> src_object = src.ToHeapObject();
-        Heap::RootListIndex index;
-        if (IsMaterializableFromRoot(src_object, &index)) {
-          __ LoadRoot(dst, index);
-        } else {
-          __ Mov(dst, src_object);
-        }
-      } else {
-        __ Mov(dst, g.ToImmediate(source));
-      }
-      if (destination->IsStackSlot()) {
-        __ Str(dst, g.ToMemOperand(destination, tasm()));
-      }
-    } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsFPRegister()) {
-        VRegister dst = g.ToDoubleRegister(destination).S();
-        __ Fmov(dst, src.ToFloat32());
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
-          __ Str(wzr, g.ToMemOperand(destination, tasm()));
-        } else {
-          UseScratchRegisterScope scope(tasm());
-          VRegister temp = scope.AcquireS();
-          __ Fmov(temp, src.ToFloat32());
-          __ Str(temp, g.ToMemOperand(destination, tasm()));
-        }
-      }
-    } else {
-      DCHECK_EQ(Constant::kFloat64, src.type());
-      if (destination->IsFPRegister()) {
-        VRegister dst = g.ToDoubleRegister(destination);
-        __ Fmov(dst, src.ToFloat64().value());
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        if (src.ToFloat64().AsUint64() == 0) {
-          __ Str(xzr, g.ToMemOperand(destination, tasm()));
-        } else {
-          UseScratchRegisterScope scope(tasm());
-          VRegister temp = scope.AcquireD();
-          __ Fmov(temp, src.ToFloat64().value());
-          __ Str(temp, g.ToMemOperand(destination, tasm()));
-        }
-      }
-    }
-  } else if (source->IsFPRegister()) {
-    VRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      VRegister dst = g.ToDoubleRegister(destination);
-      if (destination->IsSimd128Register()) {
-        __ Mov(dst.Q(), src.Q());
-      } else {
-        __ Mov(dst, src);
-      }
-    } else {
-      DCHECK(destination->IsFPStackSlot());
-      MemOperand dst = g.ToMemOperand(destination, tasm());
-      if (destination->IsSimd128StackSlot()) {
-        __ Str(src.Q(), dst);
-      } else {
-        __ Str(src, dst);
-      }
-    }
-  } else if (source->IsFPStackSlot()) {
-    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
-    MemOperand src = g.ToMemOperand(source, tasm());
-    if (destination->IsFPRegister()) {
-      VRegister dst = g.ToDoubleRegister(destination);
-      if (destination->IsSimd128Register()) {
-        __ Ldr(dst.Q(), src);
-      } else {
-        __ Ldr(dst, src);
-      }
-    } else {
-      UseScratchRegisterScope scope(tasm());
-      VRegister temp = scope.AcquireD();
-      MemOperand dst = g.ToMemOperand(destination, tasm());
-      if (destination->IsSimd128StackSlot()) {
-        __ Ldr(temp.Q(), src);
-        __ Str(temp.Q(), dst);
-      } else {
-        __ Ldr(temp, src);
-        __ Str(temp, dst);
-      }
-    }
-  } else {
-    UNREACHABLE();
-  }
+  // Helper function to write the given constant to the dst register.
+  auto MoveConstantToRegister = [&](Register dst, Constant src) {
+    if (src.type() == Constant::kHeapObject) {
+      Handle<HeapObject> src_object = src.ToHeapObject();
+      Heap::RootListIndex index;
+      if (IsMaterializableFromRoot(src_object, &index)) {
+        __ LoadRoot(dst, index);
+      } else {
+        __ Mov(dst, src_object);
+      }
+    } else {
+      __ Mov(dst, g.ToImmediate(source));
+    }
+  };
+  switch (MoveType::InferMove(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ Mov(g.ToRegister(destination), g.ToRegister(source));
+      } else if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+        __ Mov(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
+      } else {
+        DCHECK(source->IsSimd128Register());
+        __ Mov(g.ToDoubleRegister(destination).Q(),
+               g.ToDoubleRegister(source).Q());
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      if (source->IsRegister()) {
+        __ Str(g.ToRegister(source), dst);
+      } else {
+        VRegister src = g.ToDoubleRegister(source);
+        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+          __ Str(src, dst);
+        } else {
+          DCHECK(source->IsSimd128Register());
+          __ Str(src.Q(), dst);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToRegister: {
+      MemOperand src = g.ToMemOperand(source, tasm());
+      if (destination->IsRegister()) {
+        __ Ldr(g.ToRegister(destination), src);
+      } else {
+        VRegister dst = g.ToDoubleRegister(destination);
+        if (destination->IsFloatRegister() || destination->IsDoubleRegister()) {
+          __ Ldr(dst, src);
+        } else {
+          DCHECK(destination->IsSimd128Register());
+          __ Ldr(dst.Q(), src);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      MemOperand src = g.ToMemOperand(source, tasm());
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      if (source->IsSimd128StackSlot()) {
+        UseScratchRegisterScope scope(tasm());
+        VRegister temp = scope.AcquireQ();
+        __ Ldr(temp, src);
+        __ Str(temp, dst);
+      } else {
+        UseScratchRegisterScope scope(tasm());
+        Register temp = scope.AcquireX();
+        __ Ldr(temp, src);
+        __ Str(temp, dst);
+      }
+      return;
+    }
+    case MoveType::kConstantToRegister: {
+      Constant src = g.ToConstant(source);
+      if (destination->IsRegister()) {
+        MoveConstantToRegister(g.ToRegister(destination), src);
+      } else {
+        VRegister dst = g.ToDoubleRegister(destination);
+        if (destination->IsFloatRegister()) {
+          __ Fmov(dst.S(), src.ToFloat32());
+        } else {
+          DCHECK(destination->IsDoubleRegister());
+          __ Fmov(dst, src.ToFloat64().value());
+        }
+      }
+      return;
+    }
+    case MoveType::kConstantToStack: {
+      Constant src = g.ToConstant(source);
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      if (destination->IsStackSlot()) {
+        UseScratchRegisterScope scope(tasm());
+        Register temp = scope.AcquireX();
+        MoveConstantToRegister(temp, src);
+        __ Str(temp, dst);
+      } else if (destination->IsFloatStackSlot()) {
+        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+          __ Str(wzr, dst);
+        } else {
+          UseScratchRegisterScope scope(tasm());
+          VRegister temp = scope.AcquireS();
+          __ Fmov(temp, src.ToFloat32());
+          __ Str(temp, dst);
+        }
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        if (src.ToFloat64().AsUint64() == 0) {
+          __ Str(xzr, dst);
+        } else {
+          UseScratchRegisterScope scope(tasm());
+          VRegister temp = scope.AcquireD();
+          __ Fmov(temp, src.ToFloat64().value());
+          __ Str(temp, dst);
+        }
+      }
+      return;
+    }
+  }
+  UNREACHABLE();
 }
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
   Arm64OperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    // Register-register.
-    UseScratchRegisterScope scope(tasm());
-    Register temp = scope.AcquireX();
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      Register dst = g.ToRegister(destination);
-      __ Mov(temp, src);
-      __ Mov(src, dst);
-      __ Mov(dst, temp);
-    } else {
-      DCHECK(destination->IsStackSlot());
-      MemOperand dst = g.ToMemOperand(destination, tasm());
-      __ Mov(temp, src);
-      __ Ldr(src, dst);
-      __ Str(temp, dst);
-    }
-  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
-    UseScratchRegisterScope scope(tasm());
-    VRegister temp_0 = scope.AcquireD();
-    VRegister temp_1 = scope.AcquireD();
-    MemOperand src = g.ToMemOperand(source, tasm());
-    MemOperand dst = g.ToMemOperand(destination, tasm());
-    if (source->IsSimd128StackSlot()) {
-      __ Ldr(temp_0.Q(), src);
-      __ Ldr(temp_1.Q(), dst);
-      __ Str(temp_0.Q(), dst);
-      __ Str(temp_1.Q(), src);
-    } else {
-      __ Ldr(temp_0, src);
-      __ Ldr(temp_1, dst);
-      __ Str(temp_0, dst);
-      __ Str(temp_1, src);
-    }
-  } else if (source->IsFPRegister()) {
-    UseScratchRegisterScope scope(tasm());
-    VRegister temp = scope.AcquireD();
-    VRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      VRegister dst = g.ToDoubleRegister(destination);
-      if (source->IsSimd128Register()) {
-        __ Mov(temp.Q(), src.Q());
-        __ Mov(src.Q(), dst.Q());
-        __ Mov(dst.Q(), temp.Q());
-      } else {
-        __ Mov(temp, src);
-        __ Mov(src, dst);
-        __ Mov(dst, temp);
-      }
-    } else {
-      DCHECK(destination->IsFPStackSlot());
-      MemOperand dst = g.ToMemOperand(destination, tasm());
-      if (source->IsSimd128Register()) {
-        __ Mov(temp.Q(), src.Q());
-        __ Ldr(src.Q(), dst);
-        __ Str(temp.Q(), dst);
-      } else {
-        __ Mov(temp, src);
-        __ Ldr(src, dst);
-        __ Str(temp, dst);
-      }
-    }
-  } else {
-    // No other combinations are possible.
-    UNREACHABLE();
-  }
+  switch (MoveType::InferSwap(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ Swap(g.ToRegister(source), g.ToRegister(destination));
+      } else {
+        VRegister src = g.ToDoubleRegister(source);
+        VRegister dst = g.ToDoubleRegister(destination);
+        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+          __ Swap(src, dst);
+        } else {
+          DCHECK(source->IsSimd128Register());
+          __ Swap(src.Q(), dst.Q());
+        }
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      UseScratchRegisterScope scope(tasm());
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      if (source->IsRegister()) {
+        Register temp = scope.AcquireX();
+        Register src = g.ToRegister(source);
+        __ Mov(temp, src);
+        __ Ldr(src, dst);
+        __ Str(temp, dst);
+      } else {
+        UseScratchRegisterScope scope(tasm());
+        VRegister src = g.ToDoubleRegister(source);
+        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+          VRegister temp = scope.AcquireD();
+          __ Mov(temp, src);
+          __ Ldr(src, dst);
+          __ Str(temp, dst);
+        } else {
+          DCHECK(source->IsSimd128Register());
+          VRegister temp = scope.AcquireQ();
+          __ Mov(temp, src.Q());
+          __ Ldr(src.Q(), dst);
+          __ Str(temp, dst);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      UseScratchRegisterScope scope(tasm());
+      MemOperand src = g.ToMemOperand(source, tasm());
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      VRegister temp_0 = scope.AcquireD();
+      VRegister temp_1 = scope.AcquireD();
+      if (source->IsSimd128StackSlot()) {
+        __ Ldr(temp_0.Q(), src);
+        __ Ldr(temp_1.Q(), dst);
+        __ Str(temp_0.Q(), dst);
+        __ Str(temp_1.Q(), src);
+      } else {
+        __ Ldr(temp_0, src);
+        __ Ldr(temp_1, dst);
+        __ Str(temp_0, dst);
+        __ Str(temp_1, src);
+      }
+      return;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
 }
......
@@ -488,6 +488,54 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
   pushes->resize(push_count);
 }
 
+CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
+    InstructionOperand* source, InstructionOperand* destination) {
+  if (source->IsConstant()) {
+    if (destination->IsAnyRegister()) {
+      return MoveType::kConstantToRegister;
+    } else {
+      DCHECK(destination->IsAnyStackSlot());
+      return MoveType::kConstantToStack;
+    }
+  }
+  DCHECK(LocationOperand::cast(source)->IsCompatible(
+      LocationOperand::cast(destination)));
+  if (source->IsAnyRegister()) {
+    if (destination->IsAnyRegister()) {
+      return MoveType::kRegisterToRegister;
+    } else {
+      DCHECK(destination->IsAnyStackSlot());
+      return MoveType::kRegisterToStack;
+    }
+  } else {
+    DCHECK(source->IsAnyStackSlot());
+    if (destination->IsAnyRegister()) {
+      return MoveType::kStackToRegister;
+    } else {
+      DCHECK(destination->IsAnyStackSlot());
+      return MoveType::kStackToStack;
+    }
+  }
+}
+
+CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
+    InstructionOperand* source, InstructionOperand* destination) {
+  DCHECK(LocationOperand::cast(source)->IsCompatible(
+      LocationOperand::cast(destination)));
+  if (source->IsAnyRegister()) {
+    if (destination->IsAnyRegister()) {
+      return MoveType::kRegisterToRegister;
+    } else {
+      DCHECK(destination->IsAnyStackSlot());
+      return MoveType::kRegisterToStack;
+    }
+  } else {
+    DCHECK(source->IsAnyStackSlot());
+    DCHECK(destination->IsAnyStackSlot());
+    return MoveType::kStackToStack;
+  }
+}
+
 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     Instruction* instr, const InstructionBlock* block) {
   int first_unused_stack_slot;
......
@@ -224,6 +224,26 @@ class CodeGenerator final : public GapResolver::Assembler {
                                       PushTypeFlags push_type,
                                       ZoneVector<MoveOperands*>* pushes);
 
+  class MoveType {
+   public:
+    enum Type {
+      kRegisterToRegister,
+      kRegisterToStack,
+      kStackToRegister,
+      kStackToStack,
+      kConstantToRegister,
+      kConstantToStack
+    };
+    // Detect what type of move or swap needs to be performed. Note that these
+    // functions do not take into account the representation (Tagged, FP,
+    // ...etc).
+    static Type InferMove(InstructionOperand* source,
+                          InstructionOperand* destination);
+    static Type InferSwap(InstructionOperand* source,
+                          InstructionOperand* destination);
+  };
+
   // Called before a tail call |instr|'s gap moves are assembled and allows
   // gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
   // need it before gap moves or conversion of certain gap moves into pushes.
......
@@ -3510,119 +3510,129 @@ void CodeGenerator::FinishCode() {}
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   IA32OperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Register src = g.ToRegister(source);
-    Operand dst = g.ToOperand(destination);
-    __ mov(dst, src);
-  } else if (source->IsStackSlot()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Operand src = g.ToOperand(source);
-    if (destination->IsRegister()) {
-      Register dst = g.ToRegister(destination);
-      __ mov(dst, src);
-    } else {
-      Operand dst = g.ToOperand(destination);
-      __ push(src);
-      __ pop(dst);
-    }
-  } else if (source->IsConstant()) {
-    Constant src_constant = g.ToConstant(source);
-    if (src_constant.type() == Constant::kHeapObject) {
-      Handle<HeapObject> src = src_constant.ToHeapObject();
-      if (destination->IsRegister()) {
-        Register dst = g.ToRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsStackSlot());
-        Operand dst = g.ToOperand(destination);
-        __ mov(dst, src);
-      }
-    } else if (destination->IsRegister()) {
-      Register dst = g.ToRegister(destination);
-      __ Move(dst, g.ToImmediate(source));
-    } else if (destination->IsStackSlot()) {
-      Operand dst = g.ToOperand(destination);
-      __ Move(dst, g.ToImmediate(source));
-    } else if (src_constant.type() == Constant::kFloat32) {
-      // TODO(turbofan): Can we do better here?
-      uint32_t src = src_constant.ToFloat32AsInt();
-      if (destination->IsFPRegister()) {
-        XMMRegister dst = g.ToDoubleRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        Operand dst = g.ToOperand(destination);
-        __ Move(dst, Immediate(src));
-      }
-    } else {
-      DCHECK_EQ(Constant::kFloat64, src_constant.type());
-      uint64_t src = src_constant.ToFloat64().AsUint64();
-      uint32_t lower = static_cast<uint32_t>(src);
-      uint32_t upper = static_cast<uint32_t>(src >> 32);
-      if (destination->IsFPRegister()) {
-        XMMRegister dst = g.ToDoubleRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        Operand dst0 = g.ToOperand(destination);
-        Operand dst1 = g.ToOperand(destination, kPointerSize);
-        __ Move(dst0, Immediate(lower));
-        __ Move(dst1, Immediate(upper));
-      }
-    }
-  } else if (source->IsFPRegister()) {
-    XMMRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      XMMRegister dst = g.ToDoubleRegister(destination);
-      __ movaps(dst, src);
-    } else {
-      DCHECK(destination->IsFPStackSlot());
-      Operand dst = g.ToOperand(destination);
-      MachineRepresentation rep =
-          LocationOperand::cast(source)->representation();
-      if (rep == MachineRepresentation::kFloat64) {
-        __ movsd(dst, src);
-      } else if (rep == MachineRepresentation::kFloat32) {
-        __ movss(dst, src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        __ movups(dst, src);
-      }
-    }
-  } else if (source->IsFPStackSlot()) {
-    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
-    Operand src = g.ToOperand(source);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (destination->IsFPRegister()) {
-      XMMRegister dst = g.ToDoubleRegister(destination);
-      if (rep == MachineRepresentation::kFloat64) {
-        __ movsd(dst, src);
-      } else if (rep == MachineRepresentation::kFloat32) {
-        __ movss(dst, src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        __ movups(dst, src);
-      }
-    } else {
-      Operand dst = g.ToOperand(destination);
-      if (rep == MachineRepresentation::kFloat64) {
-        __ movsd(kScratchDoubleReg, src);
-        __ movsd(dst, kScratchDoubleReg);
-      } else if (rep == MachineRepresentation::kFloat32) {
-        __ movss(kScratchDoubleReg, src);
-        __ movss(dst, kScratchDoubleReg);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        __ movups(kScratchDoubleReg, src);
-        __ movups(dst, kScratchDoubleReg);
-      }
-    }
-  } else {
-    UNREACHABLE();
-  }
+  // Dispatch on the source and destination operand kinds.
+  switch (MoveType::InferMove(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ mov(g.ToRegister(destination), g.ToRegister(source));
+      } else {
+        DCHECK(source->IsFPRegister());
+        __ movaps(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      Operand dst = g.ToOperand(destination);
+      if (source->IsRegister()) {
+        __ mov(dst, g.ToRegister(source));
+      } else {
+        DCHECK(source->IsFPRegister());
+        XMMRegister src = g.ToDoubleRegister(source);
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(dst, src);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(dst, src);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(dst, src);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToRegister: {
+      Operand src = g.ToOperand(source);
+      if (source->IsStackSlot()) {
+        __ mov(g.ToRegister(destination), src);
+      } else {
+        DCHECK(source->IsFPStackSlot());
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(dst, src);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(dst, src);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(dst, src);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      Operand src = g.ToOperand(source);
+      Operand dst = g.ToOperand(destination);
+      if (source->IsStackSlot()) {
+        __ push(src);
+        __ pop(dst);
+      } else {
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(kScratchDoubleReg, src);
+          __ movss(dst, kScratchDoubleReg);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(kScratchDoubleReg, src);
+          __ movsd(dst, kScratchDoubleReg);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(kScratchDoubleReg, src);
+          __ movups(dst, kScratchDoubleReg);
+        }
+      }
+      return;
+    }
+    case MoveType::kConstantToRegister: {
+      Constant src = g.ToConstant(source);
+      if (destination->IsRegister()) {
+        Register dst = g.ToRegister(destination);
+        if (src.type() == Constant::kHeapObject) {
+          __ Move(dst, src.ToHeapObject());
+        } else {
+          __ Move(dst, g.ToImmediate(source));
+        }
+      } else {
+        DCHECK(destination->IsFPRegister());
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        if (src.type() == Constant::kFloat32) {
+          // TODO(turbofan): Can we do better here?
+          __ Move(dst, src.ToFloat32AsInt());
+        } else {
+          DCHECK_EQ(src.type(), Constant::kFloat64);
+          __ Move(dst, src.ToFloat64().AsUint64());
+        }
+      }
+      return;
+    }
+    case MoveType::kConstantToStack: {
+      Constant src = g.ToConstant(source);
+      Operand dst = g.ToOperand(destination);
+      if (destination->IsStackSlot()) {
+        if (src.type() == Constant::kHeapObject) {
+          __ mov(dst, src.ToHeapObject());
+        } else {
+          __ Move(dst, g.ToImmediate(source));
+        }
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        if (src.type() == Constant::kFloat32) {
+          __ Move(dst, Immediate(src.ToFloat32AsInt()));
+        } else {
+          DCHECK_EQ(src.type(), Constant::kFloat64);
+          uint64_t constant_value = src.ToFloat64().AsUint64();
+          uint32_t lower = static_cast<uint32_t>(constant_value);
+          uint32_t upper = static_cast<uint32_t>(constant_value >> 32);
+          Operand dst0 = dst;
+          Operand dst1 = g.ToOperand(destination, kPointerSize);
+          __ Move(dst0, Immediate(lower));
+          __ Move(dst1, Immediate(upper));
+        }
+      }
+      return;
+    }
+  }
+  UNREACHABLE();
 }
@@ -3631,94 +3641,106 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
   IA32OperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
-  if (source->IsRegister() && destination->IsRegister()) {
-    // Register-register.
-    Register src = g.ToRegister(source);
-    Register dst = g.ToRegister(destination);
-    __ push(src);
-    __ mov(src, dst);
-    __ pop(dst);
-  } else if (source->IsRegister() && destination->IsStackSlot()) {
-    // Register-memory.
-    Register src = g.ToRegister(source);
-    __ push(src);
-    frame_access_state()->IncreaseSPDelta(1);
-    Operand dst = g.ToOperand(destination);
-    __ mov(src, dst);
-    frame_access_state()->IncreaseSPDelta(-1);
-    dst = g.ToOperand(destination);
-    __ pop(dst);
-  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
-    // Memory-memory.
-    Operand dst1 = g.ToOperand(destination);
-    __ push(dst1);
-    frame_access_state()->IncreaseSPDelta(1);
-    Operand src1 = g.ToOperand(source);
-    __ push(src1);
-    Operand dst2 = g.ToOperand(destination);
-    __ pop(dst2);
-    frame_access_state()->IncreaseSPDelta(-1);
-    Operand src2 = g.ToOperand(source);
-    __ pop(src2);
-  } else if (source->IsFPRegister() && destination->IsFPRegister()) {
-    // XMM register-register swap.
-    XMMRegister src = g.ToDoubleRegister(source);
-    XMMRegister dst = g.ToDoubleRegister(destination);
-    __ movaps(kScratchDoubleReg, src);
-    __ movaps(src, dst);
-    __ movaps(dst, kScratchDoubleReg);
-  } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
-    // XMM register-memory swap.
-    XMMRegister reg = g.ToDoubleRegister(source);
-    Operand other = g.ToOperand(destination);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
-      __ movsd(kScratchDoubleReg, other);
-      __ movsd(other, reg);
-      __ movaps(reg, kScratchDoubleReg);
-    } else if (rep == MachineRepresentation::kFloat32) {
-      __ movss(kScratchDoubleReg, other);
-      __ movss(other, reg);
-      __ movaps(reg, kScratchDoubleReg);
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      __ movups(kScratchDoubleReg, other);
-      __ movups(other, reg);
-      __ movups(reg, kScratchDoubleReg);
-    }
-  } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
-    // Double-width memory-to-memory.
-    Operand src0 = g.ToOperand(source);
-    Operand dst0 = g.ToOperand(destination);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
-      __ movsd(kScratchDoubleReg, dst0);  // Save dst in scratch register.
-      __ push(src0);  // Then use stack to copy src to destination.
-      __ pop(dst0);
-      __ push(g.ToOperand(source, kPointerSize));
-      __ pop(g.ToOperand(destination, kPointerSize));
-      __ movsd(src0, kScratchDoubleReg);
-    } else if (rep == MachineRepresentation::kFloat32) {
-      __ movss(kScratchDoubleReg, dst0);  // Save dst in scratch register.
-      __ push(src0);  // Then use stack to copy src to destination.
-      __ pop(dst0);
-      __ movss(src0, kScratchDoubleReg);
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      __ movups(kScratchDoubleReg, dst0);  // Save dst in scratch register.
-      __ push(src0);  // Then use stack to copy src to destination.
-      __ pop(dst0);
-      __ push(g.ToOperand(source, kPointerSize));
-      __ pop(g.ToOperand(destination, kPointerSize));
-      __ push(g.ToOperand(source, 2 * kPointerSize));
-      __ pop(g.ToOperand(destination, 2 * kPointerSize));
-      __ push(g.ToOperand(source, 3 * kPointerSize));
-      __ pop(g.ToOperand(destination, 3 * kPointerSize));
-      __ movups(src0, kScratchDoubleReg);
-    }
-  } else {
-    // No other combinations are possible.
-    UNREACHABLE();
-  }
+  switch (MoveType::InferSwap(source, destination)) {
+    case MoveType::kRegisterToRegister: {
+      if (source->IsRegister()) {
+        Register src = g.ToRegister(source);
+        Register dst = g.ToRegister(destination);
+        __ push(src);
+        __ mov(src, dst);
+        __ pop(dst);
+      } else {
+        DCHECK(source->IsFPRegister());
+        XMMRegister src = g.ToDoubleRegister(source);
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        __ movaps(kScratchDoubleReg, src);
+        __ movaps(src, dst);
+        __ movaps(dst, kScratchDoubleReg);
+      }
+      return;
+    }
+    case MoveType::kRegisterToStack: {
+      if (source->IsRegister()) {
+        Register src = g.ToRegister(source);
+        __ push(src);
+        frame_access_state()->IncreaseSPDelta(1);
+        Operand dst = g.ToOperand(destination);
+        __ mov(src, dst);
+        frame_access_state()->IncreaseSPDelta(-1);
+        dst = g.ToOperand(destination);
+        __ pop(dst);
+      } else {
+        DCHECK(source->IsFPRegister());
+        XMMRegister src = g.ToDoubleRegister(source);
+        Operand dst = g.ToOperand(destination);
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(kScratchDoubleReg, dst);
+          __ movss(dst, src);
+          __ movaps(src, kScratchDoubleReg);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(kScratchDoubleReg, dst);
+          __ movsd(dst, src);
+          __ movaps(src, kScratchDoubleReg);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(kScratchDoubleReg, dst);
+          __ movups(dst, src);
+          __ movups(src, kScratchDoubleReg);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      if (source->IsStackSlot()) {
+        Operand dst1 = g.ToOperand(destination);
+        __ push(dst1);
+        frame_access_state()->IncreaseSPDelta(1);
+        Operand src1 = g.ToOperand(source);
+        __ push(src1);
+        Operand dst2 = g.ToOperand(destination);
+        __ pop(dst2);
+        frame_access_state()->IncreaseSPDelta(-1);
+        Operand src2 = g.ToOperand(source);
+        __ pop(src2);
+      } else {
+        DCHECK(source->IsFPStackSlot());
+        Operand src0 = g.ToOperand(source);
+        Operand dst0 = g.ToOperand(destination);
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(kScratchDoubleReg, dst0);  // Save dst in scratch register.
+          __ push(src0);  // Then use stack to copy src to destination.
+          __ pop(dst0);
+          __ movss(src0, kScratchDoubleReg);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(kScratchDoubleReg, dst0);  // Save dst in scratch register.
+          __ push(src0);  // Then use stack to copy src to destination.
+          __ pop(dst0);
+          __ push(g.ToOperand(source, kPointerSize));
+          __ pop(g.ToOperand(destination, kPointerSize));
+          __ movsd(src0, kScratchDoubleReg);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(kScratchDoubleReg, dst0);  // Save dst in scratch register.
+          __ push(src0);  // Then use stack to copy src to destination.
+          __ pop(dst0);
+          __ push(g.ToOperand(source, kPointerSize));
+          __ pop(g.ToOperand(destination, kPointerSize));
+          __ push(g.ToOperand(source, 2 * kPointerSize));
+          __ pop(g.ToOperand(destination, 2 * kPointerSize));
+          __ push(g.ToOperand(source, 3 * kPointerSize));
+          __ pop(g.ToOperand(destination, 3 * kPointerSize));
+          __ movups(src0, kScratchDoubleReg);
+        }
+      }
+      return;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
 }
......
@@ -96,6 +96,26 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
   return false;
 }
 
+bool LocationOperand::IsCompatible(LocationOperand* op) {
+  if (IsRegister() || IsStackSlot()) {
+    return op->IsRegister() || op->IsStackSlot();
+  } else if (kSimpleFPAliasing) {
+    // A backend may choose to generate the same instruction sequence
+    // regardless of the FP representation. As a result, we can relax the
+    // compatibility and allow a Double to be moved in a Float for example.
+    // However, this is only allowed if registers do not overlap.
+    return (IsFPRegister() || IsFPStackSlot()) &&
+           (op->IsFPRegister() || op->IsFPStackSlot());
+  } else if (IsFloatRegister() || IsFloatStackSlot()) {
+    return op->IsFloatRegister() || op->IsFloatStackSlot();
+  } else if (IsDoubleRegister() || IsDoubleStackSlot()) {
+    return op->IsDoubleRegister() || op->IsDoubleStackSlot();
+  } else {
+    return (IsSimd128Register() || IsSimd128StackSlot()) &&
+           (op->IsSimd128Register() || op->IsSimd128StackSlot());
+  }
+}
+
 void InstructionOperand::Print(const RegisterConfiguration* config) const {
   OFStream os(stdout);
   PrintableInstructionOperand wrapper;
......
...@@ -491,6 +491,9 @@ class LocationOperand : public InstructionOperand { ...@@ -491,6 +491,9 @@ class LocationOperand : public InstructionOperand {
UNREACHABLE(); UNREACHABLE();
} }
// Return true if the locations can be moved to one another.
bool IsCompatible(LocationOperand* op);
static LocationOperand* cast(InstructionOperand* op) { static LocationOperand* cast(InstructionOperand* op) {
DCHECK(op->IsAnyLocationOperand()); DCHECK(op->IsAnyLocationOperand());
return static_cast<LocationOperand*>(op); return static_cast<LocationOperand*>(op);
......
...@@ -3092,147 +3092,159 @@ void CodeGenerator::FinishCode() {} ...@@ -3092,147 +3092,159 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, nullptr);
  // Helper function to write the given constant to the dst register.
  auto MoveConstantToRegister = [&](Register dst, Constant src) {
    switch (src.type()) {
      case Constant::kInt32: {
        if (RelocInfo::IsWasmPtrReference(src.rmode())) {
          __ movq(dst, src.ToInt64(), src.rmode());
        } else {
          int32_t value = src.ToInt32();
          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
            __ movl(dst, Immediate(value, src.rmode()));
          } else if (value == 0) {
            __ xorl(dst, dst);
          } else {
            __ movl(dst, Immediate(value));
          }
        }
        break;
      }
      case Constant::kInt64:
        if (RelocInfo::IsWasmPtrReference(src.rmode())) {
          __ movq(dst, src.ToInt64(), src.rmode());
        } else {
          DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
          __ Set(dst, src.ToInt64());
        }
        break;
      case Constant::kFloat32:
        __ MoveNumber(dst, src.ToFloat32());
        break;
      case Constant::kFloat64:
        __ MoveNumber(dst, src.ToFloat64().value());
        break;
      case Constant::kExternalReference:
        __ Move(dst, src.ToExternalReference());
        break;
      case Constant::kHeapObject: {
        Handle<HeapObject> src_object = src.ToHeapObject();
        Heap::RootListIndex index;
        if (IsMaterializableFromRoot(src_object, &index)) {
          __ LoadRoot(dst, index);
        } else {
          __ Move(dst, src_object);
        }
        break;
      }
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(dcarney): load of labels on x64.
        break;
    }
  };
  // Dispatch on the source and destination operand kinds.
  switch (MoveType::InferMove(source, destination)) {
    case MoveType::kRegisterToRegister:
      if (source->IsRegister()) {
        __ movq(g.ToRegister(destination), g.ToRegister(source));
      } else {
        DCHECK(source->IsFPRegister());
        __ Movapd(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
      }
      return;
    case MoveType::kRegisterToStack: {
      Operand dst = g.ToOperand(destination);
      if (source->IsRegister()) {
        __ movq(dst, g.ToRegister(source));
      } else {
        DCHECK(source->IsFPRegister());
        XMMRegister src = g.ToDoubleRegister(source);
        MachineRepresentation rep =
            LocationOperand::cast(source)->representation();
        if (rep != MachineRepresentation::kSimd128) {
          __ Movsd(dst, src);
        } else {
          __ Movups(dst, src);
        }
      }
      return;
    }
    case MoveType::kStackToRegister: {
      Operand src = g.ToOperand(source);
      if (source->IsStackSlot()) {
        __ movq(g.ToRegister(destination), src);
      } else {
        DCHECK(source->IsFPStackSlot());
        XMMRegister dst = g.ToDoubleRegister(destination);
        MachineRepresentation rep =
            LocationOperand::cast(source)->representation();
        if (rep != MachineRepresentation::kSimd128) {
          __ Movsd(dst, src);
        } else {
          __ Movups(dst, src);
        }
      }
      return;
    }
    case MoveType::kStackToStack: {
      Operand src = g.ToOperand(source);
      Operand dst = g.ToOperand(destination);
      if (source->IsStackSlot()) {
        // Spill on demand to use a temporary register for memory-to-memory
        // moves.
        __ movq(kScratchRegister, src);
        __ movq(dst, kScratchRegister);
      } else {
        MachineRepresentation rep =
            LocationOperand::cast(source)->representation();
        if (rep != MachineRepresentation::kSimd128) {
          __ Movsd(kScratchDoubleReg, src);
          __ Movsd(dst, kScratchDoubleReg);
        } else {
          DCHECK(source->IsSimd128StackSlot());
          __ Movups(kScratchDoubleReg, src);
          __ Movups(dst, kScratchDoubleReg);
        }
      }
      return;
    }
    case MoveType::kConstantToRegister: {
      Constant src = g.ToConstant(source);
      if (destination->IsRegister()) {
        MoveConstantToRegister(g.ToRegister(destination), src);
      } else {
        DCHECK(destination->IsFPRegister());
        XMMRegister dst = g.ToDoubleRegister(destination);
        if (src.type() == Constant::kFloat32) {
          // TODO(turbofan): Can we do better here?
          __ Move(dst, bit_cast<uint32_t>(src.ToFloat32()));
        } else {
          DCHECK_EQ(src.type(), Constant::kFloat64);
          __ Move(dst, src.ToFloat64().AsUint64());
        }
      }
      return;
    }
    case MoveType::kConstantToStack: {
      Constant src = g.ToConstant(source);
      Operand dst = g.ToOperand(destination);
      if (destination->IsStackSlot()) {
        MoveConstantToRegister(kScratchRegister, src);
        __ movq(dst, kScratchRegister);
      } else {
        DCHECK(destination->IsFPStackSlot());
        if (src.type() == Constant::kFloat32) {
          __ movl(dst, Immediate(bit_cast<uint32_t>(src.ToFloat32())));
        } else {
          DCHECK_EQ(src.type(), Constant::kFloat64);
          __ movq(kScratchRegister, src.ToFloat64().AsUint64());
          __ movq(dst, kScratchRegister);
        }
      }
      return;
    }
  }
  UNREACHABLE();
}
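The switch above dispatches on MoveType::InferMove, one of the new utility
functions this patch introduces to detect what type of move to perform for a
(source, destination) pair. Its definition is not part of this hunk; roughly,
it must perform a classification of the following shape (a sketch under that
assumption, not the patch's exact code):

// Sketch only: map an operand pair to the MoveType cases used above.
MoveType::Type InferMove(InstructionOperand* source,
                         InstructionOperand* destination) {
  if (source->IsConstant()) {
    return destination->IsAnyRegister() ? MoveType::kConstantToRegister
                                        : MoveType::kConstantToStack;
  }
  if (source->IsAnyRegister()) {
    return destination->IsAnyRegister() ? MoveType::kRegisterToRegister
                                        : MoveType::kRegisterToStack;
  }
  return destination->IsAnyRegister() ? MoveType::kStackToRegister
                                      : MoveType::kStackToStack;
}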
...
@@ -3241,84 +3253,94 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
  X64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  switch (MoveType::InferSwap(source, destination)) {
    case MoveType::kRegisterToRegister: {
      if (source->IsRegister()) {
        Register src = g.ToRegister(source);
        Register dst = g.ToRegister(destination);
        __ movq(kScratchRegister, src);
        __ movq(src, dst);
        __ movq(dst, kScratchRegister);
      } else {
        DCHECK(source->IsFPRegister());
        XMMRegister src = g.ToDoubleRegister(source);
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Movapd(kScratchDoubleReg, src);
        __ Movapd(src, dst);
        __ Movapd(dst, kScratchDoubleReg);
      }
      return;
    }
    case MoveType::kRegisterToStack: {
      if (source->IsRegister()) {
        Register src = g.ToRegister(source);
        __ pushq(src);
        frame_access_state()->IncreaseSPDelta(1);
        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                         kPointerSize);
        Operand dst = g.ToOperand(destination);
        __ movq(src, dst);
        frame_access_state()->IncreaseSPDelta(-1);
        dst = g.ToOperand(destination);
        __ popq(dst);
        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                         -kPointerSize);
      } else {
        DCHECK(source->IsFPRegister());
        XMMRegister src = g.ToDoubleRegister(source);
        Operand dst = g.ToOperand(destination);
        MachineRepresentation rep =
            LocationOperand::cast(source)->representation();
        if (rep != MachineRepresentation::kSimd128) {
          __ Movsd(kScratchDoubleReg, src);
          __ Movsd(src, dst);
          __ Movsd(dst, kScratchDoubleReg);
        } else {
          __ Movups(kScratchDoubleReg, src);
          __ Movups(src, dst);
          __ Movups(dst, kScratchDoubleReg);
        }
      }
      return;
    }
    case MoveType::kStackToStack: {
      Operand src = g.ToOperand(source);
      Operand dst = g.ToOperand(destination);
      MachineRepresentation rep =
          LocationOperand::cast(source)->representation();
      if (rep != MachineRepresentation::kSimd128) {
        Register tmp = kScratchRegister;
        __ movq(tmp, dst);
        __ pushq(src);  // Then use stack to copy src to destination.
        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                         kPointerSize);
        __ popq(dst);
        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                         -kPointerSize);
        __ movq(src, tmp);
      } else {
        // Without AVX, misaligned reads and writes will trap. Move using the
        // stack, in two parts.
        __ movups(kScratchDoubleReg, dst);  // Save dst in scratch register.
        __ pushq(src);  // Then use stack to copy src to destination.
        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                         kPointerSize);
        __ popq(dst);
        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                         -kPointerSize);
        __ pushq(g.ToOperand(source, kPointerSize));
        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                         kPointerSize);
        __ popq(g.ToOperand(destination, kPointerSize));
        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                         -kPointerSize);
        __ movups(src, kScratchDoubleReg);
      }
      return;
    }
    default:
      UNREACHABLE();
      break;
  }
}
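For clarity on the kSimd128 branch of kStackToStack above: it swaps two
16-byte slots using only one scratch XMM register by parking the destination
in kScratchDoubleReg and copying the source through the stack in pointer-sized
pieces. Schematically (offsets in bytes, assuming kPointerSize == 8; this
trace is our illustration, not part of the patch):

  movups xmm_scratch, dst        ; save all 16 bytes of dst
  push src+0  then  pop dst+0    ; copy low 8 bytes of src over dst
  push src+8  then  pop dst+8    ; copy high 8 bytes of src over dst
  movups src, xmm_scratch        ; store the saved dst into src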
...