Commit 16f2bcdb authored by Pierre Langlois, committed by Commit Bot

[turbofan] Refactor AssembleMove and AssembleSwap

The way the code generator's AssembleMove and AssembleSwap methods are
written makes it easy to lose track of which kind of move a given
sequence of instructions implements. This patch addresses that by
rewriting those methods as a switch over the inferred move type instead
of a chain of if/else statements.

To do this, introduce new utility functions to detect what type of move to
perform given a pair of InstructionOperands.

Bug: 
Change-Id: I32b146c86409e595b7b59a66bf43220899024fdd
Reviewed-on: https://chromium-review.googlesource.com/749201
Commit-Queue: Pierre Langlois <pierre.langlois@arm.com>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50966}
parent 396e7bc8
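
In outline, the patch replaces ad-hoc operand-kind tests with a single
up-front classification. Below is a minimal, self-contained C++ sketch of
that classification step; the MoveType name and its enumerators mirror the
patch, but the Operand struct here is a simplified stand-in for V8's
InstructionOperand, not the real type.

// Simplified stand-in for V8's InstructionOperand (hypothetical).
struct Operand {
  bool is_constant = false;
  bool is_register = false;  // otherwise: a stack slot
};

struct MoveType {
  enum Type {
    kRegisterToRegister,
    kRegisterToStack,
    kStackToRegister,
    kStackToStack,
    kConstantToRegister,
    kConstantToStack
  };

  // Classify a (source, destination) pair once, in the spirit of the
  // patch's CodeGenerator::MoveType::InferMove; AssembleMove then becomes
  // a single switch over the result instead of nested if/else tests.
  static Type InferMove(const Operand* source, const Operand* destination) {
    if (source->is_constant) {
      return destination->is_register ? kConstantToRegister : kConstantToStack;
    }
    if (source->is_register) {
      return destination->is_register ? kRegisterToRegister : kRegisterToStack;
    }
    return destination->is_register ? kStackToRegister : kStackToStack;
  }
};

Each backend's AssembleMove/AssembleSwap then dispatches on this value and
handles exactly one combination per case, which is what the
per-architecture hunks below do.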
@@ -241,22 +241,6 @@ void TurboAssembler::Ret(int drop, Condition cond) {
   Ret(cond);
 }
 
-void MacroAssembler::Swap(Register reg1,
-                          Register reg2,
-                          Register scratch,
-                          Condition cond) {
-  if (scratch == no_reg) {
-    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
-    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
-    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
-  } else {
-    mov(scratch, reg1, LeaveCC, cond);
-    mov(reg1, reg2, LeaveCC, cond);
-    mov(reg2, scratch, LeaveCC, cond);
-  }
-}
-
 void TurboAssembler::Call(Label* target) { bl(target); }
 
 void TurboAssembler::Push(Handle<HeapObject> handle) {
@@ -305,9 +289,17 @@ void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
   }
 }
 
-void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
-  if (srcdst0 == srcdst1) return;  // Swapping aliased registers emits nothing.
+void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
+  DCHECK(srcdst0 != srcdst1);
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  mov(scratch, srcdst0);
+  mov(srcdst0, srcdst1);
+  mov(srcdst1, scratch);
+}
+
+void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+  DCHECK(srcdst0 != srcdst1);
   DCHECK(VfpRegisterIsAvailable(srcdst0));
   DCHECK(VfpRegisterIsAvailable(srcdst1));
@@ -323,9 +315,8 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
 }
 
 void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
-  if (srcdst0 != srcdst1) {
-    vswp(srcdst0, srcdst1);
-  }
+  DCHECK(srcdst0 != srcdst1);
+  vswp(srcdst0, srcdst1);
 }
 
 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
...
@@ -482,7 +482,8 @@ class TurboAssembler : public Assembler {
   void VmovExtended(int dst_code, const MemOperand& src);
   void VmovExtended(const MemOperand& dst, int src_code);
 
-  // Register swap.
+  // Register swap. Note that the register operands should be distinct.
+  void Swap(Register srcdst0, Register srcdst1);
   void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
   void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
@@ -580,11 +581,6 @@ class MacroAssembler : public TurboAssembler {
   MacroAssembler(Isolate* isolate, void* buffer, int size,
                  CodeObjectRequired create_code_object);
 
-  // Swap two registers.  If the scratch register is omitted then a slightly
-  // less efficient form using xor instead of mov is emitted.
-  void Swap(Register reg1, Register reg2, Register scratch = no_reg,
-            Condition cond = al);
-
   void Mls(Register dst, Register src1, Register src2, Register srcA,
            Condition cond = al);
   void And(Register dst, Register src1, const Operand& src2,
...
@@ -1571,6 +1571,34 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
 void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
 void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
 
+void TurboAssembler::Swap(Register lhs, Register rhs) {
+  DCHECK(lhs.IsSameSizeAndType(rhs));
+  DCHECK(!lhs.Is(rhs));
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  Mov(temp, rhs);
+  Mov(rhs, lhs);
+  Mov(lhs, temp);
+}
+
+void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
+  DCHECK(lhs.IsSameSizeAndType(rhs));
+  DCHECK(!lhs.Is(rhs));
+  UseScratchRegisterScope temps(this);
+  VRegister temp = VRegister::no_reg();
+  if (lhs.IsS()) {
+    temp = temps.AcquireS();
+  } else if (lhs.IsD()) {
+    temp = temps.AcquireD();
+  } else {
+    DCHECK(lhs.IsQ());
+    temp = temps.AcquireQ();
+  }
+  Mov(temp, rhs);
+  Mov(rhs, lhs);
+  Mov(lhs, temp);
+}
+
 void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
...
@@ -255,6 +255,10 @@ class TurboAssembler : public Assembler {
   void Move(Register dst, Handle<HeapObject> x);
   void Move(Register dst, Smi* src);
 
+  // Register swap. Note that the register operands should be distinct.
+  void Swap(Register lhs, Register rhs);
+  void Swap(VRegister lhs, VRegister rhs);
+
   // NEON by element instructions.
 #define NEON_BYELEMENT_MACRO_LIST(V) \
   V(fmla, Fmla)                      \
@@ -2104,6 +2108,7 @@ class UseScratchRegisterScope {
   Register AcquireX() { return AcquireNextAvailable(available_).X(); }
   VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
   VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+  VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
 
   VRegister AcquireV(VectorFormat format) {
     return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
   }
...
@@ -44,21 +44,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
   }
 
   Operand InputImmediate(size_t index) {
-    Constant constant = ToConstant(instr_->InputAt(index));
-    switch (constant.type()) {
-      case Constant::kInt32:
-        return Operand(constant.ToInt32());
-      case Constant::kFloat32:
-        return Operand::EmbeddedNumber(constant.ToFloat32());
-      case Constant::kFloat64:
-        return Operand::EmbeddedNumber(constant.ToFloat64().value());
-      case Constant::kInt64:
-      case Constant::kExternalReference:
-      case Constant::kHeapObject:
-      case Constant::kRpoNumber:
-        break;
-    }
-    UNREACHABLE();
+    return ToImmediate(instr_->InputAt(index));
   }
 
   Operand InputOperand2(size_t first_index) {
@@ -124,6 +110,30 @@ class ArmOperandConverter final : public InstructionOperandConverter {
     return InputOffset(&first_index);
   }
 
+  Operand ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        if (RelocInfo::IsWasmReference(constant.rmode())) {
+          return Operand(constant.ToInt32(), constant.rmode());
+        } else {
+          return Operand(constant.ToInt32());
+        }
+      case Constant::kFloat32:
+        return Operand::EmbeddedNumber(constant.ToFloat32());
+      case Constant::kFloat64:
+        return Operand::EmbeddedNumber(constant.ToFloat64().value());
+      case Constant::kExternalReference:
+        return Operand(constant.ToExternalReference());
+      case Constant::kInt64:
+      case Constant::kHeapObject:
+      // TODO(dcarney): loading RPO constants on arm.
+      case Constant::kRpoNumber:
+        break;
+    }
+    UNREACHABLE();
+  }
+
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
@@ -2927,52 +2937,9 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      __ mov(g.ToRegister(destination), src);
-    } else {
-      __ str(src, g.ToMemOperand(destination));
-    }
-  } else if (source->IsStackSlot()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    MemOperand src = g.ToMemOperand(source);
-    if (destination->IsRegister()) {
-      __ ldr(g.ToRegister(destination), src);
-    } else {
-      Register temp = kScratchReg;
-      __ ldr(temp, src);
-      __ str(temp, g.ToMemOperand(destination));
-    }
-  } else if (source->IsConstant()) {
-    Constant src = g.ToConstant(source);
-    if (destination->IsRegister() || destination->IsStackSlot()) {
-      Register dst =
-          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
-      switch (src.type()) {
-        case Constant::kInt32:
-          if (RelocInfo::IsWasmReference(src.rmode())) {
-            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
-          } else {
-            __ mov(dst, Operand(src.ToInt32()));
-          }
-          break;
-        case Constant::kInt64:
-          UNREACHABLE();
-          break;
-        case Constant::kFloat32:
-          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
-          break;
-        case Constant::kFloat64:
-          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
-          break;
-        case Constant::kExternalReference:
-          __ mov(dst, Operand(src.ToExternalReference()));
-          break;
-        case Constant::kHeapObject: {
+  // Helper function to write the given constant to the dst register.
+  auto MoveConstantToRegister = [&](Register dst, Constant src) {
+    if (src.type() == Constant::kHeapObject) {
       Handle<HeapObject> src_object = src.ToHeapObject();
       Heap::RootListIndex index;
       if (IsMaterializableFromRoot(src_object, &index)) {
@@ -2980,101 +2947,77 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       } else {
         __ Move(dst, src_object);
       }
-          break;
-        }
-        case Constant::kRpoNumber:
-          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
-          break;
-      }
-      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
-    } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsFloatStackSlot()) {
-        MemOperand dst = g.ToMemOperand(destination);
-        Register temp = kScratchReg;
-        __ mov(temp, Operand(bit_cast<int32_t>(src.ToFloat32())));
-        __ str(temp, dst);
-      } else {
-        SwVfpRegister dst = g.ToFloatRegister(destination);
-        __ vmov(dst, Float32::FromBits(src.ToFloat32AsInt()));
-      }
-    } else {
-      DCHECK_EQ(Constant::kFloat64, src.type());
-      DwVfpRegister dst = destination->IsFPRegister()
-                              ? g.ToDoubleRegister(destination)
-                              : kScratchDoubleReg;
-      __ vmov(dst, src.ToFloat64(), kScratchReg);
-      if (destination->IsDoubleStackSlot()) {
-        __ vstr(dst, g.ToMemOperand(destination));
-      }
-    }
-  } else if (source->IsFPRegister()) {
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
-      DwVfpRegister src = g.ToDoubleRegister(source);
-      if (destination->IsDoubleRegister()) {
-        DwVfpRegister dst = g.ToDoubleRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsDoubleStackSlot());
-        __ vstr(src, g.ToMemOperand(destination));
-      }
-    } else if (rep == MachineRepresentation::kFloat32) {
-      // GapResolver may give us reg codes that don't map to actual s-registers.
-      // Generate code to work around those cases.
-      int src_code = LocationOperand::cast(source)->register_code();
-      if (destination->IsFloatRegister()) {
-        int dst_code = LocationOperand::cast(destination)->register_code();
-        __ VmovExtended(dst_code, src_code);
-      } else {
-        DCHECK(destination->IsFloatStackSlot());
-        __ VmovExtended(g.ToMemOperand(destination), src_code);
-      }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      QwNeonRegister src = g.ToSimd128Register(source);
-      if (destination->IsSimd128Register()) {
-        QwNeonRegister dst = g.ToSimd128Register(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsSimd128StackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
-        __ vst1(Neon8, NeonListOperand(src.low(), 2),
-                NeonMemOperand(kScratchReg));
-      }
-    }
-  } else if (source->IsFPStackSlot()) {
-    MemOperand src = g.ToMemOperand(source);
-    MachineRepresentation rep =
-        LocationOperand::cast(destination)->representation();
-    if (destination->IsFPRegister()) {
-      if (rep == MachineRepresentation::kFloat64) {
-        __ vldr(g.ToDoubleRegister(destination), src);
-      } else if (rep == MachineRepresentation::kFloat32) {
-        // GapResolver may give us reg codes that don't map to actual
-        // s-registers. Generate code to work around those cases.
-        int dst_code = LocationOperand::cast(destination)->register_code();
-        __ VmovExtended(dst_code, src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        QwNeonRegister dst = g.ToSimd128Register(destination);
-        __ add(kScratchReg, src.rn(), Operand(src.offset()));
-        __ vld1(Neon8, NeonListOperand(dst.low(), 2),
-                NeonMemOperand(kScratchReg));
-      }
-    } else {
-      DCHECK(destination->IsFPStackSlot());
-      if (rep == MachineRepresentation::kFloat64) {
-        DwVfpRegister temp = kScratchDoubleReg;
-        __ vldr(temp, src);
-        __ vstr(temp, g.ToMemOperand(destination));
-      } else if (rep == MachineRepresentation::kFloat32) {
-        SwVfpRegister temp = kScratchDoubleReg.low();
-        __ vldr(temp, src);
-        __ vstr(temp, g.ToMemOperand(destination));
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        MemOperand dst = g.ToMemOperand(destination);
-        __ add(kScratchReg, src.rn(), Operand(src.offset()));
-        __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
-                NeonMemOperand(kScratchReg));
+    } else {
+      __ mov(dst, g.ToImmediate(source));
+    }
+  };
+  switch (MoveType::InferMove(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ mov(g.ToRegister(destination), g.ToRegister(source));
+      } else if (source->IsFloatRegister()) {
+        DCHECK(destination->IsFloatRegister());
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        int src_code = LocationOperand::cast(source)->register_code();
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(dst_code, src_code);
+      } else if (source->IsDoubleRegister()) {
+        __ Move(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
+      } else {
+        __ Move(g.ToSimd128Register(destination), g.ToSimd128Register(source));
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      MemOperand dst = g.ToMemOperand(destination);
+      if (source->IsRegister()) {
+        __ str(g.ToRegister(source), dst);
+      } else if (source->IsFloatRegister()) {
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        int src_code = LocationOperand::cast(source)->register_code();
+        __ VmovExtended(dst, src_code);
+      } else if (source->IsDoubleRegister()) {
+        __ vstr(g.ToDoubleRegister(source), dst);
+      } else {
+        QwNeonRegister src = g.ToSimd128Register(source);
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vst1(Neon8, NeonListOperand(src.low(), 2),
+                NeonMemOperand(kScratchReg));
+      }
+      return;
+    }
+    case MoveType::kStackToRegister: {
+      MemOperand src = g.ToMemOperand(source);
+      if (source->IsStackSlot()) {
+        __ ldr(g.ToRegister(destination), src);
+      } else if (source->IsFloatStackSlot()) {
+        DCHECK(destination->IsFloatRegister());
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(dst_code, src);
+      } else if (source->IsDoubleStackSlot()) {
+        __ vldr(g.ToDoubleRegister(destination), src);
+      } else {
+        QwNeonRegister dst = g.ToSimd128Register(destination);
+        __ add(kScratchReg, src.rn(), Operand(src.offset()));
+        __ vld1(Neon8, NeonListOperand(dst.low(), 2),
+                NeonMemOperand(kScratchReg));
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      MemOperand src = g.ToMemOperand(source);
+      MemOperand dst = g.ToMemOperand(destination);
+      if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+        __ ldr(kScratchReg, src);
+        __ str(kScratchReg, dst);
+      } else if (source->IsDoubleStackSlot()) {
+        __ vldr(kScratchDoubleReg, src);
+        __ vstr(kScratchDoubleReg, dst);
+      } else {
+        DCHECK(source->IsSimd128StackSlot());
+        __ add(kScratchReg, src.rn(), Operand(src.offset()));
+        __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
@@ -3082,81 +3025,83 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
         __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
                 NeonMemOperand(kScratchReg));
       }
-    }
-  } else {
-    UNREACHABLE();
+      return;
+    }
+    case MoveType::kConstantToRegister: {
+      Constant src = g.ToConstant(source);
+      if (destination->IsRegister()) {
+        MoveConstantToRegister(g.ToRegister(destination), src);
+      } else if (destination->IsFloatRegister()) {
+        __ vmov(g.ToFloatRegister(destination),
+                Float32::FromBits(src.ToFloat32AsInt()));
+      } else {
+        __ vmov(g.ToDoubleRegister(destination), src.ToFloat64(), kScratchReg);
+      }
+      return;
+    }
+    case MoveType::kConstantToStack: {
+      Constant src = g.ToConstant(source);
+      MemOperand dst = g.ToMemOperand(destination);
+      if (destination->IsStackSlot()) {
+        MoveConstantToRegister(kScratchReg, src);
+        __ str(kScratchReg, dst);
+      } else if (destination->IsFloatStackSlot()) {
+        __ mov(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
+        __ str(kScratchReg, dst);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        __ vmov(kScratchDoubleReg, src.ToFloat64(), kScratchReg);
+        __ vstr(kScratchDoubleReg, g.ToMemOperand(destination));
+      }
+      return;
+    }
   }
+  UNREACHABLE();
 }
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    // Register-register.
-    Register temp = kScratchReg;
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      Register dst = g.ToRegister(destination);
-      __ Move(temp, src);
-      __ Move(src, dst);
-      __ Move(dst, temp);
-    } else {
-      DCHECK(destination->IsStackSlot());
-      MemOperand dst = g.ToMemOperand(destination);
-      __ mov(temp, src);
-      __ ldr(src, dst);
-      __ str(temp, dst);
-    }
-  } else if (source->IsStackSlot()) {
-    DCHECK(destination->IsStackSlot());
-    Register temp_0 = kScratchReg;
-    SwVfpRegister temp_1 = kScratchDoubleReg.low();
-    MemOperand src = g.ToMemOperand(source);
-    MemOperand dst = g.ToMemOperand(destination);
-    __ ldr(temp_0, src);
-    __ vldr(temp_1, dst);
-    __ str(temp_0, dst);
-    __ vstr(temp_1, src);
-  } else if (source->IsFPRegister()) {
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    LowDwVfpRegister temp = kScratchDoubleReg;
-    if (rep == MachineRepresentation::kFloat64) {
-      DwVfpRegister src = g.ToDoubleRegister(source);
-      if (destination->IsFPRegister()) {
-        DwVfpRegister dst = g.ToDoubleRegister(destination);
-        __ Swap(src, dst);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ Move(temp, src);
-        __ vldr(src, dst);
-        __ vstr(temp, dst);
-      }
-    } else if (rep == MachineRepresentation::kFloat32) {
-      int src_code = LocationOperand::cast(source)->register_code();
-      if (destination->IsFPRegister()) {
-        int dst_code = LocationOperand::cast(destination)->register_code();
-        __ VmovExtended(temp.low().code(), src_code);
-        __ VmovExtended(src_code, dst_code);
-        __ VmovExtended(dst_code, temp.low().code());
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ VmovExtended(temp.low().code(), src_code);
-        __ VmovExtended(src_code, dst);
-        __ vstr(temp.low(), dst);
-      }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      QwNeonRegister src = g.ToSimd128Register(source);
-      if (destination->IsFPRegister()) {
-        QwNeonRegister dst = g.ToSimd128Register(destination);
-        __ Swap(src, dst);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ Move(kScratchQuadReg, src);
-        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
-        __ vld1(Neon8, NeonListOperand(src.low(), 2),
+  switch (MoveType::InferSwap(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ Swap(g.ToRegister(source), g.ToRegister(destination));
+      } else if (source->IsFloatRegister()) {
+        DCHECK(destination->IsFloatRegister());
+        // GapResolver may give us reg codes that don't map to actual
+        // s-registers. Generate code to work around those cases.
+        LowDwVfpRegister temp = kScratchDoubleReg;
+        int src_code = LocationOperand::cast(source)->register_code();
+        int dst_code = LocationOperand::cast(destination)->register_code();
+        __ VmovExtended(temp.low().code(), src_code);
+        __ VmovExtended(src_code, dst_code);
+        __ VmovExtended(dst_code, temp.low().code());
+      } else if (source->IsDoubleRegister()) {
+        __ Swap(g.ToDoubleRegister(source), g.ToDoubleRegister(destination));
+      } else {
+        __ Swap(g.ToSimd128Register(source), g.ToSimd128Register(destination));
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      MemOperand dst = g.ToMemOperand(destination);
+      if (source->IsRegister()) {
+        Register src = g.ToRegister(source);
+        __ mov(kScratchReg, src);
+        __ ldr(src, dst);
+        __ str(kScratchReg, dst);
+      } else if (source->IsFloatRegister()) {
+        int src_code = LocationOperand::cast(source)->register_code();
+        LowDwVfpRegister temp = kScratchDoubleReg;
+        __ VmovExtended(temp.low().code(), src_code);
+        __ VmovExtended(src_code, dst);
+        __ vstr(temp.low(), dst);
+      } else if (source->IsDoubleRegister()) {
+        DwVfpRegister src = g.ToDoubleRegister(source);
+        __ Move(kScratchDoubleReg, src);
+        __ vldr(src, dst);
+        __ vstr(kScratchDoubleReg, dst);
+      } else {
+        QwNeonRegister src = g.ToSimd128Register(source);
+        __ Move(kScratchQuadReg, src);
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vld1(Neon8, NeonListOperand(src.low(), 2),
@@ -3164,32 +3109,35 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
         __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
                 NeonMemOperand(kScratchReg));
       }
-  } else if (source->IsFPStackSlot()) {
-    DCHECK(destination->IsFPStackSlot());
-    Register temp_0 = kScratchReg;
-    LowDwVfpRegister temp_1 = kScratchDoubleReg;
-    MemOperand src0 = g.ToMemOperand(source);
-    MemOperand dst0 = g.ToMemOperand(destination);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
-      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
-      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
-      __ vldr(temp_1, dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ ldr(temp_0, src1);
-      __ str(temp_0, dst1);
-      __ vstr(temp_1, src0);
-    } else if (rep == MachineRepresentation::kFloat32) {
-      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ vstr(temp_1.low(), src0);
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      MemOperand src1(src0.rn(), src0.offset() + kDoubleSize);
-      MemOperand dst1(dst0.rn(), dst0.offset() + kDoubleSize);
+      return;
+    }
+    case MoveType::kStackToStack: {
+      MemOperand src = g.ToMemOperand(source);
+      MemOperand dst = g.ToMemOperand(destination);
+      if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+        Register temp_0 = kScratchReg;
+        SwVfpRegister temp_1 = kScratchDoubleReg.low();
+        __ ldr(temp_0, src);
+        __ vldr(temp_1, dst);
+        __ str(temp_0, dst);
+        __ vstr(temp_1, src);
+      } else if (source->IsDoubleStackSlot()) {
+        Register temp_0 = kScratchReg;
+        LowDwVfpRegister temp_1 = kScratchDoubleReg;
+        // Save destination in temp_1.
+        __ vldr(temp_1, dst);
+        // Then use temp_0 to copy source to destination.
+        __ ldr(temp_0, src);
+        __ str(temp_0, dst);
+        __ ldr(temp_0, MemOperand(src.rn(), src.offset() + kPointerSize));
+        __ str(temp_0, MemOperand(dst.rn(), dst.offset() + kPointerSize));
+        __ vstr(temp_1, src);
+      } else {
+        DCHECK(source->IsSimd128StackSlot());
+        MemOperand src0 = src;
+        MemOperand dst0 = dst;
+        MemOperand src1(src.rn(), src.offset() + kDoubleSize);
+        MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
       __ vldr(kScratchQuadReg.low(), dst0);
       __ vldr(kScratchQuadReg.high(), src0);
       __ vstr(kScratchQuadReg.low(), src0);
@@ -3199,9 +3147,11 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
       __ vstr(kScratchQuadReg.low(), src1);
       __ vstr(kScratchQuadReg.high(), dst1);
       }
-    }
-  } else {
-    // No other combinations are possible.
-    UNREACHABLE();
+      return;
+    }
+    default:
+      UNREACHABLE();
+      break;
   }
 }
...
@@ -2486,33 +2486,8 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   Arm64OperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      __ Mov(g.ToRegister(destination), src);
-    } else {
-      __ Str(src, g.ToMemOperand(destination, tasm()));
-    }
-  } else if (source->IsStackSlot()) {
-    MemOperand src = g.ToMemOperand(source, tasm());
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    if (destination->IsRegister()) {
-      __ Ldr(g.ToRegister(destination), src);
-    } else {
-      UseScratchRegisterScope scope(tasm());
-      Register temp = scope.AcquireX();
-      __ Ldr(temp, src);
-      __ Str(temp, g.ToMemOperand(destination, tasm()));
-    }
-  } else if (source->IsConstant()) {
-    Constant src = g.ToConstant(ConstantOperand::cast(source));
-    if (destination->IsRegister() || destination->IsStackSlot()) {
-      UseScratchRegisterScope scope(tasm());
-      Register dst = destination->IsRegister() ? g.ToRegister(destination)
-                                               : scope.AcquireX();
+  // Helper function to write the given constant to the dst register.
+  auto MoveConstantToRegister = [&](Register dst, Constant src) {
     if (src.type() == Constant::kHeapObject) {
       Handle<HeapObject> src_object = src.ToHeapObject();
       Heap::RootListIndex index;
@@ -2524,115 +2499,166 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
     } else {
       __ Mov(dst, g.ToImmediate(source));
     }
-      if (destination->IsStackSlot()) {
-        __ Str(dst, g.ToMemOperand(destination, tasm()));
-      }
-    } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsFPRegister()) {
-        VRegister dst = g.ToDoubleRegister(destination).S();
-        __ Fmov(dst, src.ToFloat32());
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
-          __ Str(wzr, g.ToMemOperand(destination, tasm()));
-        } else {
-          UseScratchRegisterScope scope(tasm());
-          VRegister temp = scope.AcquireS();
-          __ Fmov(temp, src.ToFloat32());
-          __ Str(temp, g.ToMemOperand(destination, tasm()));
-        }
-      }
-    } else {
-      DCHECK_EQ(Constant::kFloat64, src.type());
-      if (destination->IsFPRegister()) {
-        VRegister dst = g.ToDoubleRegister(destination);
-        __ Fmov(dst, src.ToFloat64().value());
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        if (src.ToFloat64().AsUint64() == 0) {
-          __ Str(xzr, g.ToMemOperand(destination, tasm()));
-        } else {
-          UseScratchRegisterScope scope(tasm());
-          VRegister temp = scope.AcquireD();
-          __ Fmov(temp, src.ToFloat64().value());
-          __ Str(temp, g.ToMemOperand(destination, tasm()));
-        }
-      }
-    }
-  } else if (source->IsFPRegister()) {
-    VRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      VRegister dst = g.ToDoubleRegister(destination);
-      if (destination->IsSimd128Register()) {
-        __ Mov(dst.Q(), src.Q());
-      } else {
-        __ Mov(dst, src);
-      }
-    } else {
-      DCHECK(destination->IsFPStackSlot());
-      MemOperand dst = g.ToMemOperand(destination, tasm());
-      if (destination->IsSimd128StackSlot()) {
-        __ Str(src.Q(), dst);
-      } else {
-        __ Str(src, dst);
-      }
-    }
-  } else if (source->IsFPStackSlot()) {
-    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
-    MemOperand src = g.ToMemOperand(source, tasm());
-    if (destination->IsFPRegister()) {
-      VRegister dst = g.ToDoubleRegister(destination);
-      if (destination->IsSimd128Register()) {
-        __ Ldr(dst.Q(), src);
-      } else {
-        __ Ldr(dst, src);
-      }
-    } else {
-      UseScratchRegisterScope scope(tasm());
-      VRegister temp = scope.AcquireD();
-      MemOperand dst = g.ToMemOperand(destination, tasm());
-      if (destination->IsSimd128StackSlot()) {
-        __ Ldr(temp.Q(), src);
-        __ Str(temp.Q(), dst);
-      } else {
-        __ Ldr(temp, src);
-        __ Str(temp, dst);
-      }
-    }
-  } else {
-    UNREACHABLE();
-  }
+  };
+  switch (MoveType::InferMove(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ Mov(g.ToRegister(destination), g.ToRegister(source));
+      } else if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+        __ Mov(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
+      } else {
+        DCHECK(source->IsSimd128Register());
+        __ Mov(g.ToDoubleRegister(destination).Q(),
+               g.ToDoubleRegister(source).Q());
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      if (source->IsRegister()) {
+        __ Str(g.ToRegister(source), dst);
+      } else {
+        VRegister src = g.ToDoubleRegister(source);
+        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+          __ Str(src, dst);
+        } else {
+          DCHECK(source->IsSimd128Register());
+          __ Str(src.Q(), dst);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToRegister: {
+      MemOperand src = g.ToMemOperand(source, tasm());
+      if (destination->IsRegister()) {
+        __ Ldr(g.ToRegister(destination), src);
+      } else {
+        VRegister dst = g.ToDoubleRegister(destination);
+        if (destination->IsFloatRegister() || destination->IsDoubleRegister()) {
+          __ Ldr(dst, src);
+        } else {
+          DCHECK(destination->IsSimd128Register());
+          __ Ldr(dst.Q(), src);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      MemOperand src = g.ToMemOperand(source, tasm());
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      if (source->IsSimd128StackSlot()) {
+        UseScratchRegisterScope scope(tasm());
+        VRegister temp = scope.AcquireQ();
+        __ Ldr(temp, src);
+        __ Str(temp, dst);
+      } else {
+        UseScratchRegisterScope scope(tasm());
+        Register temp = scope.AcquireX();
+        __ Ldr(temp, src);
+        __ Str(temp, dst);
+      }
+      return;
+    }
+    case MoveType::kConstantToRegister: {
+      Constant src = g.ToConstant(source);
+      if (destination->IsRegister()) {
+        MoveConstantToRegister(g.ToRegister(destination), src);
+      } else {
+        VRegister dst = g.ToDoubleRegister(destination);
+        if (destination->IsFloatRegister()) {
+          __ Fmov(dst.S(), src.ToFloat32());
+        } else {
+          DCHECK(destination->IsDoubleRegister());
+          __ Fmov(dst, src.ToFloat64().value());
+        }
+      }
+      return;
+    }
+    case MoveType::kConstantToStack: {
+      Constant src = g.ToConstant(source);
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      if (destination->IsStackSlot()) {
+        UseScratchRegisterScope scope(tasm());
+        Register temp = scope.AcquireX();
+        MoveConstantToRegister(temp, src);
+        __ Str(temp, dst);
+      } else if (destination->IsFloatStackSlot()) {
+        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+          __ Str(wzr, dst);
+        } else {
+          UseScratchRegisterScope scope(tasm());
+          VRegister temp = scope.AcquireS();
+          __ Fmov(temp, src.ToFloat32());
+          __ Str(temp, dst);
+        }
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        if (src.ToFloat64().AsUint64() == 0) {
+          __ Str(xzr, dst);
+        } else {
+          UseScratchRegisterScope scope(tasm());
+          VRegister temp = scope.AcquireD();
+          __ Fmov(temp, src.ToFloat64().value());
+          __ Str(temp, dst);
+        }
+      }
+      return;
+    }
+  }
+  UNREACHABLE();
 }
 
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
   Arm64OperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    // Register-register.
-    UseScratchRegisterScope scope(tasm());
-    Register temp = scope.AcquireX();
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      Register dst = g.ToRegister(destination);
-      __ Mov(temp, src);
-      __ Mov(src, dst);
-      __ Mov(dst, temp);
-    } else {
-      DCHECK(destination->IsStackSlot());
-      MemOperand dst = g.ToMemOperand(destination, tasm());
-      __ Mov(temp, src);
-      __ Ldr(src, dst);
-      __ Str(temp, dst);
-    }
-  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
-    UseScratchRegisterScope scope(tasm());
-    VRegister temp_0 = scope.AcquireD();
-    VRegister temp_1 = scope.AcquireD();
-    MemOperand src = g.ToMemOperand(source, tasm());
-    MemOperand dst = g.ToMemOperand(destination, tasm());
-    if (source->IsSimd128StackSlot()) {
-      __ Ldr(temp_0.Q(), src);
-      __ Ldr(temp_1.Q(), dst);
+  switch (MoveType::InferSwap(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ Swap(g.ToRegister(source), g.ToRegister(destination));
+      } else {
+        VRegister src = g.ToDoubleRegister(source);
+        VRegister dst = g.ToDoubleRegister(destination);
+        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+          __ Swap(src, dst);
+        } else {
+          DCHECK(source->IsSimd128Register());
+          __ Swap(src.Q(), dst.Q());
+        }
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      UseScratchRegisterScope scope(tasm());
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      if (source->IsRegister()) {
+        Register temp = scope.AcquireX();
+        Register src = g.ToRegister(source);
+        __ Mov(temp, src);
+        __ Ldr(src, dst);
+        __ Str(temp, dst);
+      } else {
+        UseScratchRegisterScope scope(tasm());
+        VRegister src = g.ToDoubleRegister(source);
+        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+          VRegister temp = scope.AcquireD();
+          __ Mov(temp, src);
+          __ Ldr(src, dst);
+          __ Str(temp, dst);
+        } else {
+          DCHECK(source->IsSimd128Register());
+          VRegister temp = scope.AcquireQ();
+          __ Mov(temp, src.Q());
+          __ Ldr(src.Q(), dst);
+          __ Str(temp, dst);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      UseScratchRegisterScope scope(tasm());
+      MemOperand src = g.ToMemOperand(source, tasm());
+      MemOperand dst = g.ToMemOperand(destination, tasm());
+      VRegister temp_0 = scope.AcquireD();
+      VRegister temp_1 = scope.AcquireD();
+      if (source->IsSimd128StackSlot()) {
+        __ Ldr(temp_0.Q(), src);
+        __ Ldr(temp_1.Q(), dst);
@@ -2644,37 +2670,11 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
         __ Str(temp_0, dst);
         __ Str(temp_1, src);
       }
-  } else if (source->IsFPRegister()) {
-    UseScratchRegisterScope scope(tasm());
-    VRegister temp = scope.AcquireD();
-    VRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      VRegister dst = g.ToDoubleRegister(destination);
-      if (source->IsSimd128Register()) {
-        __ Mov(temp.Q(), src.Q());
-        __ Mov(src.Q(), dst.Q());
-        __ Mov(dst.Q(), temp.Q());
-      } else {
-        __ Mov(temp, src);
-        __ Mov(src, dst);
-        __ Mov(dst, temp);
-      }
-    } else {
-      DCHECK(destination->IsFPStackSlot());
-      MemOperand dst = g.ToMemOperand(destination, tasm());
-      if (source->IsSimd128Register()) {
-        __ Mov(temp.Q(), src.Q());
-        __ Ldr(src.Q(), dst);
-        __ Str(temp.Q(), dst);
-      } else {
-        __ Mov(temp, src);
-        __ Ldr(src, dst);
-        __ Str(temp, dst);
-      }
-    }
-  } else {
-    // No other combinations are possible.
-    UNREACHABLE();
+      return;
+    }
+    default:
+      UNREACHABLE();
+      break;
   }
 }
...
@@ -488,6 +488,54 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
   pushes->resize(push_count);
 }
 
+CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
+    InstructionOperand* source, InstructionOperand* destination) {
+  if (source->IsConstant()) {
+    if (destination->IsAnyRegister()) {
+      return MoveType::kConstantToRegister;
+    } else {
+      DCHECK(destination->IsAnyStackSlot());
+      return MoveType::kConstantToStack;
+    }
+  }
+  DCHECK(LocationOperand::cast(source)->IsCompatible(
+      LocationOperand::cast(destination)));
+  if (source->IsAnyRegister()) {
+    if (destination->IsAnyRegister()) {
+      return MoveType::kRegisterToRegister;
+    } else {
+      DCHECK(destination->IsAnyStackSlot());
+      return MoveType::kRegisterToStack;
+    }
+  } else {
+    DCHECK(source->IsAnyStackSlot());
+    if (destination->IsAnyRegister()) {
+      return MoveType::kStackToRegister;
+    } else {
+      DCHECK(destination->IsAnyStackSlot());
+      return MoveType::kStackToStack;
+    }
+  }
+}
+
+CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
+    InstructionOperand* source, InstructionOperand* destination) {
+  DCHECK(LocationOperand::cast(source)->IsCompatible(
+      LocationOperand::cast(destination)));
+  if (source->IsAnyRegister()) {
+    if (destination->IsAnyRegister()) {
+      return MoveType::kRegisterToRegister;
+    } else {
+      DCHECK(destination->IsAnyStackSlot());
+      return MoveType::kRegisterToStack;
+    }
+  } else {
+    DCHECK(source->IsAnyStackSlot());
+    DCHECK(destination->IsAnyStackSlot());
+    return MoveType::kStackToStack;
+  }
+}
+
 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     Instruction* instr, const InstructionBlock* block) {
   int first_unused_stack_slot;
...
@@ -224,6 +224,26 @@ class CodeGenerator final : public GapResolver::Assembler {
                                      PushTypeFlags push_type,
                                      ZoneVector<MoveOperands*>* pushes);
 
+  class MoveType {
+   public:
+    enum Type {
+      kRegisterToRegister,
+      kRegisterToStack,
+      kStackToRegister,
+      kStackToStack,
+      kConstantToRegister,
+      kConstantToStack
+    };
+
+    // Detect what type of move or swap needs to be performed. Note that these
+    // functions do not take into account the representation (Tagged, FP,
+    // ...etc).
+    static Type InferMove(InstructionOperand* source,
+                          InstructionOperand* destination);
+    static Type InferSwap(InstructionOperand* source,
+                          InstructionOperand* destination);
+  };
+
   // Called before a tail call |instr|'s gap moves are assembled and allows
   // gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
   // need it before gap moves or conversion of certain gap moves into pushes.
...
@@ -3510,119 +3510,129 @@ void CodeGenerator::FinishCode() {}
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
   IA32OperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
-  if (source->IsRegister()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Register src = g.ToRegister(source);
-    Operand dst = g.ToOperand(destination);
-    __ mov(dst, src);
-  } else if (source->IsStackSlot()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Operand src = g.ToOperand(source);
-    if (destination->IsRegister()) {
-      Register dst = g.ToRegister(destination);
-      __ mov(dst, src);
-    } else {
-      Operand dst = g.ToOperand(destination);
-      __ push(src);
-      __ pop(dst);
-    }
-  } else if (source->IsConstant()) {
-    Constant src_constant = g.ToConstant(source);
-    if (src_constant.type() == Constant::kHeapObject) {
-      Handle<HeapObject> src = src_constant.ToHeapObject();
-      if (destination->IsRegister()) {
-        Register dst = g.ToRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsStackSlot());
-        Operand dst = g.ToOperand(destination);
-        __ mov(dst, src);
-      }
-    } else if (destination->IsRegister()) {
-      Register dst = g.ToRegister(destination);
-      __ Move(dst, g.ToImmediate(source));
-    } else if (destination->IsStackSlot()) {
-      Operand dst = g.ToOperand(destination);
-      __ Move(dst, g.ToImmediate(source));
-    } else if (src_constant.type() == Constant::kFloat32) {
-      // TODO(turbofan): Can we do better here?
-      uint32_t src = src_constant.ToFloat32AsInt();
-      if (destination->IsFPRegister()) {
-        XMMRegister dst = g.ToDoubleRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        Operand dst = g.ToOperand(destination);
-        __ Move(dst, Immediate(src));
-      }
-    } else {
-      DCHECK_EQ(Constant::kFloat64, src_constant.type());
-      uint64_t src = src_constant.ToFloat64().AsUint64();
-      uint32_t lower = static_cast<uint32_t>(src);
-      uint32_t upper = static_cast<uint32_t>(src >> 32);
-      if (destination->IsFPRegister()) {
-        XMMRegister dst = g.ToDoubleRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        Operand dst0 = g.ToOperand(destination);
-        Operand dst1 = g.ToOperand(destination, kPointerSize);
-        __ Move(dst0, Immediate(lower));
-        __ Move(dst1, Immediate(upper));
-      }
-    }
-  } else if (source->IsFPRegister()) {
-    XMMRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      XMMRegister dst = g.ToDoubleRegister(destination);
-      __ movaps(dst, src);
-    } else {
-      DCHECK(destination->IsFPStackSlot());
-      Operand dst = g.ToOperand(destination);
-      MachineRepresentation rep =
-          LocationOperand::cast(source)->representation();
-      if (rep == MachineRepresentation::kFloat64) {
-        __ movsd(dst, src);
-      } else if (rep == MachineRepresentation::kFloat32) {
-        __ movss(dst, src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        __ movups(dst, src);
-      }
-    }
-  } else if (source->IsFPStackSlot()) {
-    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
-    Operand src = g.ToOperand(source);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (destination->IsFPRegister()) {
-      XMMRegister dst = g.ToDoubleRegister(destination);
-      if (rep == MachineRepresentation::kFloat64) {
-        __ movsd(dst, src);
-      } else if (rep == MachineRepresentation::kFloat32) {
-        __ movss(dst, src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        __ movups(dst, src);
-      }
-    } else {
-      Operand dst = g.ToOperand(destination);
-      if (rep == MachineRepresentation::kFloat64) {
-        __ movsd(kScratchDoubleReg, src);
-        __ movsd(dst, kScratchDoubleReg);
-      } else if (rep == MachineRepresentation::kFloat32) {
-        __ movss(kScratchDoubleReg, src);
-        __ movss(dst, kScratchDoubleReg);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-        __ movups(kScratchDoubleReg, src);
-        __ movups(dst, kScratchDoubleReg);
-      }
-    }
-  } else {
-    UNREACHABLE();
-  }
+  // Dispatch on the source and destination operand kinds.
+  switch (MoveType::InferMove(source, destination)) {
+    case MoveType::kRegisterToRegister:
+      if (source->IsRegister()) {
+        __ mov(g.ToRegister(destination), g.ToRegister(source));
+      } else {
+        DCHECK(source->IsFPRegister());
+        __ movaps(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
+      }
+      return;
+    case MoveType::kRegisterToStack: {
+      Operand dst = g.ToOperand(destination);
+      if (source->IsRegister()) {
+        __ mov(dst, g.ToRegister(source));
+      } else {
+        DCHECK(source->IsFPRegister());
+        XMMRegister src = g.ToDoubleRegister(source);
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(dst, src);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(dst, src);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(dst, src);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToRegister: {
+      Operand src = g.ToOperand(source);
+      if (source->IsStackSlot()) {
+        __ mov(g.ToRegister(destination), src);
+      } else {
+        DCHECK(source->IsFPStackSlot());
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(dst, src);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(dst, src);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(dst, src);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      Operand src = g.ToOperand(source);
+      Operand dst = g.ToOperand(destination);
+      if (source->IsStackSlot()) {
+        __ push(src);
+        __ pop(dst);
+      } else {
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(kScratchDoubleReg, src);
+          __ movss(dst, kScratchDoubleReg);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(kScratchDoubleReg, src);
+          __ movsd(dst, kScratchDoubleReg);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(kScratchDoubleReg, src);
+          __ movups(dst, kScratchDoubleReg);
+        }
+      }
+      return;
+    }
+    case MoveType::kConstantToRegister: {
+      Constant src = g.ToConstant(source);
+      if (destination->IsRegister()) {
+        Register dst = g.ToRegister(destination);
+        if (src.type() == Constant::kHeapObject) {
+          __ Move(dst, src.ToHeapObject());
+        } else {
+          __ Move(dst, g.ToImmediate(source));
+        }
+      } else {
+        DCHECK(destination->IsFPRegister());
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        if (src.type() == Constant::kFloat32) {
+          // TODO(turbofan): Can we do better here?
+          __ Move(dst, src.ToFloat32AsInt());
+        } else {
+          DCHECK_EQ(src.type(), Constant::kFloat64);
+          __ Move(dst, src.ToFloat64().AsUint64());
+        }
+      }
+      return;
+    }
+    case MoveType::kConstantToStack: {
+      Constant src = g.ToConstant(source);
+      Operand dst = g.ToOperand(destination);
+      if (destination->IsStackSlot()) {
+        if (src.type() == Constant::kHeapObject) {
+          __ mov(dst, src.ToHeapObject());
+        } else {
+          __ Move(dst, g.ToImmediate(source));
+        }
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        if (src.type() == Constant::kFloat32) {
+          __ Move(dst, Immediate(src.ToFloat32AsInt()));
+        } else {
+          DCHECK_EQ(src.type(), Constant::kFloat64);
+          uint64_t constant_value = src.ToFloat64().AsUint64();
+          uint32_t lower = static_cast<uint32_t>(constant_value);
+          uint32_t upper = static_cast<uint32_t>(constant_value >> 32);
+          Operand dst0 = dst;
+          Operand dst1 = g.ToOperand(destination, kPointerSize);
+          __ Move(dst0, Immediate(lower));
+          __ Move(dst1, Immediate(upper));
+        }
+      }
+      return;
+    }
+  }
+  UNREACHABLE();
 }
@@ -3631,15 +3641,26 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
   IA32OperandConverter g(this, nullptr);
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
-  if (source->IsRegister() && destination->IsRegister()) {
-    // Register-register.
+  switch (MoveType::InferSwap(source, destination)) {
+    case MoveType::kRegisterToRegister: {
+      if (source->IsRegister()) {
         Register src = g.ToRegister(source);
         Register dst = g.ToRegister(destination);
         __ push(src);
         __ mov(src, dst);
         __ pop(dst);
-  } else if (source->IsRegister() && destination->IsStackSlot()) {
-    // Register-memory.
+      } else {
+        DCHECK(source->IsFPRegister());
+        XMMRegister src = g.ToDoubleRegister(source);
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        __ movaps(kScratchDoubleReg, src);
+        __ movaps(src, dst);
+        __ movaps(dst, kScratchDoubleReg);
+      }
+      return;
+    }
+    case MoveType::kRegisterToStack: {
+      if (source->IsRegister()) {
         Register src = g.ToRegister(source);
         __ push(src);
         frame_access_state()->IncreaseSPDelta(1);
@@ -3648,8 +3669,31 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
         frame_access_state()->IncreaseSPDelta(-1);
         dst = g.ToOperand(destination);
         __ pop(dst);
-  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
-    // Memory-memory.
+      } else {
+        DCHECK(source->IsFPRegister());
+        XMMRegister src = g.ToDoubleRegister(source);
+        Operand dst = g.ToOperand(destination);
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(kScratchDoubleReg, dst);
+          __ movss(dst, src);
+          __ movaps(src, kScratchDoubleReg);
+        } else if (rep == MachineRepresentation::kFloat64) {
+          __ movsd(kScratchDoubleReg, dst);
+          __ movsd(dst, src);
+          __ movaps(src, kScratchDoubleReg);
+        } else {
+          DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+          __ movups(kScratchDoubleReg, dst);
+          __ movups(dst, src);
+          __ movups(src, kScratchDoubleReg);
+        }
+      }
+      return;
+    }
+    case MoveType::kStackToStack: {
+      if (source->IsStackSlot()) {
         Operand dst1 = g.ToOperand(destination);
         __ push(dst1);
         frame_access_state()->IncreaseSPDelta(1);
@@ -3660,49 +3704,24 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
         frame_access_state()->IncreaseSPDelta(-1);
         Operand src2 = g.ToOperand(source);
         __ pop(src2);
-  } else if (source->IsFPRegister() && destination->IsFPRegister()) {
-    // XMM register-register swap.
-    XMMRegister src = g.ToDoubleRegister(source);
-    XMMRegister dst = g.ToDoubleRegister(destination);
-    __ movaps(kScratchDoubleReg, src);
-    __ movaps(src, dst);
-    __ movaps(dst, kScratchDoubleReg);
-  } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
-    // XMM register-memory swap.
-    XMMRegister reg = g.ToDoubleRegister(source);
-    Operand other = g.ToOperand(destination);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
-      __ movsd(kScratchDoubleReg, other);
-      __ movsd(other, reg);
-      __ movaps(reg, kScratchDoubleReg);
-    } else if (rep == MachineRepresentation::kFloat32) {
-      __ movss(kScratchDoubleReg, other);
-      __ movss(other, reg);
-      __ movaps(reg, kScratchDoubleReg);
-    } else {
-      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
-      __ movups(kScratchDoubleReg, other);
-      __ movups(other, reg);
-      __ movups(reg, kScratchDoubleReg);
-    }
-  } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
-    // Double-width memory-to-memory.
+      } else {
+        DCHECK(source->IsFPStackSlot());
         Operand src0 = g.ToOperand(source);
         Operand dst0 = g.ToOperand(destination);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
+        MachineRepresentation rep =
+            LocationOperand::cast(source)->representation();
+        if (rep == MachineRepresentation::kFloat32) {
+          __ movss(kScratchDoubleReg, dst0);  // Save dst in scratch register.
+          __ push(src0);  // Then use stack to copy src to destination.
+          __ pop(dst0);
+          __ movss(src0, kScratchDoubleReg);
+        } else if (rep == MachineRepresentation::kFloat64) {
           __ movsd(kScratchDoubleReg, dst0);  // Save dst in scratch register.
           __ push(src0);  // Then use stack to copy src to destination.
           __ pop(dst0);
           __ push(g.ToOperand(source, kPointerSize));
           __ pop(g.ToOperand(destination, kPointerSize));
           __ movsd(src0, kScratchDoubleReg);
-    } else if (rep == MachineRepresentation::kFloat32) {
-      __ movss(kScratchDoubleReg, dst0);  // Save dst in scratch register.
-      __ push(src0);  // Then use stack to copy src to destination.
-      __ pop(dst0);
-      __ movss(src0, kScratchDoubleReg);
         } else {
           DCHECK_EQ(MachineRepresentation::kSimd128, rep);
           __ movups(kScratchDoubleReg, dst0);  // Save dst in scratch register.
@@ -3716,9 +3735,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
           __ pop(g.ToOperand(destination, 3 * kPointerSize));
           __ movups(src0, kScratchDoubleReg);
         }
-  } else {
-    // No other combinations are possible.
-    UNREACHABLE();
+      }
+      return;
+    }
+    default:
+      UNREACHABLE();
+      break;
   }
 }
...
@@ -96,6 +96,26 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
   return false;
 }
 
+bool LocationOperand::IsCompatible(LocationOperand* op) {
+  if (IsRegister() || IsStackSlot()) {
+    return op->IsRegister() || op->IsStackSlot();
+  } else if (kSimpleFPAliasing) {
+    // A backend may choose to generate the same instruction sequence regardless
+    // of the FP representation. As a result, we can relax the compatibility and
+    // allow a Double to be moved in a Float for example. However, this is only
+    // allowed if registers do not overlap.
+    return (IsFPRegister() || IsFPStackSlot()) &&
+           (op->IsFPRegister() || op->IsFPStackSlot());
+  } else if (IsFloatRegister() || IsFloatStackSlot()) {
+    return op->IsFloatRegister() || op->IsFloatStackSlot();
+  } else if (IsDoubleRegister() || IsDoubleStackSlot()) {
+    return op->IsDoubleRegister() || op->IsDoubleStackSlot();
+  } else {
+    return (IsSimd128Register() || IsSimd128StackSlot()) &&
+           (op->IsSimd128Register() || op->IsSimd128StackSlot());
+  }
+}
+
 void InstructionOperand::Print(const RegisterConfiguration* config) const {
   OFStream os(stdout);
   PrintableInstructionOperand wrapper;
...
...@@ -491,6 +491,9 @@ class LocationOperand : public InstructionOperand { ...@@ -491,6 +491,9 @@ class LocationOperand : public InstructionOperand {
UNREACHABLE(); UNREACHABLE();
} }
// Return true if the locations can be moved to one another.
bool IsCompatible(LocationOperand* op);
static LocationOperand* cast(InstructionOperand* op) {
DCHECK(op->IsAnyLocationOperand());
return static_cast<LocationOperand*>(op);
......
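The x64 changes below dispatch on MoveType::InferMove and MoveType::InferSwap. The declaration of that helper is not shown in this excerpt; the following sketch is reconstructed from the call sites only, so the exact signatures are assumptions:

// Sketch reconstructed from the call sites below; not the actual declaration.
class MoveType {
 public:
  enum Type {
    kRegisterToRegister,
    kRegisterToStack,
    kStackToRegister,
    kStackToStack,
    kConstantToRegister,
    kConstantToStack,
  };
  // Classifies a (source, destination) pair for AssembleMove.
  static Type InferMove(InstructionOperand* source,
                        InstructionOperand* destination);
  // Classifies a (source, destination) pair for AssembleSwap.
  static Type InferSwap(InstructionOperand* source,
                        InstructionOperand* destination);
};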
...@@ -3092,36 +3092,8 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
__ movq(g.ToRegister(destination), src);
} else {
__ movq(g.ToOperand(destination), src);
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Operand src = g.ToOperand(source);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ movq(dst, src);
} else {
// Spill on demand to use a temporary register for memory-to-memory
// moves.
Register tmp = kScratchRegister;
Operand dst = g.ToOperand(destination);
__ movq(tmp, src);
__ movq(dst, tmp);
}
} else if (source->IsConstant()) {
ConstantOperand* constant_source = ConstantOperand::cast(source);
Constant src = g.ToConstant(constant_source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: kScratchRegister;
// Helper function to write the given constant to the dst register.
auto MoveConstantToRegister = [&](Register dst, Constant src) {
switch (src.type()) {
case Constant::kInt32: {
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
...@@ -3169,38 +3141,24 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE(); // TODO(dcarney): load of labels on x64.
break;
}
if (destination->IsStackSlot()) {
__ movq(g.ToOperand(destination), kScratchRegister);
}
} else if (src.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
};
// Dispatch on the source and destination operand kinds.
switch (MoveType::InferMove(source, destination)) {
case MoveType::kRegisterToRegister:
if (source->IsRegister()) {
__ movq(g.ToRegister(destination), g.ToRegister(source));
} else {
DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ movl(dst, Immediate(src_const));
DCHECK(source->IsFPRegister());
__ Movapd(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
}
return;
case MoveType::kRegisterToStack: {
Operand dst = g.ToOperand(destination);
if (source->IsRegister()) {
__ movq(dst, g.ToRegister(source));
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
uint64_t src_const = src.ToFloat64().AsUint64();
if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
} else {
DCHECK(destination->IsFPStackSlot());
__ movq(kScratchRegister, src_const);
__ movq(g.ToOperand(destination), kScratchRegister);
}
}
} else if (source->IsFPRegister()) {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movapd(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
...@@ -3209,30 +3167,84 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Movups(dst, src);
}
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
return;
}
case MoveType::kStackToRegister: {
Operand src = g.ToOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (destination->IsFPRegister()) {
if (source->IsStackSlot()) {
__ movq(g.ToRegister(destination), src);
} else {
DCHECK(source->IsFPStackSlot());
XMMRegister dst = g.ToDoubleRegister(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
__ Movsd(dst, src);
} else {
__ Movups(dst, src);
}
} else {
}
return;
}
case MoveType::kStackToStack: {
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
if (source->IsStackSlot()) {
// Spill on demand to use a temporary register for memory-to-memory
// moves.
__ movq(kScratchRegister, src);
__ movq(dst, kScratchRegister);
} else {
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
__ Movsd(kScratchDoubleReg, src);
__ Movsd(dst, kScratchDoubleReg);
} else {
DCHECK(source->IsSimd128StackSlot());
__ Movups(kScratchDoubleReg, src);
__ Movups(dst, kScratchDoubleReg);
}
}
return;
}
case MoveType::kConstantToRegister: {
Constant src = g.ToConstant(source);
if (destination->IsRegister()) {
MoveConstantToRegister(g.ToRegister(destination), src);
} else {
UNREACHABLE();
DCHECK(destination->IsFPRegister());
XMMRegister dst = g.ToDoubleRegister(destination);
if (src.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
__ Move(dst, bit_cast<uint32_t>(src.ToFloat32()));
} else {
DCHECK_EQ(src.type(), Constant::kFloat64);
__ Move(dst, src.ToFloat64().AsUint64());
}
}
return;
}
case MoveType::kConstantToStack: {
Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
if (destination->IsStackSlot()) {
MoveConstantToRegister(kScratchRegister, src);
__ movq(dst, kScratchRegister);
} else {
DCHECK(destination->IsFPStackSlot());
if (src.type() == Constant::kFloat32) {
__ movl(dst, Immediate(bit_cast<uint32_t>(src.ToFloat32())));
} else {
DCHECK_EQ(src.type(), Constant::kFloat64);
__ movq(kScratchRegister, src.ToFloat64().AsUint64());
__ movq(dst, kScratchRegister);
}
}
return;
}
}
UNREACHABLE();
}
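The switch above relies on InferMove classifying every legal pair into one of the six cases. This is a minimal sketch of such a classifier, assuming the InstructionOperand predicates IsConstant() and IsAnyRegister() behave as their names suggest; it is not the actual implementation:

// Sketch only: mirrors the six cases handled by AssembleMove above.
MoveType::Type MoveType::InferMove(InstructionOperand* source,
                                   InstructionOperand* destination) {
  if (source->IsConstant()) {
    return destination->IsAnyRegister() ? kConstantToRegister
                                        : kConstantToStack;
  }
  if (source->IsAnyRegister()) {
    return destination->IsAnyRegister() ? kRegisterToRegister
                                        : kRegisterToStack;
  }
  // Any remaining source is a stack slot (plain, FP, or SIMD).
  return destination->IsAnyRegister() ? kStackToRegister : kStackToStack;
}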
...@@ -3241,14 +3253,26 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
switch (MoveType::InferSwap(source, destination)) {
case MoveType::kRegisterToRegister: {
if (source->IsRegister()) {
Register src = g.ToRegister(source);
Register dst = g.ToRegister(destination);
__ movq(kScratchRegister, src);
__ movq(src, dst);
__ movq(dst, kScratchRegister);
} else if (source->IsRegister() && destination->IsStackSlot()) {
} else {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movapd(kScratchDoubleReg, src);
__ Movapd(src, dst);
__ Movapd(dst, kScratchDoubleReg);
}
return;
}
case MoveType::kRegisterToStack: {
if (source->IsRegister()) {
Register src = g.ToRegister(source);
__ pushq(src);
frame_access_state()->IncreaseSPDelta(1);
...@@ -3261,12 +3285,29 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ popq(dst);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kPointerSize);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsFPStackSlot() && destination->IsFPStackSlot())) {
// Memory-memory.
} else {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
__ Movsd(kScratchDoubleReg, src);
__ Movsd(src, dst);
__ Movsd(dst, kScratchDoubleReg);
} else {
__ Movups(kScratchDoubleReg, src);
__ Movups(src, dst);
__ Movups(dst, kScratchDoubleReg);
}
}
return;
}
case MoveType::kStackToStack: {
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
Register tmp = kScratchRegister;
__ movq(tmp, dst);
...@@ -3295,30 +3336,11 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
-kPointerSize);
__ movups(src, kScratchDoubleReg);
}
} else if (source->IsFPRegister() && destination->IsFPRegister()) {
// XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movapd(kScratchDoubleReg, src);
__ Movapd(src, dst);
__ Movapd(dst, kScratchDoubleReg);
} else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
// XMM register-memory swap.
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep != MachineRepresentation::kSimd128) {
__ Movsd(kScratchDoubleReg, src);
__ Movsd(src, dst);
__ Movsd(dst, kScratchDoubleReg);
} else {
__ Movups(kScratchDoubleReg, src);
__ Movups(src, dst);
__ Movups(dst, kScratchDoubleReg);
return;
}
} else {
// No other combinations are possible.
default:
UNREACHABLE();
break;
}
}
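AssembleSwap's kRegisterToStack case reads the register operand from source, so the pair is assumed to arrive normalized with the register side first. Under the same naming assumptions as the InferMove sketch, InferSwap could look like this; again a sketch, not the actual code:

// Sketch only: swaps never involve constants, and a register/stack pair is
// assumed to be normalized so the register operand is the source.
MoveType::Type MoveType::InferSwap(InstructionOperand* source,
                                   InstructionOperand* destination) {
  DCHECK(!source->IsConstant());
  DCHECK(!destination->IsConstant());
  if (source->IsAnyRegister()) {
    return destination->IsAnyRegister() ? kRegisterToRegister
                                        : kRegisterToStack;
  }
  // Both operands live on the stack.
  return kStackToStack;
}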
......