Commit 9eb756fb authored by bbudge's avatar bbudge Committed by Commit bot

[arm] Support float registers for moves and swaps.

Uses float registers s0-s31 for moves and swaps when rep is kFloat32.
Changes bitcast to use float registers.

LOG=N
BUG=v8:4124

Review-Url: https://codereview.chromium.org/2039843003
Cr-Commit-Position: refs/heads/master@{#36791}
parent d84fe421
...@@ -268,6 +268,11 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) { ...@@ -268,6 +268,11 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
} }
} }
// Register-to-register move for a single-precision (32-bit) VFP register.
// Emits a vmov only when the registers differ, so a self-move costs nothing.
// Counterpart of the existing DwVfpRegister overload; added so kFloat32
// moves/swaps can use s0-s31 directly instead of going through a D register.
void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
if (!dst.is(src)) {
vmov(dst, src);
}
}
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
if (!dst.is(src)) { if (!dst.is(src)) {
...@@ -275,7 +280,6 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { ...@@ -275,7 +280,6 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
} }
} }
void MacroAssembler::Mls(Register dst, Register src1, Register src2, void MacroAssembler::Mls(Register dst, Register src1, Register src2,
Register srcA, Condition cond) { Register srcA, Condition cond) {
if (CpuFeatures::IsSupported(ARMv7)) { if (CpuFeatures::IsSupported(ARMv7)) {
......
...@@ -170,6 +170,7 @@ class MacroAssembler: public Assembler { ...@@ -170,6 +170,7 @@ class MacroAssembler: public Assembler {
mov(dst, src, sbit, cond); mov(dst, src, sbit, cond);
} }
} }
void Move(SwVfpRegister dst, SwVfpRegister src);
void Move(DwVfpRegister dst, DwVfpRegister src); void Move(DwVfpRegister dst, DwVfpRegister src);
void Load(Register dst, const MemOperand& src, Representation r); void Load(Register dst, const MemOperand& src, Representation r);
......
...@@ -1108,6 +1108,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1108,6 +1108,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
break; break;
} }
case kArmVmovU32F32:
__ vmov(i.OutputRegister(), i.InputFloat32Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovLowU32F64: case kArmVmovLowU32F64:
__ VmovLow(i.OutputRegister(), i.InputFloat64Register(0)); __ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
...@@ -1591,23 +1595,50 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, ...@@ -1591,23 +1595,50 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} }
} }
} else if (source->IsFPRegister()) { } else if (source->IsFPRegister()) {
DwVfpRegister src = g.ToDoubleRegister(source); MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (destination->IsFPRegister()) { if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister dst = g.ToDoubleRegister(destination); DwVfpRegister src = g.ToDoubleRegister(source);
__ Move(dst, src); if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
} else { } else {
DCHECK(destination->IsFPStackSlot()); DCHECK_EQ(MachineRepresentation::kFloat32, rep);
__ vstr(src, g.ToMemOperand(destination)); SwVfpRegister src = g.ToFloat32Register(source);
if (destination->IsFPRegister()) {
SwVfpRegister dst = g.ToFloat32Register(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
} }
} else if (source->IsFPStackSlot()) { } else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source); MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep =
LocationOperand::cast(destination)->representation();
if (destination->IsFPRegister()) { if (destination->IsFPRegister()) {
__ vldr(g.ToDoubleRegister(destination), src); if (rep == MachineRepresentation::kFloat64) {
__ vldr(g.ToDoubleRegister(destination), src);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, rep);
__ vldr(g.ToFloat32Register(destination), src);
}
} else { } else {
DwVfpRegister temp = kScratchDoubleReg; DCHECK(destination->IsFPStackSlot());
__ vldr(temp, src); if (rep == MachineRepresentation::kFloat64) {
__ vstr(temp, g.ToMemOperand(destination)); DwVfpRegister temp = kScratchDoubleReg;
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, rep);
SwVfpRegister temp = kScratchDoubleReg.low();
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
}
} }
} else { } else {
UNREACHABLE(); UNREACHABLE();
...@@ -1647,34 +1678,61 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, ...@@ -1647,34 +1678,61 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ str(temp_0, dst); __ str(temp_0, dst);
__ vstr(temp_1, src); __ vstr(temp_1, src);
} else if (source->IsFPRegister()) { } else if (source->IsFPRegister()) {
DwVfpRegister temp = kScratchDoubleReg; MachineRepresentation rep = LocationOperand::cast(source)->representation();
DwVfpRegister src = g.ToDoubleRegister(source); LowDwVfpRegister temp = kScratchDoubleReg;
if (destination->IsFPRegister()) { if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister dst = g.ToDoubleRegister(destination); DwVfpRegister src = g.ToDoubleRegister(source);
__ Move(temp, src); if (destination->IsFPRegister()) {
__ Move(src, dst); DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, temp); __ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ vldr(src, dst);
__ vstr(temp, dst);
}
} else { } else {
DCHECK(destination->IsFPStackSlot()); DCHECK_EQ(MachineRepresentation::kFloat32, rep);
MemOperand dst = g.ToMemOperand(destination); SwVfpRegister src = g.ToFloat32Register(source);
__ Move(temp, src); if (destination->IsFPRegister()) {
__ vldr(src, dst); SwVfpRegister dst = g.ToFloat32Register(destination);
__ vstr(temp, dst); __ Move(temp.low(), src);
__ Move(src, dst);
__ Move(dst, temp.low());
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp.low(), src);
__ vldr(src, dst);
__ vstr(temp.low(), dst);
}
} }
} else if (source->IsFPStackSlot()) { } else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot()); DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg; Register temp_0 = kScratchReg;
DwVfpRegister temp_1 = kScratchDoubleReg; LowDwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source); MemOperand src0 = g.ToMemOperand(source);
MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
MemOperand dst0 = g.ToMemOperand(destination); MemOperand dst0 = g.ToMemOperand(destination);
MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize); MachineRepresentation rep = LocationOperand::cast(source)->representation();
__ vldr(temp_1, dst0); // Save destination in temp_1. if (rep == MachineRepresentation::kFloat64) {
__ ldr(temp_0, src0); // Then use temp_0 to copy source to destination. MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
__ str(temp_0, dst0); MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
__ ldr(temp_0, src1); __ vldr(temp_1, dst0); // Save destination in temp_1.
__ str(temp_0, dst1); __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
__ vstr(temp_1, src0); __ str(temp_0, dst0);
__ ldr(temp_0, src1);
__ str(temp_0, dst1);
__ vstr(temp_1, src0);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, rep);
__ vldr(temp_1.low(), dst0); // Save destination in temp_1.
__ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
__ str(temp_0, dst0);
__ vstr(temp_1.low(), src0);
}
} else { } else {
// No other combinations are possible. // No other combinations are possible.
UNREACHABLE(); UNREACHABLE();
......
...@@ -93,6 +93,7 @@ namespace compiler { ...@@ -93,6 +93,7 @@ namespace compiler {
V(ArmVcvtU32F32) \ V(ArmVcvtU32F32) \
V(ArmVcvtS32F64) \ V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \ V(ArmVcvtU32F64) \
V(ArmVmovU32F32) \
V(ArmVmovLowU32F64) \ V(ArmVmovLowU32F64) \
V(ArmVmovLowF64U32) \ V(ArmVmovLowF64U32) \
V(ArmVmovHighU32F64) \ V(ArmVmovHighU32F64) \
......
...@@ -95,6 +95,7 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -95,6 +95,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmVcvtU32F32: case kArmVcvtU32F32:
case kArmVcvtS32F64: case kArmVcvtS32F64:
case kArmVcvtU32F64: case kArmVcvtU32F64:
case kArmVmovU32F32:
case kArmVmovLowU32F64: case kArmVmovLowU32F64:
case kArmVmovLowF64U32: case kArmVmovLowF64U32:
case kArmVmovHighU32F64: case kArmVmovHighU32F64:
......
...@@ -1213,7 +1213,7 @@ void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) { ...@@ -1213,7 +1213,7 @@ void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kArmVmovLowU32F64, node); VisitRR(this, kArmVmovU32F32, node);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment