Commit 381fc4fe authored by Bill Budge, committed by Commit Bot

[arm] Eliminate old form of core-scalar vmov instructions.

- Eliminates less general forms of vmov between core and scalar
  FP registers.

Bug: v8:7754
Change-Id: I3224c57e03dfd64ea6552b215017dacc357dda26
Reviewed-on: https://chromium-review.googlesource.com/1144126
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54663}
parent 7d7b6120
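
In short, the dedicated VmovIndex overloads are replaced everywhere by the existing NEON lane form of vmov, with NeonS32 selecting a 32-bit lane; a minimal before/after sketch, taken from the call sites changed below:

    // Before (removed by this CL): dedicated index constants.
    vmov(d22, VmovIndexLo, r1);   // d22[0] = r1
    vmov(r4, VmovIndexHi, d22);   // r4 = d22[1]
    // After: general lane form; the lane is a plain integer index.
    vmov(NeonS32, d22, 0, r1);    // d22[0] = r1
    vmov(NeonS32, r4, d22, 1);    // r4 = d22[1]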
@@ -2861,14 +2861,14 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
   } else if (extra_scratch == no_reg) {
     // We only have one spare scratch register.
     mov(scratch, Operand(lo));
-    vmov(dst, VmovIndexLo, scratch);
+    vmov(NeonS32, dst, 0, scratch);
     if (((lo & 0xFFFF) == (hi & 0xFFFF)) && CpuFeatures::IsSupported(ARMv7)) {
       CpuFeatureScope scope(this, ARMv7);
       movt(scratch, hi >> 16);
     } else {
       mov(scratch, Operand(hi));
     }
-    vmov(dst, VmovIndexHi, scratch);
+    vmov(NeonS32, dst, 1, scratch);
   } else {
     // Move the low and high parts of the double to a D register in one
     // instruction.
@@ -2908,40 +2908,6 @@ void Assembler::vmov(const DwVfpRegister dst,
        vm);
 }
 
-void Assembler::vmov(const DwVfpRegister dst,
-                     const VmovIndex index,
-                     const Register src,
-                     const Condition cond) {
-  // Dd[index] = Rt
-  // Instruction details available in ARM DDI 0406C.b, A8-940.
-  // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
-  // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
-  DCHECK(VfpRegisterIsAvailable(dst));
-  DCHECK(index.index == 0 || index.index == 1);
-  int vd, d;
-  dst.split_code(&vd, &d);
-  emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
-       d*B7 | B4);
-}
-
-void Assembler::vmov(const Register dst,
-                     const VmovIndex index,
-                     const DwVfpRegister src,
-                     const Condition cond) {
-  // Dd[index] = Rt
-  // Instruction details available in ARM DDI 0406C.b, A8.8.342.
-  // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
-  // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
-  DCHECK(VfpRegisterIsAvailable(src));
-  DCHECK(index.index == 0 || index.index == 1);
-  int vn, n;
-  src.split_code(&vn, &n);
-  emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
-       0xB*B8 | n*B7 | B4);
-}
-
 void Assembler::vmov(const DwVfpRegister dst,
                      const Register src1,
                      const Register src2,
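
For reference, the bit layout in the removed encoders reproduces the machine words expected by the disassembler test further down; a standalone cross-check sketch (plain C++, not V8 code; the Bn constants in the original are assumed to mean 1 << n):

    #include <cassert>
    #include <cstdint>

    // Layout of the removed "vmov Dd[index], Rt" encoder with cond = AL:
    // cond | 1110<<24 | index<<21 | vd<<16 | rt<<12 | 1011<<8 | d<<7 | 1<<4
    static uint32_t encode_vmov_to_scalar(uint32_t index, uint32_t vd,
                                          uint32_t d, uint32_t rt) {
      const uint32_t al = 0xEu << 28;  // "always" condition code
      return al | (0xEu << 24) | (index << 21) | (vd << 16) | (rt << 12) |
             (0xBu << 8) | (d << 7) | (1u << 4);
    }

    int main() {
      // d0 splits into vd=0, d=0; d31 splits into vd=15, d=1 (split_code).
      assert(encode_vmov_to_scalar(0, 0, 0, 0) == 0xEE000B10u);   // vmov.32 d0[0], r0
      assert(encode_vmov_to_scalar(1, 15, 1, 7) == 0xEE2F7B90u);  // vmov.32 d31[1], r7
      return 0;
    }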
@@ -605,13 +605,6 @@ class NeonListOperand BASE_EMBEDDED {
   int register_count_;
 };
 
-struct VmovIndex {
-  unsigned char index;
-};
-constexpr VmovIndex VmovIndexLo = { 0 };
-constexpr VmovIndex VmovIndexHi = { 1 };
-
 class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
  public:
   // Create an assembler. Instructions and relocation information are emitted
@@ -1070,16 +1063,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void vmov(const DwVfpRegister dst,
             const DwVfpRegister src,
             const Condition cond = al);
-  // TODO(bbudge) Replace uses of these with the more general core register to
-  // scalar register vmov's.
-  void vmov(const DwVfpRegister dst,
-            const VmovIndex index,
-            const Register src,
-            const Condition cond = al);
-  void vmov(const Register dst,
-            const VmovIndex index,
-            const DwVfpRegister src,
-            const Condition cond = al);
   void vmov(const DwVfpRegister dst,
             const Register src1,
             const Register src2,
@@ -834,7 +834,7 @@ void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
     vmov(dst, loc.high());
   } else {
-    vmov(dst, VmovIndexHi, src);
+    vmov(NeonS32, dst, src, 1);
   }
 }
@@ -843,7 +843,7 @@ void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
     vmov(loc.high(), src);
   } else {
-    vmov(dst, VmovIndexHi, src);
+    vmov(NeonS32, dst, 1, src);
   }
 }
@@ -852,7 +852,7 @@ void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
     vmov(dst, loc.low());
   } else {
-    vmov(dst, VmovIndexLo, src);
+    vmov(NeonS32, dst, src, 0);
   }
 }
@@ -861,7 +861,7 @@ void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
     const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
     vmov(loc.low(), src);
   } else {
-    vmov(dst, VmovIndexLo, src);
+    vmov(NeonS32, dst, 0, src);
   }
 }
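
Note the argument order in the surviving overloads: the D register and the lane stay adjacent, so writes read vmov(NeonS32, dst_dreg, lane, src_core) while reads are vmov(NeonS32, dst_core, src_dreg, lane). The helpers above only rely on lane 0 holding the low word and lane 1 the high word of the double; a standalone illustration of that mapping (plain C++, not V8 code, assuming little-endian word order):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Lane 0 / lane 1 of a D register hold the low / high 32 bits of the
    // double's bit pattern, which is what VmovLow/VmovHigh select above.
    static uint32_t lane_of_double(double value, int lane) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits >> (lane * 32));
    }

    int main() {
      double d = 16.0;  // same constant the tests below store in d21/d30
      std::printf("low=%08x high=%08x\n",
                  static_cast<unsigned>(lane_of_double(d, 0)),
                  static_cast<unsigned>(lane_of_double(d, 1)));
      // 16.0 has bit pattern 0x4030000000000000, so this prints
      // low=00000000 high=40300000.
      return 0;
    }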
@@ -1082,14 +1082,14 @@ TEST(13) {
   __ vmov(d21, Double(16.0));
   __ mov(r1, Operand(372106121));
   __ mov(r2, Operand(1079146608));
-  __ vmov(d22, VmovIndexLo, r1);
-  __ vmov(d22, VmovIndexHi, r2);
+  __ vmov(NeonS32, d22, 0, r1);
+  __ vmov(NeonS32, d22, 1, r2);
   __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, i))));
   __ vstm(ia_w, r4, d20, d22);
   // Move d22 into low and high.
-  __ vmov(r4, VmovIndexLo, d22);
+  __ vmov(NeonS32, r4, d22, 0);
   __ str(r4, MemOperand(r0, offsetof(T, low)));
-  __ vmov(r4, VmovIndexHi, d22);
+  __ vmov(NeonS32, r4, d22, 1);
   __ str(r4, MemOperand(r0, offsetof(T, high)));
 
   __ ldm(ia_w, sp, r4.bit() | pc.bit());
@@ -672,14 +672,14 @@ TEST(Vfp) {
     COMPARE(vmov(s3, Float32(13.0f)),
             "eef21a0a vmov.f32 s3, #13");
 
-    COMPARE(vmov(d0, VmovIndexLo, r0),
+    COMPARE(vmov(NeonS32, d0, 0, r0),
             "ee000b10 vmov.32 d0[0], r0");
-    COMPARE(vmov(d0, VmovIndexHi, r0),
+    COMPARE(vmov(NeonS32, d0, 1, r0),
             "ee200b10 vmov.32 d0[1], r0");
-    COMPARE(vmov(r2, VmovIndexLo, d15),
+    COMPARE(vmov(NeonS32, r2, d15, 0),
             "ee1f2b10 vmov.32 r2, d15[0]");
-    COMPARE(vmov(r3, VmovIndexHi, d14),
+    COMPARE(vmov(NeonS32, r3, d14, 1),
             "ee3e3b10 vmov.32 r3, d14[1]");
 
     COMPARE(vldr(s0, r0, 0),
@@ -833,9 +833,9 @@ TEST(Vfp) {
     COMPARE(vmov(d30, Double(16.0)),
            "eef3eb00 vmov.f64 d30, #16");
 
-    COMPARE(vmov(d31, VmovIndexLo, r7),
+    COMPARE(vmov(NeonS32, d31, 0, r7),
             "ee0f7b90 vmov.32 d31[0], r7");
-    COMPARE(vmov(d31, VmovIndexHi, r7),
+    COMPARE(vmov(NeonS32, d31, 1, r7),
             "ee2f7b90 vmov.32 d31[1], r7");
 
     COMPARE(vldr(d25, r0, 0),