Commit 87483842 authored by Lu Yahan; committed by V8 LUCI CQ

[riscv64] Implement simd128 in swap

enable simd on riscv64

Change-Id: I446d6b14e4f89164b49a66367340d904ba104911
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3347493Reviewed-by: 's avatarji qiu <qiuji@iscas.ac.cn>
Reviewed-by: 's avatarMichael Lippautz <mlippautz@chromium.org>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#78553}
parent d1f04238
...@@ -1723,7 +1723,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1723,7 +1723,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvStoreToStackSlot: { case kRiscvStoreToStackSlot: {
if (instr->InputAt(0)->IsFPRegister()) { if (instr->InputAt(0)->IsFPRegister()) {
if (instr->InputAt(0)->IsSimd128Register()) { if (instr->InputAt(0)->IsSimd128Register()) {
UNREACHABLE(); Register dst = sp;
if (i.InputInt32(1) != 0) {
dst = kScratchReg2;
__ Add64(kScratchReg2, sp, Operand(i.InputInt32(1)));
}
__ VU.set(kScratchReg, E8, m1);
__ vs(i.InputSimd128Register(0), dst, 0, E8);
} else { } else {
__ StoreDouble(i.InputDoubleRegister(0), __ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp, i.InputInt32(1))); MemOperand(sp, i.InputInt32(1)));
...@@ -4192,17 +4198,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, ...@@ -4192,17 +4198,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
VRegister src = g.ToSimd128Register(source); VRegister src = g.ToSimd128Register(source);
if (destination->IsSimd128Register()) { if (destination->IsSimd128Register()) {
VRegister dst = g.ToSimd128Register(destination); VRegister dst = g.ToSimd128Register(destination);
__ VU.set(kScratchReg, E8, m1);
__ vmv_vv(dst, src); __ vmv_vv(dst, src);
} else { } else {
DCHECK(destination->IsSimd128StackSlot()); DCHECK(destination->IsSimd128StackSlot());
Register dst = g.ToMemOperand(destination).offset() == 0 __ VU.set(kScratchReg, E8, m1);
? g.ToMemOperand(destination).rm() MemOperand dst = g.ToMemOperand(destination);
: kScratchReg; Register dst_r = dst.rm();
if (g.ToMemOperand(destination).offset() != 0) { if (dst.offset() != 0) {
__ Add64(dst, g.ToMemOperand(destination).rm(), dst_r = kScratchReg;
g.ToMemOperand(destination).offset()); __ Add64(dst_r, dst.rm(), dst.offset());
} }
__ vs(src, dst, 0, E8); __ vs(src, dst_r, 0, E8);
} }
} else { } else {
FPURegister src = g.ToDoubleRegister(source); FPURegister src = g.ToDoubleRegister(source);
...@@ -4224,24 +4231,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, ...@@ -4224,24 +4231,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source); MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation(); MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) { if (rep == MachineRepresentation::kSimd128) {
Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; __ VU.set(kScratchReg, E8, m1);
Register src_r = src.rm();
if (src.offset() != 0) { if (src.offset() != 0) {
__ Add64(src_reg, src.rm(), src.offset()); src_r = kScratchReg;
__ Add64(src_r, src.rm(), src.offset());
} }
if (destination->IsSimd128Register()) { if (destination->IsSimd128Register()) {
__ vl(g.ToSimd128Register(destination), src_reg, 0, E8); __ vl(g.ToSimd128Register(destination), src_r, 0, E8);
} else { } else {
DCHECK(destination->IsSimd128StackSlot()); DCHECK(destination->IsSimd128StackSlot());
VRegister temp = kSimd128ScratchReg; VRegister temp = kSimd128ScratchReg;
Register dst = g.ToMemOperand(destination).offset() == 0 MemOperand dst = g.ToMemOperand(destination);
? g.ToMemOperand(destination).rm() Register dst_r = dst.rm();
: kScratchReg; if (dst.offset() != 0) {
if (g.ToMemOperand(destination).offset() != 0) { dst_r = kScratchReg2;
__ Add64(dst, g.ToMemOperand(destination).rm(), __ Add64(dst_r, dst.rm(), dst.offset());
g.ToMemOperand(destination).offset());
} }
__ vl(temp, src_reg, 0, E8); __ vl(temp, src_r, 0, E8);
__ vs(temp, dst, 0, E8); __ vs(temp, dst_r, 0, E8);
} }
} else { } else {
if (destination->IsFPRegister()) { if (destination->IsFPRegister()) {
...@@ -4272,91 +4280,106 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, ...@@ -4272,91 +4280,106 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source, void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) { InstructionOperand* destination) {
RiscvOperandConverter g(this, nullptr); RiscvOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all switch (MoveType::InferSwap(source, destination)) {
// combinations are possible. case MoveType::kRegisterToRegister:
if (source->IsRegister()) { if (source->IsRegister()) {
// Register-register. Register temp = kScratchReg;
Register temp = kScratchReg; Register src = g.ToRegister(source);
Register src = g.ToRegister(source); Register dst = g.ToRegister(destination);
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
DCHECK(destination->IsStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ Ld(src, dst);
__ Sd(temp, dst);
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
Register temp_0 = kScratchReg;
Register temp_1 = kScratchReg2;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
__ Ld(temp_0, src);
__ Ld(temp_1, dst);
__ Sd(temp_0, dst);
__ Sd(temp_1, src);
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
UNIMPLEMENTED();
} else {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src); __ Move(temp, src);
__ Move(src, dst); __ Move(src, dst);
__ Move(dst, temp); __ Move(dst, temp);
} else { } else {
DCHECK(destination->IsFPStackSlot()); if (source->IsFloatRegister() || source->IsDoubleRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
DCHECK(source->IsSimd128Register());
VRegister src = g.ToDoubleRegister(source).toV();
VRegister dst = g.ToDoubleRegister(destination).toV();
VRegister temp = kSimd128ScratchReg;
__ VU.set(kScratchReg, E8, m1);
__ vmv_vv(temp, src);
__ vmv_vv(src, dst);
__ vmv_vv(dst, temp);
}
}
return;
case MoveType::kRegisterToStack: {
MemOperand dst = g.ToMemOperand(destination);
if (source->IsRegister()) {
Register temp = kScratchReg;
Register src = g.ToRegister(source);
__ mv(temp, src);
__ Ld(src, dst);
__ Sd(temp, dst);
} else {
MemOperand dst = g.ToMemOperand(destination); MemOperand dst = g.ToMemOperand(destination);
if (rep == MachineRepresentation::kFloat32) { if (source->IsFloatRegister()) {
__ MoveFloat(temp, src); DoubleRegister src = g.ToDoubleRegister(source);
DoubleRegister temp = kScratchDoubleReg;
__ fmv_s(temp, src);
__ LoadFloat(src, dst); __ LoadFloat(src, dst);
__ StoreFloat(temp, dst); __ StoreFloat(temp, dst);
} else { } else if (source->IsDoubleRegister()) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64); DoubleRegister src = g.ToDoubleRegister(source);
__ MoveDouble(temp, src); DoubleRegister temp = kScratchDoubleReg;
__ fmv_d(temp, src);
__ LoadDouble(src, dst); __ LoadDouble(src, dst);
__ StoreDouble(temp, dst); __ StoreDouble(temp, dst);
} else {
DCHECK(source->IsSimd128Register());
VRegister src = g.ToDoubleRegister(source).toV();
VRegister temp = kSimd128ScratchReg;
__ VU.set(kScratchReg, E8, m1);
__ vmv_vv(temp, src);
Register dst_v = dst.rm();
if (dst.offset() != 0) {
dst_v = kScratchReg2;
__ Add64(dst_v, dst.rm(), Operand(dst.offset()));
}
__ vl(src, dst_v, 0, E8);
__ vs(temp, dst_v, 0, E8);
} }
} }
return;
} }
} else if (source->IsFPStackSlot()) { case MoveType::kStackToStack: {
DCHECK(destination->IsFPStackSlot()); MemOperand src = g.ToMemOperand(source);
Register temp_0 = kScratchReg; MemOperand dst = g.ToMemOperand(destination);
MemOperand src0 = g.ToMemOperand(source); if (source->IsSimd128StackSlot()) {
MemOperand src1(src0.rm(), src0.offset() + kIntSize); __ VU.set(kScratchReg, E8, m1);
MemOperand dst0 = g.ToMemOperand(destination); Register src_v = src.rm();
MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize); Register dst_v = dst.rm();
MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (src.offset() != 0) {
if (rep == MachineRepresentation::kSimd128) { src_v = kScratchReg;
UNIMPLEMENTED(); __ Add64(src_v, src.rm(), Operand(src.offset()));
} else { }
FPURegister temp_1 = kScratchDoubleReg; if (dst.offset() != 0) {
if (rep == MachineRepresentation::kFloat32) { dst_v = kScratchReg2;
__ LoadFloat(temp_1, dst0); // Save destination in temp_1. __ Add64(dst_v, dst.rm(), Operand(dst.offset()));
__ Lw(temp_0, src0); // Then use temp_0 to copy source to destination. }
__ Sw(temp_0, dst0); __ vl(kSimd128ScratchReg, src_v, 0, E8);
__ StoreFloat(temp_1, src0); __ vl(kSimd128ScratchReg2, dst_v, 0, E8);
__ vs(kSimd128ScratchReg, dst_v, 0, E8);
__ vs(kSimd128ScratchReg2, src_v, 0, E8);
} else { } else {
DCHECK_EQ(rep, MachineRepresentation::kFloat64); UseScratchRegisterScope scope(tasm());
__ LoadDouble(temp_1, dst0); // Save destination in temp_1. Register temp_0 = kScratchReg;
__ Lw(temp_0, src0); // Then use temp_0 to copy source to destination. Register temp_1 = kScratchReg2;
__ Sw(temp_0, dst0); __ Ld(temp_0, src);
__ Lw(temp_0, src1); __ Ld(temp_1, dst);
__ Sw(temp_0, dst1); __ Sd(temp_0, dst);
__ StoreDouble(temp_1, src0); __ Sd(temp_1, src);
} }
return;
} }
} else { default:
// No other combinations are possible. UNREACHABLE();
UNREACHABLE();
} }
} }
......
...@@ -694,10 +694,6 @@ class BaseTestRunner(object): ...@@ -694,10 +694,6 @@ class BaseTestRunner(object):
utils.GuessPowerProcessorVersion() < 9: utils.GuessPowerProcessorVersion() < 9:
no_simd_hardware = True no_simd_hardware = True
# riscv64 do not support Simd instructions
if self.build_config.arch == 'riscv64':
no_simd_hardware = True
return { return {
"arch": self.build_config.arch, "arch": self.build_config.arch,
"asan": self.build_config.asan, "asan": self.build_config.asan,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment