Commit 436e236c authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64][wasm] Add f64x2 neg/mul/lt/le for wasm

Also refactor SIMD instruction handling in the code generator.
Bug: v8:11976

Change-Id: If5292e9e205374280ec5c86a481649078e348a80
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3274593
Commit-Queue: ji qiu <qiuji@iscas.ac.cn>
Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#77936}
parent c846b290
@@ -954,12 +954,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
#undef DEFINE_VFUNARY
void vnot_vv(VRegister dst, VRegister src) { vxor_vi(dst, src, -1); }
void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
vxor_vi(dst, src, -1, mask);
}
void vneg_vv(VRegister dst, VRegister src) { vrsub_vx(dst, src, zero_reg); }
void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
vrsub_vx(dst, src, zero_reg, mask);
}
void vfneg_vv(VRegister dst, VRegister src) { vfsngjn_vv(dst, src, src); }
void vfabs_vv(VRegister dst, VRegister src) { vfsngjx_vv(dst, src, src); }
void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
vfsngjn_vv(dst, src, src, mask);
}
void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
vfsngjx_vv(dst, src, src, mask);
}
// Privileged
void uret();
void sret();
......
@@ -2205,6 +2205,26 @@ void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN);
}
void TurboAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ);
}
void TurboAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ);
}
void TurboAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE);
}
void TurboAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch,
VRegister v_scratch) {
RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE);
}
void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RDN);
......
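The new Trunc_f/Trunc_d and Round_f/Round_d helpers above reuse RoundHelper and differ only in the rounding mode they pass: RTZ truncates toward zero, RNE rounds to nearest with ties to even (RDN, used by Floor_d, rounds down). A minimal standalone sketch of those two modes using standard C++ <cfenv> controls, not V8 code:

#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  std::fesetround(FE_TOWARDZERO);               // RTZ, as used by Trunc_f/Trunc_d
  std::printf("%.1f\n", std::nearbyint(-1.5));  // -1.0
  std::fesetround(FE_TONEAREST);                // RNE, as used by Round_f/Round_d
  std::printf("%.1f\n", std::nearbyint(-1.5));  // -2.0
  std::printf("%.1f\n", std::nearbyint(2.5));   //  2.0 (ties go to the even value)
  return 0;
}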
@@ -854,6 +854,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
VRegister v_scratch);
void Floor_d(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
void Trunc_f(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
void Trunc_d(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
void Round_f(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
void Round_d(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
......
@@ -481,6 +481,70 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
i.InputSimd128Register(1)); \
} while (0)
#define ASSEMBLE_RVV_BINOP_INTEGER(instr, OP) \
case kRiscvI8x16##instr: { \
__ VU.set(kScratchReg, E8, m1); \
__ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputSimd128Register(1)); \
break; \
} \
case kRiscvI16x8##instr: { \
__ VU.set(kScratchReg, E16, m1); \
__ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputSimd128Register(1)); \
break; \
} \
case kRiscvI32x4##instr: { \
__ VU.set(kScratchReg, E32, m1); \
__ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputSimd128Register(1)); \
break; \
}
#define ASSEMBLE_RVV_UNOP_INTEGER_VR(instr, OP) \
case kRiscvI8x16##instr: { \
__ VU.set(kScratchReg, E8, m1); \
__ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
break; \
} \
case kRiscvI16x8##instr: { \
__ VU.set(kScratchReg, E16, m1); \
__ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
break; \
} \
case kRiscvI32x4##instr: { \
__ VU.set(kScratchReg, E32, m1); \
__ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
break; \
} \
case kRiscvI64x2##instr: { \
__ VU.set(kScratchReg, E64, m1); \
__ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
break; \
}
#define ASSEMBLE_RVV_UNOP_INTEGER_VV(instr, OP) \
case kRiscvI8x16##instr: { \
__ VU.set(kScratchReg, E8, m1); \
__ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
break; \
} \
case kRiscvI16x8##instr: { \
__ VU.set(kScratchReg, E16, m1); \
__ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
break; \
} \
case kRiscvI32x4##instr: { \
__ VU.set(kScratchReg, E32, m1); \
__ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
break; \
} \
case kRiscvI64x2##instr: { \
__ VU.set(kScratchReg, E64, m1); \
__ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
break; \
}
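These ASSEMBLE_RVV_* macros fold the per-lane-width kRiscvI8x16/I16x8/I32x4 (and I64x2) cases that were previously written out by hand, and are deleted further down, into a single definition: each expansion sets the vector unit to the matching SEW and emits one vector instruction. A standalone sketch of the same case-fusing idea, with hypothetical names and not V8 code:

#include <cstdio>

enum Opcode { kI8x16Add, kI16x8Add, kI32x4Add };

// Expands into one switch case per lane width, mirroring ASSEMBLE_RVV_BINOP_INTEGER.
#define RVV_BINOP_CASES(op, mnemonic)        \
  case kI8x16##op:                           \
    std::printf("SEW=E8;  %s\n", mnemonic);  \
    break;                                   \
  case kI16x8##op:                           \
    std::printf("SEW=E16; %s\n", mnemonic);  \
    break;                                   \
  case kI32x4##op:                           \
    std::printf("SEW=E32; %s\n", mnemonic);  \
    break;

void Assemble(Opcode opcode) {
  switch (opcode) {
    RVV_BINOP_CASES(Add, "vadd.vv")
  }
}
#undef RVV_BINOP_CASES

int main() { Assemble(kI16x8Add); }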
void CodeGenerator::AssembleDeconstructFrame() {
__ Move(sp, fp);
__ Pop(ra, fp);
@@ -1889,6 +1953,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vl(i.OutputSimd128Register(), src, 0, VSew::E8);
break;
}
case kRiscvS128Select: {
__ VU.set(kScratchReg, E8, m1);
__ vand_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
i.InputSimd128Register(0));
__ vnot_vv(kSimd128ScratchReg2, i.InputSimd128Register(0));
__ vand_vv(kSimd128ScratchReg2, i.InputSimd128Register(2),
kSimd128ScratchReg2);
__ vor_vv(i.OutputSimd128Register(), kSimd128ScratchReg,
kSimd128ScratchReg2);
break;
}
case kRiscvS128And: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvS128Or: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvS128Xor: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvS128Not: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvS128AndNot: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
__ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.OutputSimd128Register());
break;
}
case kRiscvS128Const: {
Simd128Register dst = i.OutputSimd128Register();
uint8_t imm[16];
@@ -1905,18 +2010,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kRiscvI32x4Add: {
(__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
__ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvI16x8Add: {
(__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
__ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvI16x8AddSatS: {
(__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
__ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -1929,12 +2022,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kRiscvI8x16Add: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvI8x16AddSatS: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -1953,18 +2040,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kRiscvI32x4Sub: {
(__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
__ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvI16x8Sub: {
(__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
__ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvI16x8SubSatS: {
(__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
__ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -1977,12 +2052,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kRiscvI8x16Sub: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvI8x16SubSatS: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -1995,73 +2064,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kRiscvS128And: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvS128Or: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvS128Xor: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvS128Not: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvS128AndNot: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
__ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.OutputSimd128Register());
break;
}
case kRiscvI32x4ExtractLane: {
__ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1), E32, m1);
break;
}
case kRiscvI8x16Splat: {
(__ VU).set(kScratchReg, E8, m1);
__ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kRiscvI16x8Splat: {
(__ VU).set(kScratchReg, E16, m1);
__ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kRiscvI32x4Splat: {
(__ VU).set(kScratchReg, E32, m1);
__ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kRiscvI64x2Splat: {
(__ VU).set(kScratchReg, E64, m1);
__ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kRiscvF32x4Splat: {
(__ VU).set(kScratchReg, E32, m1);
__ fmv_x_w(kScratchReg, i.InputSingleRegister(0));
__ vmv_vx(i.OutputSimd128Register(), kScratchReg);
break;
}
case kRiscvF64x2Splat: {
(__ VU).set(kScratchReg, E64, m1);
__ fmv_x_d(kScratchReg, i.InputDoubleRegister(0));
__ vmv_vx(i.OutputSimd128Register(), kScratchReg);
break;
}
case kRiscvI32x4Abs: {
__ VU.set(kScratchReg, E32, m1);
__ vmv_vx(kSimd128RegZero, zero_reg);
@@ -2405,9 +2412,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vor_vv(dst, dst, kSimd128ScratchReg);
break;
}
case kRiscvF32x4Abs: {
__ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
__ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
case kRiscvF64x2Splat: {
(__ VU).set(kScratchReg, E64, m1);
__ fmv_x_d(kScratchReg, i.InputDoubleRegister(0));
__ vmv_vx(i.OutputSimd128Register(), kScratchReg);
break;
}
case kRiscvF64x2Abs: {
@@ -2415,16 +2423,89 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvF32x4Neg: {
__ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
__ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvF64x2Neg: {
__ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
__ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvF64x2Add: {
__ VU.set(kScratchReg, E64, m1);
__ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvF64x2Sub: {
__ VU.set(kScratchReg, E64, m1);
__ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvF64x2Ceil: {
__ Ceil_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
kScratchReg, kSimd128ScratchReg);
break;
}
case kRiscvF64x2Floor: {
__ Floor_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
kScratchReg, kSimd128ScratchReg);
break;
}
case kRiscvF64x2Ne: {
__ VU.set(kScratchReg, E64, m1);
__ vmfne_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
__ vmv_vx(i.OutputSimd128Register(), zero_reg);
__ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
break;
}
case kRiscvF64x2Eq: {
__ VU.set(kScratchReg, E64, m1);
__ vmfeq_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
__ vmv_vx(i.OutputSimd128Register(), zero_reg);
__ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
break;
}
case kRiscvF64x2Lt: {
__ VU.set(kScratchReg, E64, m1);
__ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
__ vmv_vx(i.OutputSimd128Register(), zero_reg);
__ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
break;
}
case kRiscvF64x2Le: {
__ VU.set(kScratchReg, E64, m1);
__ vmfle_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
__ vmv_vx(i.OutputSimd128Register(), zero_reg);
__ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
break;
}
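The four f64x2 comparison cases above share one pattern: the floating-point compare writes a per-lane mask into v0, the destination is zeroed with vmv_vx, and vmerge_vi then writes -1 into exactly the lanes the mask selects, giving the all-ones/all-zeros lanes WebAssembly expects. A minimal scalar model of that per-lane result (plain C++, not V8 code):

#include <cstdint>
#include <cstdio>

// One f64x2 comparison lane: all ones when the compare holds, all zeros otherwise.
uint64_t CompareLane(bool lane_result) {
  return lane_result ? ~uint64_t{0} : uint64_t{0};
}

int main() {
  std::printf("%016llx\n", (unsigned long long)CompareLane(1.0 < 2.0));  // ffffffffffffffff
  std::printf("%016llx\n", (unsigned long long)CompareLane(2.0 < 1.0));  // 0000000000000000
}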
case kRiscvF64x2Min: {
__ VU.set(kScratchReg, E64, m1);
const int64_t kNaN = 0x7ff8000000000000L;
__ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
__ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
i.InputSimd128Register(1));
__ vand_vv(v0, v0, kSimd128ScratchReg);
__ li(kScratchReg, kNaN);
__ vmv_vx(kSimd128ScratchReg, kScratchReg);
__ vfmin_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
i.InputSimd128Register(0), Mask);
__ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
break;
}
case kRiscvF64x2Max: {
__ VU.set(kScratchReg, E64, m1);
const int64_t kNaN = 0x7ff8000000000000L;
__ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
__ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
i.InputSimd128Register(1));
__ vand_vv(v0, v0, kSimd128ScratchReg);
__ li(kScratchReg, kNaN);
__ vmv_vx(kSimd128ScratchReg, kScratchReg);
__ vfmax_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
i.InputSimd128Register(0), Mask);
__ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
break;
}
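kRiscvF64x2Min and kRiscvF64x2Max above implement WebAssembly's NaN-propagating semantics: the two vmfeq_vv self-compares and the vand_vv build a v0 mask of lanes where both inputs are non-NaN, a scratch register is pre-filled with the canonical quiet NaN (0x7ff8000000000000), the masked vfmin_vv/vfmax_vv overwrites only the lanes the mask selects, and the result is then copied to the destination. A scalar sketch of the per-lane NaN handling, signed-zero details aside (plain C++, not V8 code):

#include <cmath>
#include <cstdio>
#include <limits>

double MinLane(double a, double b) {
  double result = std::numeric_limits<double>::quiet_NaN();  // the vmv_vx of kNaN
  if (a == a && b == b) {      // both non-NaN: the mask built by vmfeq_vv/vand_vv
    result = std::fmin(a, b);  // the masked vfmin_vv
  }
  return result;
}

int main() {
  std::printf("%g\n", MinLane(1.0, 2.0));           // 1
  std::printf("%g\n", MinLane(std::nan(""), 2.0));  // nan
}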
case kRiscvF32x4DemoteF64x2Zero: {
__ VU.set(kScratchReg, E32, m1);
__ vfncvt_f_f_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -2433,26 +2514,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.OutputSimd128Register());
break;
}
case kRiscvF32x4Add: {
__ VU.set(kScratchReg, E32, m1);
__ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
case kRiscvF32x4Neg: {
__ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
__ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvF32x4Sub: {
__ VU.set(kScratchReg, E32, m1);
__ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
case kRiscvF32x4Abs: {
__ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
__ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kRiscvF64x2Add: {
__ VU.set(kScratchReg, E64, m1);
case kRiscvF32x4Splat: {
(__ VU).set(kScratchReg, E32, m1);
__ fmv_x_w(kScratchReg, i.InputSingleRegister(0));
__ vmv_vx(i.OutputSimd128Register(), kScratchReg);
break;
}
case kRiscvF32x4Add: {
__ VU.set(kScratchReg, E32, m1);
__ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kRiscvF64x2Sub: {
__ VU.set(kScratchReg, E64, m1);
case kRiscvF32x4Sub: {
__ VU.set(kScratchReg, E32, m1);
__ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2462,32 +2547,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchReg, kSimd128ScratchReg);
break;
}
case kRiscvF64x2Ceil: {
__ Ceil_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
kScratchReg, kSimd128ScratchReg);
break;
}
case kRiscvF32x4Floor: {
__ Floor_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
kScratchReg, kSimd128ScratchReg);
break;
}
case kRiscvF64x2Floor: {
__ Floor_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
kScratchReg, kSimd128ScratchReg);
break;
}
case kRiscvS128Select: {
__ VU.set(kScratchReg, E8, m1);
__ vand_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
i.InputSimd128Register(0));
__ vnot_vv(kSimd128ScratchReg2, i.InputSimd128Register(0));
__ vand_vv(kSimd128ScratchReg2, i.InputSimd128Register(2),
kSimd128ScratchReg2);
__ vor_vv(i.OutputSimd128Register(), kSimd128ScratchReg,
kSimd128ScratchReg2);
break;
}
case kRiscvF32x4UConvertI32x4: {
__ VU.set(kScratchReg, E32, m1);
__ VU.set(RoundingMode::RTZ);
@@ -2689,6 +2753,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vnclipu_vi(i.OutputSimd128Register(), kSimd128ScratchReg2, 0);
break;
}
ASSEMBLE_RVV_UNOP_INTEGER_VV(Neg, vneg_vv)
ASSEMBLE_RVV_BINOP_INTEGER(MaxU, vmaxu_vv)
ASSEMBLE_RVV_BINOP_INTEGER(MaxS, vmax_vv)
ASSEMBLE_RVV_BINOP_INTEGER(MinU, vminu_vv)
ASSEMBLE_RVV_BINOP_INTEGER(MinS, vmin_vv)
ASSEMBLE_RVV_UNOP_INTEGER_VR(Splat, vmv_vx)
ASSEMBLE_RVV_BINOP_INTEGER(Add, vadd_vv)
ASSEMBLE_RVV_BINOP_INTEGER(Sub, vsub_vv)
default:
#ifdef DEBUG
switch (arch_opcode) {
......
@@ -2038,22 +2038,34 @@ void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_eq");
VU.set(kScratchReg, E64, m1);
vmfeq_vv(v0, rhs.fp().toV(), lhs.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_ne");
VU.set(kScratchReg, E64, m1);
vmfne_vv(v0, rhs.fp().toV(), lhs.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_lt");
VU.set(kScratchReg, E64, m1);
vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_le");
VU.set(kScratchReg, E64, m1);
vmfle_vv(v0, rhs.fp().toV(), lhs.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
@@ -2221,30 +2233,35 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_min_s");
VU.set(kScratchReg, E8, m1);
vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_min_u");
VU.set(kScratchReg, E8, m1);
vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_max_s");
VU.set(kScratchReg, E8, m1);
vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_max_u");
VU.set(kScratchReg, E8, m1);
vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i16x8_neg");
VU.set(kScratchReg, E16, m1);
vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
@@ -2348,30 +2365,35 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i16x8_min_s");
VU.set(kScratchReg, E16, m1);
vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i16x8_min_u");
VU.set(kScratchReg, E16, m1);
vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i16x8_max_s");
VU.set(kScratchReg, E16, m1);
vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i16x8_max_u");
VU.set(kScratchReg, E16, m1);
vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i32x4_neg");
VU.set(kScratchReg, E32, m1);
vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
@@ -2453,25 +2475,29 @@ void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i32x4_min_s");
VU.set(kScratchReg, E32, m1);
vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i32x4_min_u");
VU.set(kScratchReg, E32, m1);
vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i32x4_max_s");
VU.set(kScratchReg, E32, m1);
vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i32x4_max_u");
VU.set(kScratchReg, E32, m1);
vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
@@ -2482,7 +2508,8 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_neg");
VU.set(kScratchReg, E64, m1);
vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
@@ -2585,13 +2612,13 @@ bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f32x4_trunc");
Trunc_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f32x4_nearest_int");
Round_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2687,13 +2714,13 @@ bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f64x2_trunc");
Trunc_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f64x2_nearest_int");
Round_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2711,22 +2738,41 @@ void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_mul");
VU.set(kScratchReg, E64, m1);
vfmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_div");
VU.set(kScratchReg, E64, m1);
vfdiv_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_min");
VU.set(kScratchReg, E64, m1);
const int64_t kNaN = 0x7ff8000000000000L;
vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
vand_vv(v0, v0, kSimd128ScratchReg);
li(kScratchReg, kNaN);
vmv_vx(kSimd128ScratchReg, kScratchReg);
vfmin_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_max");
VU.set(kScratchReg, E64, m1);
const int64_t kNaN = 0x7ff8000000000000L;
VU.set(kScratchReg, E32, m1);
vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
vand_vv(v0, v0, kSimd128ScratchReg);
li(kScratchReg, kNaN);
vmv_vx(kSimd128ScratchReg, kScratchReg);
vfmax_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2893,17 +2939,26 @@ void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i8x16_abs");
VU.set(kScratchReg, E8, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i16x8_abs");
VU.set(kScratchReg, E16, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_abs");
VU.set(kScratchReg, E64, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
......
@@ -1993,7 +1993,6 @@ TEST(li_estimate) {
CHECK(!memcmp(src, dst, sizeof(src))); \
}
#ifdef CAN_USE_RVV_INSTRUCTIONS
UTEST_LOAD_STORE_RVV(vl, vs, E8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16)
// UTEST_LOAD_STORE_RVV(vl, vs, E8, 127, 127, 127, 127, 127, 127, 127)
@@ -2498,8 +2497,6 @@ UTEST_RVV_VP_VS_VI_FORM_WITH_RES(vslidedown_vi, uint8_t, 8, ARRAY(uint8_t),
#undef UTEST_RVV_VP_VS_VI_FORM_WITH_RES
#undef ARRAY
#endif
#undef __
} // namespace internal
......