Commit 4f06ec6d authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64] Implement simd for liftoff

Bug: v8:11976

Change-Id: Ifdce8e668c4b0fe20180c8d28b9c1d4abe705a67
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3297354
Commit-Queue: ji qiu <qiuji@iscas.ac.cn>
Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#78078}
parent 53d9e8b1
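At a high level, the change plumbs RVV vector instructions through every layer the Liftoff SIMD path touches: new Assembler emitters and opcode constants, disassembler and simulator support, and the instruction-selector hookup for the Wasm SIMD ops. As a rough, hypothetical sketch of the top layer (method and conversion names follow the riscv64 port's conventions but are not verbatim from this commit), an i64x2.mul lowering built on the new vmul_vv helper could look like:

// Illustration only: a Liftoff SIMD lowering on top of the RVV helpers
// added below. VU.set(...), E64/m1 and fp().toV() are assumed from the
// surrounding riscv64 port, not quoted from this commit.
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst,
                                      LiftoffRegister lhs,
                                      LiftoffRegister rhs) {
  VU.set(kScratchReg, E64, m1);  // SEW = 64 bits, LMUL = 1
  vmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}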
......@@ -2461,6 +2461,27 @@ void Assembler::EBREAK() {
}
// RVV
void Assembler::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
MaskType mask) {
GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}
void Assembler::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
MaskType mask) {
GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}
void Assembler::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
MaskType mask) {
GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}
void Assembler::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
MaskType mask) {
GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}
void Assembler::vmv_vv(VRegister vd, VRegister vs1) {
GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
}
......@@ -2536,6 +2557,11 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
}
void Assembler::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
MaskType mask) {
GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask);
}
#define DEFINE_OPIVV(name, funct6) \
void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
......@@ -2548,6 +2574,12 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
}
#define DEFINE_OPFRED(name, funct6) \
void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
}
#define DEFINE_OPIVX(name, funct6) \
void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
MaskType mask) { \
......@@ -2561,11 +2593,19 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
}
#define DEFINE_OPMVV(name, funct6) \
- void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
}
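For example, DEFINE_OPMVV(vmul, VMUL_FUNCT6) below expands to an ordinary member definition (whitespace adjusted):

void Assembler::vmul_vv(VRegister vd, VRegister vs2, VRegister vs1,
                        MaskType mask) {
  GenInstrV(VMUL_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}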
#define DEFINE_OPMVX(name, funct6) \
void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
MaskType mask) { \
GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask); \
}
#define DEFINE_OPFVF(name, funct6) \
void Assembler::name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
MaskType mask) { \
......@@ -2594,8 +2634,12 @@ void Assembler::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) {
GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask);
}
- void Assembler::vfmv_fs(FPURegister fd, VRegister vs2, MaskType mask) {
-   GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, mask);
+ void Assembler::vfmv_fs(FPURegister fd, VRegister vs2) {
+   GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask);
}
void Assembler::vfmv_sf(VRegister vd, FPURegister fs) {
GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask);
}
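The vfmv.f.s/vfmv.s.f forms are unmasked in the RVV spec, which is why the MaskType parameter is dropped here. A usage sketch (illustration only):

// Move the lane-0 scalar between an FPU register and a vector register.
assm.vfmv_fs(fa0, v8);  // fa0 = v8[0]
assm.vfmv_sf(v8, fa0);  // v8[0] = fa0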
DEFINE_OPIVV(vadd, VADD_FUNCT6)
......@@ -2603,6 +2647,19 @@ DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
DEFINE_OPIVV(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsub, VSUB_FUNCT6)
DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
DEFINE_OPMVX(vmul, VMUL_FUNCT6)
DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
DEFINE_OPMVV(vmul, VMUL_FUNCT6)
DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
......@@ -2670,14 +2727,16 @@ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
DEFINE_OPIVV(vsra, VSRA_FUNCT6)
DEFINE_OPIVX(vsra, VSRA_FUNCT6)
DEFINE_OPIVI(vsra, VSRA_FUNCT6)
DEFINE_OPIVV(vsll, VSLL_FUNCT6)
DEFINE_OPIVX(vsll, VSLL_FUNCT6)
DEFINE_OPIVI(vsll, VSLL_FUNCT6)
DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
......@@ -2694,6 +2753,8 @@ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
......
......@@ -739,6 +739,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
MaskType mask = NoMask);
void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
MaskType mask = NoMask);
void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
MaskType mask = NoMask);
void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
MaskType mask = NoMask);
void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
......@@ -748,7 +757,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask);
- void vfmv_fs(FPURegister fd, VRegister vs2, MaskType mask = NoMask);
+ void vfmv_fs(FPURegister fd, VRegister vs2);
void vfmv_sf(VRegister vd, FPURegister fs);
void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
MaskType mask = NoMask);
#define DEFINE_OPIVV(name, funct6) \
void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
......@@ -763,7 +776,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
MaskType mask = NoMask);
#define DEFINE_OPMVV(name, funct6) \
- void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
#define DEFINE_OPMVX(name, funct6) \
......@@ -774,6 +787,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
#define DEFINE_OPFRED(name, funct6) \
void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
#define DEFINE_OPFVF(name, funct6) \
void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
MaskType mask = NoMask);
......@@ -794,6 +811,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPIVI(vadd, VADD_FUNCT6)
DEFINE_OPIVV(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsub, VSUB_FUNCT6)
DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
DEFINE_OPMVX(vmul, VMUL_FUNCT6)
DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
DEFINE_OPMVV(vmul, VMUL_FUNCT6)
DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
......@@ -864,14 +894,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
DEFINE_OPIVV(vsra, VSRA_FUNCT6)
DEFINE_OPIVX(vsra, VSRA_FUNCT6)
DEFINE_OPIVI(vsra, VSRA_FUNCT6)
DEFINE_OPIVV(vsll, VSLL_FUNCT6)
DEFINE_OPIVX(vsll, VSLL_FUNCT6)
DEFINE_OPIVI(vsll, VSLL_FUNCT6)
DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
......@@ -888,6 +920,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
......@@ -940,6 +973,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
#undef DEFINE_OPMVV_VIE
#undef DEFINE_OPFRED
#define DEFINE_VFUNARY(name, funct6, vs1) \
void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
......@@ -953,6 +987,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
#undef DEFINE_VFUNARY
void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
......
......@@ -712,6 +712,46 @@ enum Opcode : uint32_t {
RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
VDIVU_FUNCT6 = 0b100000,
RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift),
RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift),
VDIV_FUNCT6 = 0b100001,
RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift),
RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift),
VREMU_FUNCT6 = 0b100010,
RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift),
RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift),
VREM_FUNCT6 = 0b100011,
RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift),
RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift),
VMULHU_FUNCT6 = 0b100100,
RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift),
RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift),
VMUL_FUNCT6 = 0b100101,
RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift),
RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift),
VMULHSU_FUNCT6 = 0b100110,
RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
VMULH_FUNCT6 = 0b100111,
RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift),
RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift),
VWADDU_FUNCT6 = 0b110000,
RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift),
RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift),
VWADDUW_FUNCT6 = 0b110101,
RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
VSADDU_FUNCT6 = 0b100000,
RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
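Each RO_V_* constant mirrors the RVV arithmetic-instruction encoding: the 6-bit funct6 lands in bits 31:26 on top of the funct3-qualified OP-V major opcode. Per the RVV spec the layout is:

 31     26 25 24    20 19    15 14    12 11    7 6       0
| funct6 | vm |  vs2  |  vs1   | funct3 |  vd   | 1010111 |

So RO_V_VMUL_VV, for instance, is OP_MVV (the OPMVV funct3) with 0b100101 in the funct6 field.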
......@@ -829,11 +869,20 @@ enum Opcode : uint32_t {
RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
VSRA_FUNCT6 = 0b101001,
RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift),
RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift),
RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift),
VSLL_FUNCT6 = 0b100101,
RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
VSMUL_FUNCT6 = 0b100111,
RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift),
VADC_FUNCT6 = 0b010000,
RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
......@@ -856,6 +905,9 @@ enum Opcode : uint32_t {
VWFUNARY0_FUNCT6 = 0b010000,
RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift),
VRFUNARY0_FUNCT6 = 0b010000,
RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift),
VREDMAXU_FUNCT6 = 0b000110,
RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
VREDMAX_FUNCT6 = 0b000111,
......@@ -878,6 +930,9 @@ enum Opcode : uint32_t {
VFNCVT_F_F_W = 0b10100,
VFCLASS_V = 0b10000,
VFSQRT_V = 0b00000,
VFSQRT7_V = 0b00100,
VFREC7_V = 0b00101,
VFADD_FUNCT6 = 0b000000,
RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
......@@ -921,6 +976,9 @@ enum Opcode : uint32_t {
RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),
VFREDMAX_FUNCT6 = 0b000111,
RO_V_VFREDMAX_VV = OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift),
VFMIN_FUNCT6 = 0b000100,
RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),
......@@ -1788,7 +1846,7 @@ class InstructionGetters : public T {
RVV_LMUL(CAST_VLMUL)
default:
return "unknown";
- #undef CAST_VSEW
+ #undef CAST_VLMUL
}
}
......
......@@ -2159,11 +2159,25 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
// they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
// and JS round semantics specify that rounding of NaN (Infinity) returns NaN
// (Infinity), so NaN and Infinity are considered rounded value too.
- li(scratch, 64 - kFloat32MantissaBits - kFloat32ExponentBits);
+ const int kFloatMantissaBits =
+     sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
+ const int kFloatExponentBits =
+     sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
+ const int kFloatExponentBias =
+     sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
+ li(scratch, 64 - kFloatMantissaBits - kFloatExponentBits);
vsll_vx(v_scratch, src, scratch);
- li(scratch, 64 - kFloat32ExponentBits);
+ li(scratch, 64 - kFloatExponentBits);
vsrl_vx(v_scratch, v_scratch, scratch);
- li(scratch, kFloat32ExponentBias + kFloat32MantissaBits);
+ li(scratch, kFloatExponentBias + kFloatMantissaBits);
vmslt_vx(v0, v_scratch, scratch);
VU.set(frm);
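In scalar terms, the li/vsll/vsrl/vmslt sequence above asks, per lane, "does this value still need rounding?". A minimal C++ sketch of the float case (hypothetical helper, not part of the commit):

#include <cstdint>
#include <cstring>

bool RoundIsIdentity(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  // Shift the sign out, then the biased exponent down, as the
  // vsll_vx/vsrl_vx pair does for each lane.
  uint32_t biased_exp = (bits << 1) >> (1 + 23);
  // Exponent >= bias + mantissa bits means |f| >= 2^23, so f is already
  // an integer; NaN/Infinity (exponent 0xFF) land here too, matching the
  // JS semantics described above.
  return biased_exp >= 127 + 23;  // kFloat32ExponentBias + kFloat32MantissaBits
}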
......
......@@ -374,8 +374,9 @@ constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
- constexpr VRegister kSimd128ScratchReg = v27;
- constexpr VRegister kSimd128ScratchReg2 = v26;
+ constexpr VRegister kSimd128ScratchReg = v26;
+ constexpr VRegister kSimd128ScratchReg2 = v27;
+ constexpr VRegister kSimd128ScratchReg3 = v8;
constexpr VRegister kSimd128RegZero = v25;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
......
......@@ -2706,10 +2706,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Neg, kRiscvI64x2Neg) \
V(I64x2Abs, kRiscvI64x2Abs) \
V(I64x2BitMask, kRiscvI64x2BitMask) \
- V(I64x2Eq, kRiscvI64x2Eq) \
- V(I64x2Ne, kRiscvI64x2Ne) \
- V(I64x2GtS, kRiscvI64x2GtS) \
- V(I64x2GeS, kRiscvI64x2GeS) \
V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \
V(F32x4Abs, kRiscvF32x4Abs) \
......@@ -2780,6 +2776,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Ne, kRiscvF64x2Ne) \
V(F64x2Lt, kRiscvF64x2Lt) \
V(F64x2Le, kRiscvF64x2Le) \
+ V(I64x2Eq, kRiscvI64x2Eq) \
+ V(I64x2Ne, kRiscvI64x2Ne) \
+ V(I64x2GtS, kRiscvI64x2GtS) \
+ V(I64x2GeS, kRiscvI64x2GeS) \
V(I64x2Add, kRiscvI64x2Add) \
V(I64x2Sub, kRiscvI64x2Sub) \
V(I64x2Mul, kRiscvI64x2Mul) \
......
......@@ -1923,6 +1923,9 @@ void Decoder::DecodeRvvIVV(Instruction* instr) {
case RO_V_VSSUB_VV:
Format(instr, "vssub.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VSSUBU_VV:
Format(instr, "vssubu.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VMIN_VV:
Format(instr, "vmin.vv 'vd, 'vs2, 'vs1'vm");
break;
......@@ -1992,6 +1995,18 @@ void Decoder::DecodeRvvIVV(Instruction* instr) {
case RO_V_VNCLIPU_WV:
Format(instr, "vnclipu.wv 'vd, 'vs2, 'vs1");
break;
case RO_V_VSLL_VV:
Format(instr, "vsll.vv 'vd, 'vs2, 'vs1");
break;
case RO_V_VSRL_VV:
Format(instr, "vsrl.vv 'vd, 'vs2, 'vs1");
break;
case RO_V_VSRA_VV:
Format(instr, "vsra.vv 'vd, 'vs2, 'vs1");
break;
case RO_V_VSMUL_VV:
Format(instr, "vsmul.vv 'vd, 'vs2, 'vs1");
break;
default:
UNSUPPORTED_RISCV();
break;
......@@ -2056,6 +2071,9 @@ void Decoder::DecodeRvvIVI(Instruction* instr) {
case RO_V_VSRL_VI:
Format(instr, "vsrl.vi 'vd, 'vs2, 'uimm5'vm");
break;
case RO_V_VSRA_VI:
Format(instr, "vsra.vi 'vd, 'vs2, 'uimm5'vm");
break;
case RO_V_VSLL_VI:
Format(instr, "vsll.vi 'vd, 'vs2, 'uimm5'vm");
break;
......@@ -2184,12 +2202,18 @@ void Decoder::DecodeRvvIVX(Instruction* instr) {
case RO_V_VSRL_VX:
Format(instr, "vsrl.vx 'vd, 'vs2, 'rs1");
break;
case RO_V_VSRA_VX:
Format(instr, "vsra.vx 'vd, 'vs2, 'rs1");
break;
case RO_V_VNCLIP_WX:
Format(instr, "vnclip.wx 'vd, 'vs2, 'rs1");
break;
case RO_V_VNCLIPU_WX:
Format(instr, "vnclipu.wx 'vd, 'vs2, 'rs1");
break;
case RO_V_VSMUL_VX:
Format(instr, "vsmul.vx 'vd, 'vs2, 'vs1");
break;
default:
UNSUPPORTED_RISCV();
break;
......@@ -2235,6 +2259,21 @@ void Decoder::DecodeRvvMVV(Instruction* instr) {
UNSUPPORTED_RISCV();
}
break;
case RO_V_VMUL_VV:
Format(instr, "vmul.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VMULHU_VV:
Format(instr, "vmulhu.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VDIV_VV:
Format(instr, "vdiv.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VDIVU_VV:
Format(instr, "vdivu.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VWADDU_VV:
Format(instr, "vwaddu.vv 'vd, 'vs2, 'vs1'vm");
break;
default:
UNSUPPORTED_RISCV();
break;
......@@ -2251,6 +2290,21 @@ void Decoder::DecodeRvvMVX(Instruction* instr) {
UNSUPPORTED_RISCV();
}
break;
case RO_V_VMUL_VX:
Format(instr, "vmul.vx 'vd, 'vs2, 'rs1'vm");
break;
case RO_V_VMULHU_VX:
Format(instr, "vmulhu.vx 'vd, 'vs2, 'rs1'vm");
break;
case RO_V_VDIV_VX:
Format(instr, "vdiv.vx 'vd, 'vs2, 'rs1'vm");
break;
case RO_V_VDIVU_VX:
Format(instr, "vdivu.vx 'vd, 'vs2, 'rs1'vm");
break;
case RO_V_VWADDUW_VX:
Format(instr, "vwaddu.wx 'vd, 'vs2, 'rs1'vm");
break;
default:
UNSUPPORTED_RISCV();
break;
......@@ -2287,6 +2341,9 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
case VFCLASS_V:
Format(instr, "vfclass.v 'vd, 'vs2'vm");
break;
case VFSQRT_V:
Format(instr, "vfsqrt.v 'vd, 'vs2'vm");
break;
default:
break;
}
......@@ -2306,6 +2363,9 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
case RO_V_VFMAX_VV:
Format(instr, "vfmax.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VFREDMAX_VV:
Format(instr, "vfredmax.vs 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VFMIN_VV:
Format(instr, "vfmin.vv 'vd, 'vs2, 'vs1'vm");
break;
......
......@@ -69,60 +69,6 @@ T Nabs(T a) {
return a < 0 ? a : -a;
}
template <uint64_t N>
struct type_usew_t;
template <>
struct type_usew_t<8> {
using type = uint8_t;
};
template <>
struct type_usew_t<16> {
using type = uint16_t;
};
template <>
struct type_usew_t<32> {
using type = uint32_t;
};
template <>
struct type_usew_t<64> {
using type = uint64_t;
};
template <>
struct type_usew_t<128> {
using type = __uint128_t;
};
template <uint64_t N>
struct type_sew_t;
template <>
struct type_sew_t<8> {
using type = int8_t;
};
template <>
struct type_sew_t<16> {
using type = int16_t;
};
template <>
struct type_sew_t<32> {
using type = int32_t;
};
template <>
struct type_sew_t<64> {
using type = int64_t;
};
template <>
struct type_sew_t<128> {
using type = __int128_t;
};
#if defined(USE_SIMULATOR)
// Running with a simulator.
......@@ -446,6 +392,13 @@ class Simulator : public SimulatorBase {
inline uint64_t rvv_vlenb() const { return vlenb_; }
inline uint32_t rvv_zimm() const { return instr_.Rvvzimm(); }
inline uint32_t rvv_vlmul() const { return (rvv_vtype() & 0x7); }
inline float rvv_vflmul() const {
    if ((rvv_vtype() & 0b100) == 0) {
      // Integer LMUL: vlmul = vtype[2:0] encodes LMUL = 1 << vlmul.
      return static_cast<float>(0x1 << (rvv_vtype() & 0x7));
    } else {
      // Fractional LMUL: vlmul 0b101..0b111 encodes LMUL = 2^(vlmul - 8),
      // i.e. 1/8, 1/4, 1/2. The mask must bind before the subtraction.
      return 1.0f / static_cast<float>(0x1 << (8 - (rvv_vtype() & 0x7)));
    }
  }
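One consumer of this value is the VLMAX computation; a sketch under the usual VLMAX = LMUL * VLEN / SEW formula (helper name hypothetical, not from the commit):

// rvv_vlenb() is VLEN in bytes; SEW in bits is 8 << rvv_vsew().
inline uint64_t rvv_vlmax_sketch() const {
  uint64_t vlen_bits = rvv_vlenb() * 8;
  uint64_t sew_bits = 8ULL << rvv_vsew();
  return static_cast<uint64_t>((vlen_bits / sew_bits) * rvv_vflmul());
}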
inline uint32_t rvv_vsew() const { return ((rvv_vtype() >> 3) & 0x7); }
inline const char* rvv_sew_s() const {
......@@ -470,7 +423,7 @@ class Simulator : public SimulatorBase {
RVV_LMUL(CAST_VLMUL)
default:
return "unknown";
- #undef CAST_VSEW
+ #undef CAST_VLMUL
}
}
......@@ -726,6 +679,60 @@ class Simulator : public SimulatorBase {
// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
template <uint64_t N>
struct type_usew_t;
template <>
struct type_usew_t<8> {
using type = uint8_t;
};
template <>
struct type_usew_t<16> {
using type = uint16_t;
};
template <>
struct type_usew_t<32> {
using type = uint32_t;
};
template <>
struct type_usew_t<64> {
using type = uint64_t;
};
template <>
struct type_usew_t<128> {
using type = __uint128_t;
};
template <uint64_t N>
struct type_sew_t;
template <>
struct type_sew_t<8> {
using type = int8_t;
};
template <>
struct type_sew_t<16> {
using type = int16_t;
};
template <>
struct type_sew_t<32> {
using type = int32_t;
};
template <>
struct type_sew_t<64> {
using type = int64_t;
};
template <>
struct type_sew_t<128> {
using type = __int128_t;
};
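These width-indexed traits map a dynamic SEW onto a concrete C type; the VV_PARAMS macro below uses them to materialize typed references to vector elements. A minimal illustration (hypothetical helper, not from the commit):

// Read element i of a vector register as a signed SEW-wide value.
// type_sew_t<32>::type is int32_t, type_usew_t<64>::type is uint64_t.
template <uint64_t SEW>
typename type_sew_t<SEW>::type ReadElem(const void* vreg, size_t i) {
  using T = typename type_sew_t<SEW>::type;
  return reinterpret_cast<const T*>(vreg)[i];
}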
#define VV_PARAMS(x) \
type_sew_t<x>::type& vd = \
Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
......@@ -806,7 +813,7 @@ class Simulator : public SimulatorBase {
inline void rvv_trace_vd() {
if (::v8::internal::FLAG_trace_sim) {
__int128_t value = Vregister_[rvv_vd_reg()];
SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
SNPrintF(trace_buf_, "%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
*(reinterpret_cast<int64_t*>(&value) + 1),
*reinterpret_cast<int64_t*>(&value), icount_);
}
......