Commit 4ae67baf authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64] Implement WebAssembly SIMD swizzle/TruncSat/extadd/S128Load

Add UseImmediate64(int64_t imm) to instruction-selector-impl.
Bug: v8:11976

Change-Id: I274ab59cc6d9a9cdc8b4081a7c418c56c3e8f5b7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3312453
Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#78288}
parent 272cf914
@@ -762,6 +762,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
MaskType mask = NoMask);
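// vid.v writes each element's index (0, 1, 2, ...) into vd; used to build
// swizzle/shuffle index vectors.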
void vid_v(VRegister vd, MaskType mask = Mask);
#define DEFINE_OPIVV(name, funct6) \
void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
@@ -823,7 +824,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
@@ -984,7 +989,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V)
DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V)
DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V)
DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V)
DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V)
DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W)
DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W)
DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
@@ -1303,6 +1316,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
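// Overload taking raw int8_t encodings of SEW and LMUL; the DCHECKs confine
// them to the supported range before forwarding to set(rd, VSew, Vlmul).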
void set(Register rd, int8_t sew, int8_t lmul) {
DCHECK_GE(sew, E8);
DCHECK_LE(sew, E64);
DCHECK_GE(lmul, m1);
DCHECK_LE(lmul, mf2);
set(rd, VSew(sew), Vlmul(lmul));
}
void set(RoundingMode mode) {
if (mode_ != mode) {
assm_->addi(kScratchReg, zero_reg, mode << kFcsrFrmShift);
@@ -1741,6 +1762,18 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
RegList old_available_;
};
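// Describes a single-lane memory access for LoadLane/StoreLane: the element
// size in bits (sz) and the lane index, wrapped to the number of lanes of
// that width.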
class LoadStoreLaneParams {
public:
int sz;
uint8_t laneidx;
LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
private:
LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
: sz(sz), laneidx(laneidx % lanes) {}
};
} // namespace internal
} // namespace v8
@@ -736,6 +736,14 @@ enum Opcode : uint32_t {
RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift),
RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift),
VWMUL_FUNCT6 = 0b111011,
RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift),
RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift),
VWMULU_FUNCT6 = 0b111000,
RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift),
RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift),
VMULHSU_FUNCT6 = 0b100110,
RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
@@ -744,6 +752,10 @@ enum Opcode : uint32_t {
RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift),
RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift),
VWADD_FUNCT6 = 0b110001,
RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift),
RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift),
VWADDU_FUNCT6 = 0b110000,
RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift),
RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift),
@@ -752,6 +764,9 @@ enum Opcode : uint32_t {
RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
VCOMPRESS_FUNCT6 = 0b010111,
RO_V_VCOMPRESS_VV = OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift),
VSADDU_FUNCT6 = 0b100000,
RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
@@ -895,9 +910,13 @@ enum Opcode : uint32_t {
VWXUNARY0_FUNCT6 = 0b010000,
VRXUNARY0_FUNCT6 = 0b010000,
VMUNARY0_FUNCT6 = 0b010100,
RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift),
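// Value of the vs1 field that selects vid.v within the VMUNARY0 group.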
VID_V = 0b10001,
VXUNARY0_FUNCT6 = 0b010010,
RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift),
@@ -927,7 +946,14 @@ enum Opcode : uint32_t {
VFCVT_X_F_V = 0b00001,
VFCVT_F_XU_V = 0b00010,
VFCVT_F_X_V = 0b00011,
VFWCVT_XU_F_V = 0b01000,
VFWCVT_X_F_V = 0b01001,
VFWCVT_F_XU_V = 0b01010,
VFWCVT_F_X_V = 0b01011,
VFWCVT_F_F_V = 0b01100,
VFNCVT_F_F_W = 0b10100,
VFNCVT_X_F_W = 0b10001,
VFNCVT_XU_F_W = 0b10000,
VFCLASS_V = 0b10000,
VFSQRT_V = 0b00000,
@@ -1193,14 +1219,10 @@ enum FClassFlag {
V(E8) \
V(E16) \
V(E32) \
V(E64) \
V(E128) \
V(E256) \
V(E512) \
V(E1024)
V(E64)
enum VSew {
#define DEFINE_FLAG(name) name,
enum VSew {
RVV_SEW(DEFINE_FLAG)
#undef DEFINE_FLAG
};
@@ -4028,6 +4028,64 @@ void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
vsll_vi(v0, v0, 1);
vmerge_vx(dst, kScratchReg, dst);
}
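// LoadLane replaces lane |laneidx| of |dst| with a scalar loaded from |src|:
// a one-hot lane mask is moved into v0 and vmerge.vx writes the loaded value
// only into the selected lane. In the 8-bit case the 16-lane mask needs 16
// bits, so it is moved into v0 under SEW=64 before switching back to E8.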
void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
MemOperand src) {
if (ts == 8) {
Lbu(kScratchReg2, src);
VU.set(kScratchReg, E64, m1);
li(kScratchReg, 0x1 << laneidx);
vmv_sx(v0, kScratchReg);
VU.set(kScratchReg, E8, m1);
vmerge_vx(dst, kScratchReg2, dst);
} else if (ts == 16) {
Lhu(kScratchReg2, src);
VU.set(kScratchReg, E16, m1);
li(kScratchReg, 0x1 << laneidx);
vmv_sx(v0, kScratchReg);
vmerge_vx(dst, kScratchReg2, dst);
} else if (ts == 32) {
Lwu(kScratchReg2, src);
VU.set(kScratchReg, E32, m1);
li(kScratchReg, 0x1 << laneidx);
vmv_sx(v0, kScratchReg);
vmerge_vx(dst, kScratchReg2, dst);
} else if (ts == 64) {
Ld(kScratchReg2, src);
VU.set(kScratchReg, E64, m1);
li(kScratchReg, 0x1 << laneidx);
vmv_sx(v0, kScratchReg);
vmerge_vx(dst, kScratchReg2, dst);
} else {
UNREACHABLE();
}
}
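// StoreLane extracts lane |laneidx| of |src| by sliding it down to element 0,
// moving element 0 to kScratchReg with vmv.x.s, and storing it with a store of
// the matching width (e.g. StoreLane(32, v1, 2, dst) stores lane 2 of v1 as a
// 32-bit value).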
void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
MemOperand dst) {
if (sz == 8) {
VU.set(kScratchReg, E8, m1);
vslidedown_vi(kSimd128ScratchReg, src, laneidx);
vmv_xs(kScratchReg, kSimd128ScratchReg);
Sb(kScratchReg, dst);
} else if (sz == 16) {
VU.set(kScratchReg, E16, m1);
vslidedown_vi(kSimd128ScratchReg, src, laneidx);
vmv_xs(kScratchReg, kSimd128ScratchReg);
Sh(kScratchReg, dst);
} else if (sz == 32) {
VU.set(kScratchReg, E32, m1);
vslidedown_vi(kSimd128ScratchReg, src, laneidx);
vmv_xs(kScratchReg, kSimd128ScratchReg);
Sw(kScratchReg, dst);
} else {
DCHECK_EQ(sz, 64);
VU.set(kScratchReg, E64, m1);
vslidedown_vi(kSimd128ScratchReg, src, laneidx);
vmv_xs(kScratchReg, kSimd128ScratchReg);
Sd(kScratchReg, dst);
}
}
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -961,6 +961,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Vlmul lmul);
void WasmRvvS128const(VRegister dst, const uint8_t imms[16]);
void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
@@ -55,6 +55,11 @@ namespace internal {
V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
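// Vector registers that are not made available to the register allocator
// (typically reserved as scratch/temporaries by the code generator).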
#define UNALLOACTABLE_VECTOR_REGISTERS(V) \
V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
V(v24) V(v25)
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
V(ft9) V(ft10) V(ft11) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
@@ -204,6 +204,10 @@ class OperandGenerator {
return sequence()->AddImmediate(Constant(immediate));
}
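// 64-bit counterpart of UseImmediate(int); lets the RISC-V selector pass
// 64-bit constants (e.g. packed shuffle/swizzle patterns) to the code
// generator as immediates.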
InstructionOperand UseImmediate64(int64_t immediate) {
return sequence()->AddImmediate(Constant(immediate));
}
InstructionOperand UseImmediate(Node* node) {
return sequence()->AddImmediate(ToConstant(node));
}
@@ -263,7 +263,6 @@ namespace compiler {
V(RiscvI32x4GeU) \
V(RiscvI32x4Abs) \
V(RiscvI32x4BitMask) \
V(RiscvI32x4DotI16x8S) \
V(RiscvI32x4TruncSatF64x2SZero) \
V(RiscvI32x4TruncSatF64x2UZero) \
V(RiscvI16x8Splat) \
@@ -329,6 +328,8 @@ namespace compiler {
V(RiscvS128Not) \
V(RiscvS128Select) \
V(RiscvS128AndNot) \
V(RiscvS128Load64Zero) \
V(RiscvS128Load32Zero) \
V(RiscvI32x4AllTrue) \
V(RiscvI16x8AllTrue) \
V(RiscvV128AnyTrue) \
@@ -356,21 +357,13 @@ namespace compiler {
V(RiscvS8x16InterleaveEven) \
V(RiscvS8x16InterleaveOdd) \
V(RiscvI8x16Shuffle) \
V(RiscvI8x16Swizzle) \
V(RiscvS8x16Concat) \
V(RiscvS8x8Reverse) \
V(RiscvS8x4Reverse) \
V(RiscvS8x2Reverse) \
V(RiscvS128Load8Splat) \
V(RiscvS128Load16Splat) \
V(RiscvS128Load32Splat) \
V(RiscvS128Load64Splat) \
V(RiscvS128Load8x8S) \
V(RiscvS128Load8x8U) \
V(RiscvS128Load16x4S) \
V(RiscvS128Load16x4U) \
V(RiscvS128Load32x2S) \
V(RiscvS128Load32x2U) \
V(RiscvS128LoadSplat) \
V(RiscvS128Load64ExtendS) \
V(RiscvS128Load64ExtendU) \
V(RiscvS128LoadLane) \
V(RiscvS128StoreLane) \
V(RiscvRvvLd) \
@@ -387,6 +380,15 @@ namespace compiler {
V(RiscvI16x8UConvertI8x16High) \
V(RiscvI8x16SConvertI16x8) \
V(RiscvI8x16UConvertI16x8) \
V(RiscvVwmul) \
V(RiscvVwmulu) \
V(RiscvVmvSx) \
V(RiscvVcompress) \
V(RiscvVaddVv) \
V(RiscvVwadd) \
V(RiscvVwaddu) \
V(RiscvVrgather) \
V(RiscvVslidedown) \
V(RiscvWord64AtomicLoadUint64) \
V(RiscvWord64AtomicStoreWord64) \
V(RiscvWord64AtomicAddUint64) \
@@ -228,7 +228,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI32x4UConvertI16x8Low:
case kRiscvI32x4Abs:
case kRiscvI32x4BitMask:
case kRiscvI32x4DotI16x8S:
case kRiscvI8x16Add:
case kRiscvI8x16AddSatS:
case kRiscvI8x16AddSatU:
@@ -287,6 +286,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS128Xor:
case kRiscvS128Const:
case kRiscvS128Zero:
case kRiscvS128Load32Zero:
case kRiscvS128Load64Zero:
case kRiscvS128AllOnes:
case kRiscvS16x8InterleaveEven:
case kRiscvS16x8InterleaveOdd:
@@ -319,7 +320,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS8x4Reverse:
case kRiscvS8x8Reverse:
case kRiscvI8x16Shuffle:
case kRiscvI8x16Swizzle:
case kRiscvVwmul:
case kRiscvVwmulu:
case kRiscvVmvSx:
case kRiscvVcompress:
case kRiscvVaddVv:
case kRiscvVwadd:
case kRiscvVwaddu:
case kRiscvVrgather:
case kRiscvVslidedown:
case kRiscvSar32:
case kRiscvSignExtendByte:
case kRiscvSignExtendShort:
@@ -361,16 +370,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvUlw:
case kRiscvUlwu:
case kRiscvULoadFloat:
case kRiscvS128Load8Splat:
case kRiscvS128Load16Splat:
case kRiscvS128Load32Splat:
case kRiscvS128Load64Splat:
case kRiscvS128Load8x8S:
case kRiscvS128Load8x8U:
case kRiscvS128Load16x4S:
case kRiscvS128Load16x4U:
case kRiscvS128Load32x2S:
case kRiscvS128Load32x2U:
case kRiscvS128LoadSplat:
case kRiscvS128Load64ExtendU:
case kRiscvS128Load64ExtendS:
case kRiscvS128LoadLane:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
@@ -398,7 +398,7 @@ void Decoder::PrintRvcImm8B(Instruction* instr) {
void Decoder::PrintRvvVm(Instruction* instr) {
uint8_t imm = instr->RvvVM();
if (imm == 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " vm");
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " v0.t");
}
}
@@ -2223,6 +2223,14 @@ void Decoder::DecodeRvvIVX(Instruction* instr) {
void Decoder::DecodeRvvMVV(Instruction* instr) {
DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
switch (instr->InstructionBits() & kVTypeMask) {
case RO_V_VMUNARY0: {
if (instr->Vs1Value() == VID_V) {
Format(instr, "vid.v 'rd, 'vs2'vm");
} else {
UNSUPPORTED_RISCV();
}
break;
}
case RO_V_VWXUNARY0:
if (instr->Vs1Value() == 0x0) {
Format(instr, "vmv.x.s 'rd, 'vs2");
@@ -2259,6 +2267,12 @@ void Decoder::DecodeRvvMVV(Instruction* instr) {
UNSUPPORTED_RISCV();
}
break;
case RO_V_VWMUL_VV:
Format(instr, "vwmul.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VWMULU_VV:
Format(instr, "vwmulu.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VMUL_VV:
Format(instr, "vmul.vv 'vd, 'vs2, 'vs1'vm");
break;
@@ -2274,6 +2288,12 @@ void Decoder::DecodeRvvMVV(Instruction* instr) {
case RO_V_VWADDU_VV:
Format(instr, "vwaddu.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VWADD_VV:
Format(instr, "vwadd.vv 'vd, 'vs2, 'vs1'vm");
break;
case RO_V_VCOMPRESS_VV:
Format(instr, "vcompress.vm 'vd, 'vs2, 'vs1'vm");
break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2290,6 +2310,12 @@ void Decoder::DecodeRvvMVX(Instruction* instr) {
UNSUPPORTED_RISCV();
}
break;
case RO_V_VWMUL_VX:
Format(instr, "vwmul.vx 'vd, 'vs2, 'rs1'vm");
break;
case RO_V_VWMULU_VX:
Format(instr, "vwmulu.vx 'vd, 'vs2, 'rs1'vm");
break;
case RO_V_VMUL_VX:
Format(instr, "vmul.vx 'vd, 'vs2, 'rs1'vm");
break;
@@ -2305,6 +2331,12 @@ void Decoder::DecodeRvvMVX(Instruction* instr) {
case RO_V_VWADDUW_VX:
Format(instr, "vwaddu.wx 'vd, 'vs2, 'rs1'vm");
break;
case RO_V_VWADDU_VX:
Format(instr, "vwaddu.vx 'vd, 'vs2, 'rs1'vm");
break;
case RO_V_VWADD_VX:
Format(instr, "vwadd.vx 'vd, 'vs2, 'rs1'vm");
break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2325,12 +2357,33 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
case VFNCVT_F_F_W:
Format(instr, "vfncvt.f.f.w 'vd, 'vs2'vm");
break;
case VFNCVT_X_F_W:
Format(instr, "vfncvt.x.f.w 'vd, 'vs2'vm");
break;
case VFNCVT_XU_F_W:
Format(instr, "vfncvt.xu.f.w 'vd, 'vs2'vm");
break;
case VFCVT_F_X_V:
Format(instr, "vfcvt.f.x.v 'vd, 'vs2'vm");
break;
case VFCVT_F_XU_V:
Format(instr, "vfcvt.f.xu.v 'vd, 'vs2'vm");
break;
case VFWCVT_XU_F_V:
Format(instr, "vfwcvt.xu.f.v 'vd, 'vs2'vm");
break;
case VFWCVT_X_F_V:
Format(instr, "vfwcvt.x.f.v 'vd, 'vs2'vm");
break;
case VFWCVT_F_X_V:
Format(instr, "vfwcvt.f.x.v 'vd, 'vs2'vm");
break;
case VFWCVT_F_XU_V:
Format(instr, "vfwcvt.f.xu.v 'vd, 'vs2'vm");
break;
case VFWCVT_F_F_V:
Format(instr, "vfwcvt.f.f.v 'vd, 'vs2'vm");
break;
default:
UNSUPPORTED_RISCV();
break;
@@ -396,7 +396,7 @@ class Simulator : public SimulatorBase {
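// The branch below computes the LMUL setting as a float: 1, 2, 4 or 8 for the
// integer LMUL encodings, and 1/2, 1/4 or 1/8 for the fractional encodings
// (mf2/mf4/mf8).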
if ((rvv_vtype() & 0b100) == 0) {
return static_cast<float>(0x1 << (rvv_vtype() & 0x7));
} else {
return 1.0 / static_cast<float>(0x1 << (4 - rvv_vtype() & 0x7));
return 1.0 / static_cast<float>(0x1 << (4 - rvv_vtype() & 0x3));
}
}
inline uint32_t rvv_vsew() const { return ((rvv_vtype() >> 3) & 0x7); }
@@ -434,7 +434,7 @@
}
inline uint64_t rvv_vlmax() const {
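// For fractional LMUL (bit 2 of vlmul set): VLMAX = (VLEN / SEW) >> (4 - vlmul[1:0]),
// i.e. divided by 2, 4 or 8 for mf2, mf4 and mf8 respectively.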
if ((rvv_vlmul() & 0b100) != 0) {
return (rvv_vlen() / rvv_sew()) >> (rvv_vlmul() & 0b11);
return (rvv_vlen() / rvv_sew()) >> (4 - (rvv_vlmul() & 0b11));
} else {
return ((rvv_vlen() << rvv_vlmul()) / rvv_sew());
}
@@ -2003,7 +2003,7 @@ TEST(RVV_VSETIVLI) {
HandleScope scope(isolate);
auto fn = [](MacroAssembler& assm) {
__ VU.set(t0, VSew::E8, Vlmul::m1);
__ vsetivli(t0, 16, VSew::E128, Vlmul::m1);
__ vsetivli(t0, 16, VSew::E64, Vlmul::m1);
};
GenAndRunTest(fn);
}