Commit 68bf491f authored by Liu Yu, committed by Commit Bot

[mips][wasm-simd] Prototype i64x2 widen i32x4 instructions

Port: 646bdbf8

Bug: v8:10972
Change-Id: I9b199dc75d0e759a768da55298af383ebeb30e90
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2632351
Auto-Submit: Liu Yu <liuyu@loongson.cn>
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72108}
parent d44ab87f
...@@ -2749,7 +2749,7 @@ void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); } ...@@ -2749,7 +2749,7 @@ void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
// && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_MIPS // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \ #if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
!V8_TARGET_ARCH_ARM !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_MIPS
// TODO(v8:10972) Prototype i64x2 widen i32x4. // TODO(v8:10972) Prototype i64x2 widen i32x4.
void InstructionSelector::VisitI64x2SConvertI32x4Low(Node* node) { void InstructionSelector::VisitI64x2SConvertI32x4Low(Node* node) {
UNIMPLEMENTED(); UNIMPLEMENTED();
...@@ -2767,7 +2767,8 @@ void InstructionSelector::VisitI64x2UConvertI32x4High(Node* node) { ...@@ -2767,7 +2767,8 @@ void InstructionSelector::VisitI64x2UConvertI32x4High(Node* node) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
#endif  // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 #endif  // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
// && !V8_TARGET_ARCH_ARM // && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64 &&
// !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_ARM64 #if !V8_TARGET_ARCH_ARM64
// TODO(v8:11168): Prototyping prefetch. // TODO(v8:11168): Prototyping prefetch.
......
...@@ -2295,6 +2295,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2295,6 +2295,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1)); i.InputSimd128Register(1));
break; break;
} }
case kMipsI64x2SConvertI32x4Low: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_w(kSimd128ScratchReg, src, src);
__ slli_d(dst, kSimd128ScratchReg, 32);
__ srai_d(dst, dst, 32);
break;
}
case kMipsI64x2SConvertI32x4High: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_w(kSimd128ScratchReg, src, src);
__ slli_d(dst, kSimd128ScratchReg, 32);
__ srai_d(dst, dst, 32);
break;
}
case kMipsI64x2UConvertI32x4Low: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI64x2UConvertI32x4High: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI64x2ExtMulLowI32x4S: case kMipsI64x2ExtMulLowI32x4S:
ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_s_d); ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_s_d);
break; break;
......
...@@ -169,6 +169,10 @@ namespace compiler { ...@@ -169,6 +169,10 @@ namespace compiler {
V(MipsI64x2ShrU) \ V(MipsI64x2ShrU) \
V(MipsI64x2BitMask) \ V(MipsI64x2BitMask) \
V(MipsI64x2Eq) \ V(MipsI64x2Eq) \
V(MipsI64x2SConvertI32x4Low) \
V(MipsI64x2SConvertI32x4High) \
V(MipsI64x2UConvertI32x4Low) \
V(MipsI64x2UConvertI32x4High) \
V(MipsI64x2ExtMulLowI32x4S) \ V(MipsI64x2ExtMulLowI32x4S) \
V(MipsI64x2ExtMulHighI32x4S) \ V(MipsI64x2ExtMulHighI32x4S) \
V(MipsI64x2ExtMulLowI32x4U) \ V(MipsI64x2ExtMulLowI32x4U) \
......
...@@ -72,6 +72,10 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -72,6 +72,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI64x2ShrU: case kMipsI64x2ShrU:
case kMipsI64x2BitMask: case kMipsI64x2BitMask:
case kMipsI64x2Eq: case kMipsI64x2Eq:
case kMipsI64x2SConvertI32x4Low:
case kMipsI64x2SConvertI32x4High:
case kMipsI64x2UConvertI32x4Low:
case kMipsI64x2UConvertI32x4High:
case kMipsI64x2ExtMulLowI32x4S: case kMipsI64x2ExtMulLowI32x4S:
case kMipsI64x2ExtMulHighI32x4S: case kMipsI64x2ExtMulHighI32x4S:
case kMipsI64x2ExtMulLowI32x4U: case kMipsI64x2ExtMulLowI32x4U:
......
...@@ -2172,107 +2172,111 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { ...@@ -2172,107 +2172,111 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrS) \ V(I8x16ShrS) \
V(I8x16ShrU) V(I8x16ShrU)
#define SIMD_BINOP_LIST(V) \ #define SIMD_BINOP_LIST(V) \
V(F64x2Add, kMipsF64x2Add) \ V(F64x2Add, kMipsF64x2Add) \
V(F64x2Sub, kMipsF64x2Sub) \ V(F64x2Sub, kMipsF64x2Sub) \
V(F64x2Mul, kMipsF64x2Mul) \ V(F64x2Mul, kMipsF64x2Mul) \
V(F64x2Div, kMipsF64x2Div) \ V(F64x2Div, kMipsF64x2Div) \
V(F64x2Min, kMipsF64x2Min) \ V(F64x2Min, kMipsF64x2Min) \
V(F64x2Max, kMipsF64x2Max) \ V(F64x2Max, kMipsF64x2Max) \
V(F64x2Eq, kMipsF64x2Eq) \ V(F64x2Eq, kMipsF64x2Eq) \
V(F64x2Ne, kMipsF64x2Ne) \ V(F64x2Ne, kMipsF64x2Ne) \
V(F64x2Lt, kMipsF64x2Lt) \ V(F64x2Lt, kMipsF64x2Lt) \
V(F64x2Le, kMipsF64x2Le) \ V(F64x2Le, kMipsF64x2Le) \
V(I64x2Eq, kMipsI64x2Eq) \ V(I64x2Eq, kMipsI64x2Eq) \
V(I64x2Add, kMipsI64x2Add) \ V(I64x2Add, kMipsI64x2Add) \
V(I64x2Sub, kMipsI64x2Sub) \ V(I64x2Sub, kMipsI64x2Sub) \
V(I64x2Mul, kMipsI64x2Mul) \ V(I64x2Mul, kMipsI64x2Mul) \
V(I64x2ExtMulLowI32x4S, kMipsI64x2ExtMulLowI32x4S) \ V(I64x2SConvertI32x4Low, kMipsI64x2SConvertI32x4Low) \
V(I64x2ExtMulHighI32x4S, kMipsI64x2ExtMulHighI32x4S) \ V(I64x2SConvertI32x4High, kMipsI64x2SConvertI32x4High) \
V(I64x2ExtMulLowI32x4U, kMipsI64x2ExtMulLowI32x4U) \ V(I64x2UConvertI32x4Low, kMipsI64x2UConvertI32x4Low) \
V(I64x2ExtMulHighI32x4U, kMipsI64x2ExtMulHighI32x4U) \ V(I64x2UConvertI32x4High, kMipsI64x2UConvertI32x4High) \
V(F32x4Add, kMipsF32x4Add) \ V(I64x2ExtMulLowI32x4S, kMipsI64x2ExtMulLowI32x4S) \
V(F32x4AddHoriz, kMipsF32x4AddHoriz) \ V(I64x2ExtMulHighI32x4S, kMipsI64x2ExtMulHighI32x4S) \
V(F32x4Sub, kMipsF32x4Sub) \ V(I64x2ExtMulLowI32x4U, kMipsI64x2ExtMulLowI32x4U) \
V(F32x4Mul, kMipsF32x4Mul) \ V(I64x2ExtMulHighI32x4U, kMipsI64x2ExtMulHighI32x4U) \
V(F32x4Div, kMipsF32x4Div) \ V(F32x4Add, kMipsF32x4Add) \
V(F32x4Max, kMipsF32x4Max) \ V(F32x4AddHoriz, kMipsF32x4AddHoriz) \
V(F32x4Min, kMipsF32x4Min) \ V(F32x4Sub, kMipsF32x4Sub) \
V(F32x4Eq, kMipsF32x4Eq) \ V(F32x4Mul, kMipsF32x4Mul) \
V(F32x4Ne, kMipsF32x4Ne) \ V(F32x4Div, kMipsF32x4Div) \
V(F32x4Lt, kMipsF32x4Lt) \ V(F32x4Max, kMipsF32x4Max) \
V(F32x4Le, kMipsF32x4Le) \ V(F32x4Min, kMipsF32x4Min) \
V(I32x4Add, kMipsI32x4Add) \ V(F32x4Eq, kMipsF32x4Eq) \
V(I32x4AddHoriz, kMipsI32x4AddHoriz) \ V(F32x4Ne, kMipsF32x4Ne) \
V(I32x4Sub, kMipsI32x4Sub) \ V(F32x4Lt, kMipsF32x4Lt) \
V(I32x4Mul, kMipsI32x4Mul) \ V(F32x4Le, kMipsF32x4Le) \
V(I32x4MaxS, kMipsI32x4MaxS) \ V(I32x4Add, kMipsI32x4Add) \
V(I32x4MinS, kMipsI32x4MinS) \ V(I32x4AddHoriz, kMipsI32x4AddHoriz) \
V(I32x4MaxU, kMipsI32x4MaxU) \ V(I32x4Sub, kMipsI32x4Sub) \
V(I32x4MinU, kMipsI32x4MinU) \ V(I32x4Mul, kMipsI32x4Mul) \
V(I32x4Eq, kMipsI32x4Eq) \ V(I32x4MaxS, kMipsI32x4MaxS) \
V(I32x4Ne, kMipsI32x4Ne) \ V(I32x4MinS, kMipsI32x4MinS) \
V(I32x4GtS, kMipsI32x4GtS) \ V(I32x4MaxU, kMipsI32x4MaxU) \
V(I32x4GeS, kMipsI32x4GeS) \ V(I32x4MinU, kMipsI32x4MinU) \
V(I32x4GtU, kMipsI32x4GtU) \ V(I32x4Eq, kMipsI32x4Eq) \
V(I32x4GeU, kMipsI32x4GeU) \ V(I32x4Ne, kMipsI32x4Ne) \
V(I32x4Abs, kMipsI32x4Abs) \ V(I32x4GtS, kMipsI32x4GtS) \
V(I32x4DotI16x8S, kMipsI32x4DotI16x8S) \ V(I32x4GeS, kMipsI32x4GeS) \
V(I32x4ExtMulLowI16x8S, kMipsI32x4ExtMulLowI16x8S) \ V(I32x4GtU, kMipsI32x4GtU) \
V(I32x4ExtMulHighI16x8S, kMipsI32x4ExtMulHighI16x8S) \ V(I32x4GeU, kMipsI32x4GeU) \
V(I32x4ExtMulLowI16x8U, kMipsI32x4ExtMulLowI16x8U) \ V(I32x4Abs, kMipsI32x4Abs) \
V(I32x4ExtMulHighI16x8U, kMipsI32x4ExtMulHighI16x8U) \ V(I32x4DotI16x8S, kMipsI32x4DotI16x8S) \
V(I16x8Add, kMipsI16x8Add) \ V(I32x4ExtMulLowI16x8S, kMipsI32x4ExtMulLowI16x8S) \
V(I16x8AddSatS, kMipsI16x8AddSatS) \ V(I32x4ExtMulHighI16x8S, kMipsI32x4ExtMulHighI16x8S) \
V(I16x8AddSatU, kMipsI16x8AddSatU) \ V(I32x4ExtMulLowI16x8U, kMipsI32x4ExtMulLowI16x8U) \
V(I16x8AddHoriz, kMipsI16x8AddHoriz) \ V(I32x4ExtMulHighI16x8U, kMipsI32x4ExtMulHighI16x8U) \
V(I16x8Sub, kMipsI16x8Sub) \ V(I16x8Add, kMipsI16x8Add) \
V(I16x8SubSatS, kMipsI16x8SubSatS) \ V(I16x8AddSatS, kMipsI16x8AddSatS) \
V(I16x8SubSatU, kMipsI16x8SubSatU) \ V(I16x8AddSatU, kMipsI16x8AddSatU) \
V(I16x8Mul, kMipsI16x8Mul) \ V(I16x8AddHoriz, kMipsI16x8AddHoriz) \
V(I16x8MaxS, kMipsI16x8MaxS) \ V(I16x8Sub, kMipsI16x8Sub) \
V(I16x8MinS, kMipsI16x8MinS) \ V(I16x8SubSatS, kMipsI16x8SubSatS) \
V(I16x8MaxU, kMipsI16x8MaxU) \ V(I16x8SubSatU, kMipsI16x8SubSatU) \
V(I16x8MinU, kMipsI16x8MinU) \ V(I16x8Mul, kMipsI16x8Mul) \
V(I16x8Eq, kMipsI16x8Eq) \ V(I16x8MaxS, kMipsI16x8MaxS) \
V(I16x8Ne, kMipsI16x8Ne) \ V(I16x8MinS, kMipsI16x8MinS) \
V(I16x8GtS, kMipsI16x8GtS) \ V(I16x8MaxU, kMipsI16x8MaxU) \
V(I16x8GeS, kMipsI16x8GeS) \ V(I16x8MinU, kMipsI16x8MinU) \
V(I16x8GtU, kMipsI16x8GtU) \ V(I16x8Eq, kMipsI16x8Eq) \
V(I16x8GeU, kMipsI16x8GeU) \ V(I16x8Ne, kMipsI16x8Ne) \
V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \ V(I16x8GtS, kMipsI16x8GtS) \
V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \ V(I16x8GeS, kMipsI16x8GeS) \
V(I16x8Q15MulRSatS, kMipsI16x8Q15MulRSatS) \ V(I16x8GtU, kMipsI16x8GtU) \
V(I16x8ExtMulLowI8x16S, kMipsI16x8ExtMulLowI8x16S) \ V(I16x8GeU, kMipsI16x8GeU) \
V(I16x8ExtMulHighI8x16S, kMipsI16x8ExtMulHighI8x16S) \ V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \
V(I16x8ExtMulLowI8x16U, kMipsI16x8ExtMulLowI8x16U) \ V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \
V(I16x8ExtMulHighI8x16U, kMipsI16x8ExtMulHighI8x16U) \ V(I16x8Q15MulRSatS, kMipsI16x8Q15MulRSatS) \
V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \ V(I16x8ExtMulLowI8x16S, kMipsI16x8ExtMulLowI8x16S) \
V(I16x8Abs, kMipsI16x8Abs) \ V(I16x8ExtMulHighI8x16S, kMipsI16x8ExtMulHighI8x16S) \
V(I8x16Add, kMipsI8x16Add) \ V(I16x8ExtMulLowI8x16U, kMipsI16x8ExtMulLowI8x16U) \
V(I8x16AddSatS, kMipsI8x16AddSatS) \ V(I16x8ExtMulHighI8x16U, kMipsI16x8ExtMulHighI8x16U) \
V(I8x16AddSatU, kMipsI8x16AddSatU) \ V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \
V(I8x16Sub, kMipsI8x16Sub) \ V(I16x8Abs, kMipsI16x8Abs) \
V(I8x16SubSatS, kMipsI8x16SubSatS) \ V(I8x16Add, kMipsI8x16Add) \
V(I8x16SubSatU, kMipsI8x16SubSatU) \ V(I8x16AddSatS, kMipsI8x16AddSatS) \
V(I8x16Mul, kMipsI8x16Mul) \ V(I8x16AddSatU, kMipsI8x16AddSatU) \
V(I8x16MaxS, kMipsI8x16MaxS) \ V(I8x16Sub, kMipsI8x16Sub) \
V(I8x16MinS, kMipsI8x16MinS) \ V(I8x16SubSatS, kMipsI8x16SubSatS) \
V(I8x16MaxU, kMipsI8x16MaxU) \ V(I8x16SubSatU, kMipsI8x16SubSatU) \
V(I8x16MinU, kMipsI8x16MinU) \ V(I8x16Mul, kMipsI8x16Mul) \
V(I8x16Eq, kMipsI8x16Eq) \ V(I8x16MaxS, kMipsI8x16MaxS) \
V(I8x16Ne, kMipsI8x16Ne) \ V(I8x16MinS, kMipsI8x16MinS) \
V(I8x16GtS, kMipsI8x16GtS) \ V(I8x16MaxU, kMipsI8x16MaxU) \
V(I8x16GeS, kMipsI8x16GeS) \ V(I8x16MinU, kMipsI8x16MinU) \
V(I8x16GtU, kMipsI8x16GtU) \ V(I8x16Eq, kMipsI8x16Eq) \
V(I8x16GeU, kMipsI8x16GeU) \ V(I8x16Ne, kMipsI8x16Ne) \
V(I8x16RoundingAverageU, kMipsI8x16RoundingAverageU) \ V(I8x16GtS, kMipsI8x16GtS) \
V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \ V(I8x16GeS, kMipsI8x16GeS) \
V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \ V(I8x16GtU, kMipsI8x16GtU) \
V(I8x16Abs, kMipsI8x16Abs) \ V(I8x16GeU, kMipsI8x16GeU) \
V(S128And, kMipsS128And) \ V(I8x16RoundingAverageU, kMipsI8x16RoundingAverageU) \
V(S128Or, kMipsS128Or) \ V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \
V(S128Xor, kMipsS128Xor) \ V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \
V(I8x16Abs, kMipsI8x16Abs) \
V(S128And, kMipsS128And) \
V(S128Or, kMipsS128Or) \
V(S128Xor, kMipsS128Xor) \
V(S128AndNot, kMipsS128AndNot) V(S128AndNot, kMipsS128AndNot)
void InstructionSelector::VisitS128Const(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitS128Const(Node* node) { UNIMPLEMENTED(); }
......
...@@ -2441,6 +2441,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2441,6 +2441,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1)); i.InputSimd128Register(1));
break; break;
} }
case kMips64I64x2SConvertI32x4Low: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_w(kSimd128ScratchReg, src, src);
__ slli_d(dst, kSimd128ScratchReg, 32);
__ srai_d(dst, dst, 32);
break;
}
case kMips64I64x2SConvertI32x4High: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_w(kSimd128ScratchReg, src, src);
__ slli_d(dst, kSimd128ScratchReg, 32);
__ srai_d(dst, dst, 32);
break;
}
case kMips64I64x2UConvertI32x4Low: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I64x2UConvertI32x4High: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64ExtMulLow: { case kMips64ExtMulLow: {
auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode())); auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
__ ExtMulLow(dt, i.OutputSimd128Register(), i.InputSimd128Register(0), __ ExtMulLow(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
......
...@@ -222,6 +222,10 @@ namespace compiler { ...@@ -222,6 +222,10 @@ namespace compiler {
V(Mips64I64x2ShrU) \ V(Mips64I64x2ShrU) \
V(Mips64I64x2BitMask) \ V(Mips64I64x2BitMask) \
V(Mips64I64x2Eq) \ V(Mips64I64x2Eq) \
V(Mips64I64x2SConvertI32x4Low) \
V(Mips64I64x2SConvertI32x4High) \
V(Mips64I64x2UConvertI32x4Low) \
V(Mips64I64x2UConvertI32x4High) \
V(Mips64ExtMulLow) \ V(Mips64ExtMulLow) \
V(Mips64ExtMulHigh) \ V(Mips64ExtMulHigh) \
V(Mips64F32x4Abs) \ V(Mips64F32x4Abs) \
......
...@@ -100,6 +100,10 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -100,6 +100,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I64x2ShrU: case kMips64I64x2ShrU:
case kMips64I64x2BitMask: case kMips64I64x2BitMask:
case kMips64I64x2Eq: case kMips64I64x2Eq:
case kMips64I64x2SConvertI32x4Low:
case kMips64I64x2SConvertI32x4High:
case kMips64I64x2UConvertI32x4Low:
case kMips64I64x2UConvertI32x4High:
case kMips64ExtMulLow: case kMips64ExtMulLow:
case kMips64ExtMulHigh: case kMips64ExtMulHigh:
case kMips64F32x4Abs: case kMips64F32x4Abs:
......
...@@ -2895,6 +2895,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { ...@@ -2895,6 +2895,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Floor, kMips64F32x4Floor) \ V(F32x4Floor, kMips64F32x4Floor) \
V(F32x4Trunc, kMips64F32x4Trunc) \ V(F32x4Trunc, kMips64F32x4Trunc) \
V(F32x4NearestInt, kMips64F32x4NearestInt) \ V(F32x4NearestInt, kMips64F32x4NearestInt) \
V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low) \
V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \
V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low) \
V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \
V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \ V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \ V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
V(I32x4Neg, kMips64I32x4Neg) \ V(I32x4Neg, kMips64I32x4Neg) \
......
...@@ -1807,7 +1807,7 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) { ...@@ -1807,7 +1807,7 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
// TODO(v8:10972) Prototyping i64x2 convert from i32x4. // TODO(v8:10972) Prototyping i64x2 convert from i32x4.
// Tests both signed and unsigned conversion from I32x4 (unpacking). // Tests both signed and unsigned conversion from I32x4 (unpacking).
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || \ #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || \
V8_TARGET_ARCH_ARM V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) { WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
FLAG_SCOPE(wasm_simd_post_mvp); FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
...@@ -1844,7 +1844,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) { ...@@ -1844,7 +1844,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
} }
} }
#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || #endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 ||
// V8_TARGET_ARCH_ARM // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd, void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int32UnOp expected_op) { WasmOpcode opcode, Int32UnOp expected_op) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment