Commit f0485efb authored by Dusan Simicic, committed by Commit Bot

MIPS[64]: Implement convert SIMD operations

Add support for I32x4SConvertI16x8Low, I32x4SConvertI16x8High,
I32x4UConvertI16x8Low, I32x4UConvertI16x8High, I16x8SConvertI8x16Low,
I16x8SConvertI8x16High, I16x8SConvertI32x4, I16x8UConvertI32x4,
I16x8UConvertI8x16Low, I16x8UConvertI8x16High, I8x16SConvertI16x8,
I8x16UConvertI16x8 operations for mips32 and mips64 architectures.

Bug: 
Change-Id: I32f24956fc8e3c7df7f525bf0d4518161493a3ed
Reviewed-on: https://chromium-review.googlesource.com/517500
Commit-Queue: Ivica Bogosavljevic <ivica.bogosavljevic@imgtec.com>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Mircea Trofin <mtrofin@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Miran Karić <Miran.Karic@imgtec.com>
Reviewed-by: Ivica Bogosavljevic <ivica.bogosavljevic@imgtec.com>
Cr-Commit-Position: refs/heads/master@{#46260}
parent ab028038
......@@ -2154,7 +2154,8 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
......@@ -2182,7 +2183,8 @@ void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
......@@ -2262,7 +2264,8 @@ void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
......@@ -2274,7 +2277,8 @@ void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
......@@ -2304,11 +2308,13 @@ void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
......@@ -2346,11 +2352,13 @@ void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
......
......@@ -2568,6 +2568,110 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
break;
}
case kMipsI32x4SConvertI16x8Low: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_h(kSimd128ScratchReg, src, src);
__ slli_w(dst, kSimd128ScratchReg, 16);
__ srai_w(dst, dst, 16);
break;
}
case kMipsI32x4SConvertI16x8High: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_h(kSimd128ScratchReg, src, src);
__ slli_w(dst, kSimd128ScratchReg, 16);
__ srai_w(dst, dst, 16);
break;
}
case kMipsI32x4UConvertI16x8Low: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI32x4UConvertI16x8High: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI16x8SConvertI8x16Low: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_b(kSimd128ScratchReg, src, src);
__ slli_h(dst, kSimd128ScratchReg, 8);
__ srai_h(dst, dst, 8);
break;
}
case kMipsI16x8SConvertI8x16High: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_b(kSimd128ScratchReg, src, src);
__ slli_h(dst, kSimd128ScratchReg, 8);
__ srai_h(dst, dst, 8);
break;
}
case kMipsI16x8SConvertI32x4: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
__ sat_s_w(kSimd128ScratchReg, src0, 15);
__ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
__ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
case kMipsI16x8UConvertI32x4: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
__ sat_u_w(kSimd128ScratchReg, src0, 15);
__ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
__ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
case kMipsI16x8UConvertI8x16Low: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI16x8UConvertI8x16High: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI8x16SConvertI16x8: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
__ sat_s_h(kSimd128ScratchReg, src0, 7);
__ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
__ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
case kMipsI8x16UConvertI16x8: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
__ sat_u_h(kSimd128ScratchReg, src0, 7);
__ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
__ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
}
return kSuccess;
} // NOLINT(readability/fn_size)
......
......@@ -260,7 +260,19 @@ namespace compiler {
V(MipsS8x4Reverse) \
V(MipsS8x2Reverse) \
V(MipsMsaLd) \
V(MipsMsaSt)
V(MipsMsaSt) \
V(MipsI32x4SConvertI16x8Low) \
V(MipsI32x4SConvertI16x8High) \
V(MipsI32x4UConvertI16x8Low) \
V(MipsI32x4UConvertI16x8High) \
V(MipsI16x8SConvertI8x16Low) \
V(MipsI16x8SConvertI8x16High) \
V(MipsI16x8SConvertI32x4) \
V(MipsI16x8UConvertI32x4) \
V(MipsI16x8UConvertI8x16Low) \
V(MipsI16x8UConvertI8x16High) \
V(MipsI8x16SConvertI16x8) \
V(MipsI8x16UConvertI16x8)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -1945,24 +1945,32 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(16x8) \
V(8x16)
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
V(F32x4Abs, kMipsF32x4Abs) \
V(F32x4Neg, kMipsF32x4Neg) \
V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
V(I32x4Neg, kMipsI32x4Neg) \
V(I16x8Neg, kMipsI16x8Neg) \
V(I8x16Neg, kMipsI8x16Neg) \
V(S128Not, kMipsS128Not) \
V(S1x4AnyTrue, kMipsS1x4AnyTrue) \
V(S1x4AllTrue, kMipsS1x4AllTrue) \
V(S1x8AnyTrue, kMipsS1x8AnyTrue) \
V(S1x8AllTrue, kMipsS1x8AllTrue) \
V(S1x16AnyTrue, kMipsS1x16AnyTrue) \
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
V(F32x4Abs, kMipsF32x4Abs) \
V(F32x4Neg, kMipsF32x4Neg) \
V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
V(I32x4Neg, kMipsI32x4Neg) \
V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
V(I16x8Neg, kMipsI16x8Neg) \
V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
V(I8x16Neg, kMipsI8x16Neg) \
V(S128Not, kMipsS128Not) \
V(S1x4AnyTrue, kMipsS1x4AnyTrue) \
V(S1x4AllTrue, kMipsS1x4AllTrue) \
V(S1x8AnyTrue, kMipsS1x8AnyTrue) \
V(S1x8AllTrue, kMipsS1x8AllTrue) \
V(S1x16AnyTrue, kMipsS1x16AnyTrue) \
V(S1x16AllTrue, kMipsS1x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
......@@ -1976,65 +1984,69 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrS) \
V(I8x16ShrU)
#define SIMD_BINOP_LIST(V) \
V(F32x4Add, kMipsF32x4Add) \
V(F32x4Sub, kMipsF32x4Sub) \
V(F32x4Mul, kMipsF32x4Mul) \
V(F32x4Max, kMipsF32x4Max) \
V(F32x4Min, kMipsF32x4Min) \
V(F32x4Eq, kMipsF32x4Eq) \
V(F32x4Ne, kMipsF32x4Ne) \
V(F32x4Lt, kMipsF32x4Lt) \
V(F32x4Le, kMipsF32x4Le) \
V(I32x4Add, kMipsI32x4Add) \
V(I32x4Sub, kMipsI32x4Sub) \
V(I32x4Mul, kMipsI32x4Mul) \
V(I32x4MaxS, kMipsI32x4MaxS) \
V(I32x4MinS, kMipsI32x4MinS) \
V(I32x4MaxU, kMipsI32x4MaxU) \
V(I32x4MinU, kMipsI32x4MinU) \
V(I32x4Eq, kMipsI32x4Eq) \
V(I32x4Ne, kMipsI32x4Ne) \
V(I32x4GtS, kMipsI32x4GtS) \
V(I32x4GeS, kMipsI32x4GeS) \
V(I32x4GtU, kMipsI32x4GtU) \
V(I32x4GeU, kMipsI32x4GeU) \
V(I16x8Add, kMipsI16x8Add) \
V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
V(I16x8Sub, kMipsI16x8Sub) \
V(I16x8SubSaturateS, kMipsI16x8SubSaturateS) \
V(I16x8SubSaturateU, kMipsI16x8SubSaturateU) \
V(I16x8Mul, kMipsI16x8Mul) \
V(I16x8MaxS, kMipsI16x8MaxS) \
V(I16x8MinS, kMipsI16x8MinS) \
V(I16x8MaxU, kMipsI16x8MaxU) \
V(I16x8MinU, kMipsI16x8MinU) \
V(I16x8Eq, kMipsI16x8Eq) \
V(I16x8Ne, kMipsI16x8Ne) \
V(I16x8GtS, kMipsI16x8GtS) \
V(I16x8GeS, kMipsI16x8GeS) \
V(I16x8GtU, kMipsI16x8GtU) \
V(I16x8GeU, kMipsI16x8GeU) \
V(I8x16Add, kMipsI8x16Add) \
V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
V(I8x16Sub, kMipsI8x16Sub) \
V(I8x16SubSaturateS, kMipsI8x16SubSaturateS) \
V(I8x16SubSaturateU, kMipsI8x16SubSaturateU) \
V(I8x16Mul, kMipsI8x16Mul) \
V(I8x16MaxS, kMipsI8x16MaxS) \
V(I8x16MinS, kMipsI8x16MinS) \
V(I8x16MaxU, kMipsI8x16MaxU) \
V(I8x16MinU, kMipsI8x16MinU) \
V(I8x16Eq, kMipsI8x16Eq) \
V(I8x16Ne, kMipsI8x16Ne) \
V(I8x16GtS, kMipsI8x16GtS) \
V(I8x16GeS, kMipsI8x16GeS) \
V(I8x16GtU, kMipsI8x16GtU) \
V(I8x16GeU, kMipsI8x16GeU) \
V(S128And, kMipsS128And) \
V(S128Or, kMipsS128Or) \
#define SIMD_BINOP_LIST(V) \
V(F32x4Add, kMipsF32x4Add) \
V(F32x4Sub, kMipsF32x4Sub) \
V(F32x4Mul, kMipsF32x4Mul) \
V(F32x4Max, kMipsF32x4Max) \
V(F32x4Min, kMipsF32x4Min) \
V(F32x4Eq, kMipsF32x4Eq) \
V(F32x4Ne, kMipsF32x4Ne) \
V(F32x4Lt, kMipsF32x4Lt) \
V(F32x4Le, kMipsF32x4Le) \
V(I32x4Add, kMipsI32x4Add) \
V(I32x4Sub, kMipsI32x4Sub) \
V(I32x4Mul, kMipsI32x4Mul) \
V(I32x4MaxS, kMipsI32x4MaxS) \
V(I32x4MinS, kMipsI32x4MinS) \
V(I32x4MaxU, kMipsI32x4MaxU) \
V(I32x4MinU, kMipsI32x4MinU) \
V(I32x4Eq, kMipsI32x4Eq) \
V(I32x4Ne, kMipsI32x4Ne) \
V(I32x4GtS, kMipsI32x4GtS) \
V(I32x4GeS, kMipsI32x4GeS) \
V(I32x4GtU, kMipsI32x4GtU) \
V(I32x4GeU, kMipsI32x4GeU) \
V(I16x8Add, kMipsI16x8Add) \
V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
V(I16x8Sub, kMipsI16x8Sub) \
V(I16x8SubSaturateS, kMipsI16x8SubSaturateS) \
V(I16x8SubSaturateU, kMipsI16x8SubSaturateU) \
V(I16x8Mul, kMipsI16x8Mul) \
V(I16x8MaxS, kMipsI16x8MaxS) \
V(I16x8MinS, kMipsI16x8MinS) \
V(I16x8MaxU, kMipsI16x8MaxU) \
V(I16x8MinU, kMipsI16x8MinU) \
V(I16x8Eq, kMipsI16x8Eq) \
V(I16x8Ne, kMipsI16x8Ne) \
V(I16x8GtS, kMipsI16x8GtS) \
V(I16x8GeS, kMipsI16x8GeS) \
V(I16x8GtU, kMipsI16x8GtU) \
V(I16x8GeU, kMipsI16x8GeU) \
V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \
V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \
V(I8x16Add, kMipsI8x16Add) \
V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
V(I8x16Sub, kMipsI8x16Sub) \
V(I8x16SubSaturateS, kMipsI8x16SubSaturateS) \
V(I8x16SubSaturateU, kMipsI8x16SubSaturateU) \
V(I8x16Mul, kMipsI8x16Mul) \
V(I8x16MaxS, kMipsI8x16MaxS) \
V(I8x16MinS, kMipsI8x16MinS) \
V(I8x16MaxU, kMipsI8x16MaxU) \
V(I8x16MinU, kMipsI8x16MinU) \
V(I8x16Eq, kMipsI8x16Eq) \
V(I8x16Ne, kMipsI8x16Ne) \
V(I8x16GtS, kMipsI8x16GtS) \
V(I8x16GeS, kMipsI8x16GeS) \
V(I8x16GtU, kMipsI8x16GtU) \
V(I8x16GeU, kMipsI8x16GeU) \
V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \
V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \
V(S128And, kMipsS128And) \
V(S128Or, kMipsS128Or) \
V(S128Xor, kMipsS128Xor)
void InstructionSelector::VisitS128Zero(Node* node) {
......
......@@ -2887,6 +2887,110 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
break;
}
case kMips64I32x4SConvertI16x8Low: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_h(kSimd128ScratchReg, src, src);
__ slli_w(dst, kSimd128ScratchReg, 16);
__ srai_w(dst, dst, 16);
break;
}
case kMips64I32x4SConvertI16x8High: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_h(kSimd128ScratchReg, src, src);
__ slli_w(dst, kSimd128ScratchReg, 16);
__ srai_w(dst, dst, 16);
break;
}
case kMips64I32x4UConvertI16x8Low: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I32x4UConvertI16x8High: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I16x8SConvertI8x16Low: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvr_b(kSimd128ScratchReg, src, src);
__ slli_h(dst, kSimd128ScratchReg, 8);
__ srai_h(dst, dst, 8);
break;
}
case kMips64I16x8SConvertI8x16High: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(0);
__ ilvl_b(kSimd128ScratchReg, src, src);
__ slli_h(dst, kSimd128ScratchReg, 8);
__ srai_h(dst, dst, 8);
break;
}
case kMips64I16x8SConvertI32x4: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
__ sat_s_w(kSimd128ScratchReg, src0, 15);
__ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
__ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
case kMips64I16x8UConvertI32x4: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
__ sat_u_w(kSimd128ScratchReg, src0, 15);
__ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
__ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
case kMips64I16x8UConvertI8x16Low: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I16x8UConvertI8x16High: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I8x16SConvertI16x8: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
__ sat_s_h(kSimd128ScratchReg, src0, 7);
__ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
__ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
case kMips64I8x16UConvertI16x8: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
__ sat_u_h(kSimd128ScratchReg, src0, 7);
__ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
__ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
break;
}
}
return kSuccess;
} // NOLINT(readability/fn_size)
......
......@@ -294,7 +294,19 @@ namespace compiler {
V(Mips64S8x4Reverse) \
V(Mips64S8x2Reverse) \
V(Mips64MsaLd) \
V(Mips64MsaSt)
V(Mips64MsaSt) \
V(Mips64I32x4SConvertI16x8Low) \
V(Mips64I32x4SConvertI16x8High) \
V(Mips64I32x4UConvertI16x8Low) \
V(Mips64I32x4UConvertI16x8High) \
V(Mips64I16x8SConvertI8x16Low) \
V(Mips64I16x8SConvertI8x16High) \
V(Mips64I16x8SConvertI32x4) \
V(Mips64I16x8UConvertI32x4) \
V(Mips64I16x8UConvertI8x16Low) \
V(Mips64I16x8UConvertI8x16High) \
V(Mips64I8x16SConvertI16x8) \
V(Mips64I8x16UConvertI16x8)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -2694,24 +2694,32 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(16x8) \
V(8x16)
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
V(F32x4Abs, kMips64F32x4Abs) \
V(F32x4Neg, kMips64F32x4Neg) \
V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
V(I32x4Neg, kMips64I32x4Neg) \
V(I16x8Neg, kMips64I16x8Neg) \
V(I8x16Neg, kMips64I8x16Neg) \
V(S128Not, kMips64S128Not) \
V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
V(S1x4AllTrue, kMips64S1x4AllTrue) \
V(S1x8AnyTrue, kMips64S1x8AnyTrue) \
V(S1x8AllTrue, kMips64S1x8AllTrue) \
V(S1x16AnyTrue, kMips64S1x16AnyTrue) \
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
V(F32x4Abs, kMips64F32x4Abs) \
V(F32x4Neg, kMips64F32x4Neg) \
V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
V(I32x4Neg, kMips64I32x4Neg) \
V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
V(I16x8Neg, kMips64I16x8Neg) \
V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
V(I8x16Neg, kMips64I8x16Neg) \
V(S128Not, kMips64S128Not) \
V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
V(S1x4AllTrue, kMips64S1x4AllTrue) \
V(S1x8AnyTrue, kMips64S1x8AnyTrue) \
V(S1x8AllTrue, kMips64S1x8AllTrue) \
V(S1x16AnyTrue, kMips64S1x16AnyTrue) \
V(S1x16AllTrue, kMips64S1x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
......@@ -2725,65 +2733,69 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrS) \
V(I8x16ShrU)
#define SIMD_BINOP_LIST(V) \
V(F32x4Add, kMips64F32x4Add) \
V(F32x4Sub, kMips64F32x4Sub) \
V(F32x4Mul, kMips64F32x4Mul) \
V(F32x4Max, kMips64F32x4Max) \
V(F32x4Min, kMips64F32x4Min) \
V(F32x4Eq, kMips64F32x4Eq) \
V(F32x4Ne, kMips64F32x4Ne) \
V(F32x4Lt, kMips64F32x4Lt) \
V(F32x4Le, kMips64F32x4Le) \
V(I32x4Add, kMips64I32x4Add) \
V(I32x4Sub, kMips64I32x4Sub) \
V(I32x4Mul, kMips64I32x4Mul) \
V(I32x4MaxS, kMips64I32x4MaxS) \
V(I32x4MinS, kMips64I32x4MinS) \
V(I32x4MaxU, kMips64I32x4MaxU) \
V(I32x4MinU, kMips64I32x4MinU) \
V(I32x4Eq, kMips64I32x4Eq) \
V(I32x4Ne, kMips64I32x4Ne) \
V(I32x4GtS, kMips64I32x4GtS) \
V(I32x4GeS, kMips64I32x4GeS) \
V(I32x4GtU, kMips64I32x4GtU) \
V(I32x4GeU, kMips64I32x4GeU) \
V(I16x8Add, kMips64I16x8Add) \
V(I16x8AddSaturateS, kMips64I16x8AddSaturateS) \
V(I16x8AddSaturateU, kMips64I16x8AddSaturateU) \
V(I16x8Sub, kMips64I16x8Sub) \
V(I16x8SubSaturateS, kMips64I16x8SubSaturateS) \
V(I16x8SubSaturateU, kMips64I16x8SubSaturateU) \
V(I16x8Mul, kMips64I16x8Mul) \
V(I16x8MaxS, kMips64I16x8MaxS) \
V(I16x8MinS, kMips64I16x8MinS) \
V(I16x8MaxU, kMips64I16x8MaxU) \
V(I16x8MinU, kMips64I16x8MinU) \
V(I16x8Eq, kMips64I16x8Eq) \
V(I16x8Ne, kMips64I16x8Ne) \
V(I16x8GtS, kMips64I16x8GtS) \
V(I16x8GeS, kMips64I16x8GeS) \
V(I16x8GtU, kMips64I16x8GtU) \
V(I16x8GeU, kMips64I16x8GeU) \
V(I8x16Add, kMips64I8x16Add) \
V(I8x16AddSaturateS, kMips64I8x16AddSaturateS) \
V(I8x16AddSaturateU, kMips64I8x16AddSaturateU) \
V(I8x16Sub, kMips64I8x16Sub) \
V(I8x16SubSaturateS, kMips64I8x16SubSaturateS) \
V(I8x16SubSaturateU, kMips64I8x16SubSaturateU) \
V(I8x16Mul, kMips64I8x16Mul) \
V(I8x16MaxS, kMips64I8x16MaxS) \
V(I8x16MinS, kMips64I8x16MinS) \
V(I8x16MaxU, kMips64I8x16MaxU) \
V(I8x16MinU, kMips64I8x16MinU) \
V(I8x16Eq, kMips64I8x16Eq) \
V(I8x16Ne, kMips64I8x16Ne) \
V(I8x16GtS, kMips64I8x16GtS) \
V(I8x16GeS, kMips64I8x16GeS) \
V(I8x16GtU, kMips64I8x16GtU) \
V(I8x16GeU, kMips64I8x16GeU) \
V(S128And, kMips64S128And) \
V(S128Or, kMips64S128Or) \
#define SIMD_BINOP_LIST(V) \
V(F32x4Add, kMips64F32x4Add) \
V(F32x4Sub, kMips64F32x4Sub) \
V(F32x4Mul, kMips64F32x4Mul) \
V(F32x4Max, kMips64F32x4Max) \
V(F32x4Min, kMips64F32x4Min) \
V(F32x4Eq, kMips64F32x4Eq) \
V(F32x4Ne, kMips64F32x4Ne) \
V(F32x4Lt, kMips64F32x4Lt) \
V(F32x4Le, kMips64F32x4Le) \
V(I32x4Add, kMips64I32x4Add) \
V(I32x4Sub, kMips64I32x4Sub) \
V(I32x4Mul, kMips64I32x4Mul) \
V(I32x4MaxS, kMips64I32x4MaxS) \
V(I32x4MinS, kMips64I32x4MinS) \
V(I32x4MaxU, kMips64I32x4MaxU) \
V(I32x4MinU, kMips64I32x4MinU) \
V(I32x4Eq, kMips64I32x4Eq) \
V(I32x4Ne, kMips64I32x4Ne) \
V(I32x4GtS, kMips64I32x4GtS) \
V(I32x4GeS, kMips64I32x4GeS) \
V(I32x4GtU, kMips64I32x4GtU) \
V(I32x4GeU, kMips64I32x4GeU) \
V(I16x8Add, kMips64I16x8Add) \
V(I16x8AddSaturateS, kMips64I16x8AddSaturateS) \
V(I16x8AddSaturateU, kMips64I16x8AddSaturateU) \
V(I16x8Sub, kMips64I16x8Sub) \
V(I16x8SubSaturateS, kMips64I16x8SubSaturateS) \
V(I16x8SubSaturateU, kMips64I16x8SubSaturateU) \
V(I16x8Mul, kMips64I16x8Mul) \
V(I16x8MaxS, kMips64I16x8MaxS) \
V(I16x8MinS, kMips64I16x8MinS) \
V(I16x8MaxU, kMips64I16x8MaxU) \
V(I16x8MinU, kMips64I16x8MinU) \
V(I16x8Eq, kMips64I16x8Eq) \
V(I16x8Ne, kMips64I16x8Ne) \
V(I16x8GtS, kMips64I16x8GtS) \
V(I16x8GeS, kMips64I16x8GeS) \
V(I16x8GtU, kMips64I16x8GtU) \
V(I16x8GeU, kMips64I16x8GeU) \
V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
V(I8x16Add, kMips64I8x16Add) \
V(I8x16AddSaturateS, kMips64I8x16AddSaturateS) \
V(I8x16AddSaturateU, kMips64I8x16AddSaturateU) \
V(I8x16Sub, kMips64I8x16Sub) \
V(I8x16SubSaturateS, kMips64I8x16SubSaturateS) \
V(I8x16SubSaturateU, kMips64I8x16SubSaturateU) \
V(I8x16Mul, kMips64I8x16Mul) \
V(I8x16MaxS, kMips64I8x16MaxS) \
V(I8x16MinS, kMips64I8x16MinS) \
V(I8x16MaxU, kMips64I8x16MaxU) \
V(I8x16MinU, kMips64I8x16MinU) \
V(I8x16Eq, kMips64I8x16Eq) \
V(I8x16Ne, kMips64I8x16Ne) \
V(I8x16GtS, kMips64I8x16GtS) \
V(I8x16GeS, kMips64I8x16GeS) \
V(I8x16GtU, kMips64I8x16GtU) \
V(I8x16GeU, kMips64I8x16GeU) \
V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
V(S128And, kMips64S128And) \
V(S128Or, kMips64S128Or) \
V(S128Xor, kMips64S128Xor)
void InstructionSelector::VisitS128Zero(Node* node) {
......
......@@ -902,7 +902,8 @@ WASM_SIMD_TEST(I32x4ConvertF32x4) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET ||
// V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I16x8 (unpacking).
WASM_SIMD_TEST(I32x4ConvertI16x8) {
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -927,7 +928,8 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned));
}
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
......@@ -1075,7 +1077,8 @@ WASM_SIMD_TEST(I32x4ShrU) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
// SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -1099,7 +1102,8 @@ WASM_SIMD_TEST(I16x8ConvertI8x16) {
CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned));
}
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if SIMD_LOWERING_TARGET || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
......@@ -1120,7 +1124,8 @@ WASM_SIMD_TEST(I16x8Neg) { RunI16x8UnOpTest(kExprI16x8Neg, Negate); }
// V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I32x4 (packing).
WASM_SIMD_TEST(I16x8ConvertI32x4) {
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -1148,7 +1153,8 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
CHECK_EQ(1, r.Call(*i, packed_signed, packed_unsigned));
}
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
......@@ -1306,7 +1312,8 @@ WASM_SIMD_TEST(I8x16Neg) { RunI8x16UnOpTest(kExprI8x16Neg, Negate); }
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64 || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -1334,7 +1341,8 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
CHECK_EQ(1, r.Call(*i, packed_signed, packed_unsigned));
}
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment