Commit a3752460 authored by LiuYu, committed by Commit Bot

[mips][wasm-simd] Implement double precision conversion

Port: 3b6eb335

Bug: v8:11265

Change-Id: I6ecd95e64b18a8f45f0aaa2f40d15f8c8cd43340
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2704212
Auto-Submit: Liu yu <liuyu@loongson.cn>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/master@{#72842}
parent f3ec6d77
...@@ -2202,6 +2202,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2202,6 +2202,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ctcmsa(MSACSR, kScratchReg); __ ctcmsa(MSACSR, kScratchReg);
break; break;
} }
// f64x2.convert_low_i32x4_s: convert the two low 32-bit signed integer
// lanes of the input into two float64 lanes.
case kMipsF64x2ConvertLowI32x4S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
// xor of a register with itself yields an all-zero vector.
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
// Interleave the low (right) words of the input with zero, widening each
// low 32-bit lane into a 64-bit lane (zeros in the other half).
// NOTE(review): kSimd128RegZero is clobbered here and no longer holds
// zero after this sequence — assumes it is re-zeroed before reuse.
__ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
// Shift left then arithmetic-shift right by 32 sign-extends the low
// 32 bits of each 64-bit lane.
__ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
__ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
// Convert the sign-extended 64-bit integers to float64.
__ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
break;
}
// f64x2.convert_low_i32x4_u: same as above, but unsigned — the interleave
// with zero already zero-extends, so no shift pair is needed.
case kMipsF64x2ConvertLowI32x4U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
// Unsigned 64-bit integer to float64 conversion.
__ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
break;
}
// f64x2.promote_low_f32x4: widen the two low float32 lanes to float64
// (fexupr_d operates on the right/low half of the source).
case kMipsF64x2PromoteLowF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsI64x2Add: { case kMipsI64x2Add: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
...@@ -2365,6 +2386,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2365,6 +2386,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break; break;
} }
// f32x4.demote_f64x2_zero: narrow the two float64 lanes to float32 in the
// low half of the result, with the high two lanes set to zero.
case kMipsF32x4DemoteF64x2Zero: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
// Zero vector supplies the (zeroed) upper half of the packed result.
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
// fexdo_w packs the float64 lanes of both sources down to float32.
__ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI32x4Mul: { case kMipsI32x4Mul: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
...@@ -2649,6 +2677,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2649,6 +2677,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1)); i.InputSimd128Register(1));
break; break;
} }
// i32x4.trunc_sat_f64x2_s_zero: truncate the two float64 lanes to signed
// 32-bit integers (saturating), results in the low two lanes, zeros above.
case kMipsI32x4TruncSatF64x2SZero: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
// Zero vector supplies the zeroed upper half of the packed result.
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
// Truncate float64 lanes to signed 64-bit integers.
__ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
// Saturate each 64-bit value to the signed 32-bit range
// [-2^31, 2^31 - 1] (immediate 31 = bit width minus one).
__ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
// Pack the even (low) words: the two 32-bit results land in the low
// lanes, the zero vector fills the high lanes.
__ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
kSimd128ScratchReg);
break;
}
// i32x4.trunc_sat_f64x2_u_zero: unsigned variant of the case above —
// saturates to [0, 2^32 - 1] instead.
case kMipsI32x4TruncSatF64x2UZero: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
__ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
__ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
kSimd128ScratchReg);
break;
}
case kMipsI16x8Splat: { case kMipsI16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0)); __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
......
...@@ -160,6 +160,9 @@ namespace compiler { ...@@ -160,6 +160,9 @@ namespace compiler {
V(MipsF64x2Floor) \ V(MipsF64x2Floor) \
V(MipsF64x2Trunc) \ V(MipsF64x2Trunc) \
V(MipsF64x2NearestInt) \ V(MipsF64x2NearestInt) \
V(MipsF64x2ConvertLowI32x4S) \
V(MipsF64x2ConvertLowI32x4U) \
V(MipsF64x2PromoteLowF32x4) \
V(MipsI64x2Add) \ V(MipsI64x2Add) \
V(MipsI64x2Sub) \ V(MipsI64x2Sub) \
V(MipsI64x2Mul) \ V(MipsI64x2Mul) \
...@@ -182,6 +185,7 @@ namespace compiler { ...@@ -182,6 +185,7 @@ namespace compiler {
V(MipsF32x4ReplaceLane) \ V(MipsF32x4ReplaceLane) \
V(MipsF32x4SConvertI32x4) \ V(MipsF32x4SConvertI32x4) \
V(MipsF32x4UConvertI32x4) \ V(MipsF32x4UConvertI32x4) \
V(MipsF32x4DemoteF64x2Zero) \
V(MipsI32x4Mul) \ V(MipsI32x4Mul) \
V(MipsI32x4MaxS) \ V(MipsI32x4MaxS) \
V(MipsI32x4MinS) \ V(MipsI32x4MinS) \
...@@ -231,6 +235,8 @@ namespace compiler { ...@@ -231,6 +235,8 @@ namespace compiler {
V(MipsI32x4ExtMulHighI16x8S) \ V(MipsI32x4ExtMulHighI16x8S) \
V(MipsI32x4ExtMulLowI16x8U) \ V(MipsI32x4ExtMulLowI16x8U) \
V(MipsI32x4ExtMulHighI16x8U) \ V(MipsI32x4ExtMulHighI16x8U) \
V(MipsI32x4TruncSatF64x2SZero) \
V(MipsI32x4TruncSatF64x2UZero) \
V(MipsI16x8Splat) \ V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \ V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \ V(MipsI16x8ExtractLaneS) \
......
...@@ -63,6 +63,9 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -63,6 +63,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF64x2Floor: case kMipsF64x2Floor:
case kMipsF64x2Trunc: case kMipsF64x2Trunc:
case kMipsF64x2NearestInt: case kMipsF64x2NearestInt:
case kMipsF64x2ConvertLowI32x4S:
case kMipsF64x2ConvertLowI32x4U:
case kMipsF64x2PromoteLowF32x4:
case kMipsI64x2Add: case kMipsI64x2Add:
case kMipsI64x2Sub: case kMipsI64x2Sub:
case kMipsI64x2Mul: case kMipsI64x2Mul:
...@@ -107,6 +110,7 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -107,6 +110,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF32x4Floor: case kMipsF32x4Floor:
case kMipsF32x4Trunc: case kMipsF32x4Trunc:
case kMipsF32x4NearestInt: case kMipsF32x4NearestInt:
case kMipsF32x4DemoteF64x2Zero:
case kMipsFloat32Max: case kMipsFloat32Max:
case kMipsFloat32Min: case kMipsFloat32Min:
case kMipsFloat32RoundDown: case kMipsFloat32RoundDown:
...@@ -200,6 +204,8 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -200,6 +204,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4ExtMulHighI16x8S: case kMipsI32x4ExtMulHighI16x8S:
case kMipsI32x4ExtMulLowI16x8U: case kMipsI32x4ExtMulLowI16x8U:
case kMipsI32x4ExtMulHighI16x8U: case kMipsI32x4ExtMulHighI16x8U:
case kMipsI32x4TruncSatF64x2SZero:
case kMipsI32x4TruncSatF64x2UZero:
case kMipsI8x16Add: case kMipsI8x16Add:
case kMipsI8x16AddSatS: case kMipsI8x16AddSatS:
case kMipsI8x16AddSatU: case kMipsI8x16AddSatU:
......
...@@ -2115,48 +2115,54 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { ...@@ -2115,48 +2115,54 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \ V(I16x8) \
V(I8x16) V(I8x16)
#define SIMD_UNOP_LIST(V) \ #define SIMD_UNOP_LIST(V) \
V(F64x2Abs, kMipsF64x2Abs) \ V(F64x2Abs, kMipsF64x2Abs) \
V(F64x2Neg, kMipsF64x2Neg) \ V(F64x2Neg, kMipsF64x2Neg) \
V(F64x2Sqrt, kMipsF64x2Sqrt) \ V(F64x2Sqrt, kMipsF64x2Sqrt) \
V(F64x2Ceil, kMipsF64x2Ceil) \ V(F64x2Ceil, kMipsF64x2Ceil) \
V(F64x2Floor, kMipsF64x2Floor) \ V(F64x2Floor, kMipsF64x2Floor) \
V(F64x2Trunc, kMipsF64x2Trunc) \ V(F64x2Trunc, kMipsF64x2Trunc) \
V(F64x2NearestInt, kMipsF64x2NearestInt) \ V(F64x2NearestInt, kMipsF64x2NearestInt) \
V(I64x2Neg, kMipsI64x2Neg) \ V(F64x2ConvertLowI32x4S, kMipsF64x2ConvertLowI32x4S) \
V(I64x2BitMask, kMipsI64x2BitMask) \ V(F64x2ConvertLowI32x4U, kMipsF64x2ConvertLowI32x4U) \
V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \ V(F64x2PromoteLowF32x4, kMipsF64x2PromoteLowF32x4) \
V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \ V(I64x2Neg, kMipsI64x2Neg) \
V(F32x4Abs, kMipsF32x4Abs) \ V(I64x2BitMask, kMipsI64x2BitMask) \
V(F32x4Neg, kMipsF32x4Neg) \ V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
V(F32x4Sqrt, kMipsF32x4Sqrt) \ V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
V(F32x4RecipApprox, kMipsF32x4RecipApprox) \ V(F32x4Abs, kMipsF32x4Abs) \
V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \ V(F32x4Neg, kMipsF32x4Neg) \
V(F32x4Ceil, kMipsF32x4Ceil) \ V(F32x4Sqrt, kMipsF32x4Sqrt) \
V(F32x4Floor, kMipsF32x4Floor) \ V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
V(F32x4Trunc, kMipsF32x4Trunc) \ V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
V(F32x4NearestInt, kMipsF32x4NearestInt) \ V(F32x4Ceil, kMipsF32x4Ceil) \
V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \ V(F32x4Floor, kMipsF32x4Floor) \
V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \ V(F32x4Trunc, kMipsF32x4Trunc) \
V(I32x4Neg, kMipsI32x4Neg) \ V(F32x4NearestInt, kMipsF32x4NearestInt) \
V(I32x4BitMask, kMipsI32x4BitMask) \ V(F32x4DemoteF64x2Zero, kMipsF32x4DemoteF64x2Zero) \
V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \ V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \ V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \ V(I32x4Neg, kMipsI32x4Neg) \
V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \ V(I32x4BitMask, kMipsI32x4BitMask) \
V(I16x8Neg, kMipsI16x8Neg) \ V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \
V(I16x8BitMask, kMipsI16x8BitMask) \ V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \ V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \
V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \ V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \ V(I32x4TruncSatF64x2SZero, kMipsI32x4TruncSatF64x2SZero) \
V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \ V(I32x4TruncSatF64x2UZero, kMipsI32x4TruncSatF64x2UZero) \
V(I8x16Neg, kMipsI8x16Neg) \ V(I16x8Neg, kMipsI16x8Neg) \
V(I8x16Popcnt, kMipsI8x16Popcnt) \ V(I16x8BitMask, kMipsI16x8BitMask) \
V(I8x16BitMask, kMipsI8x16BitMask) \ V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \
V(S128Not, kMipsS128Not) \ V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
V(V32x4AllTrue, kMipsV32x4AllTrue) \ V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \
V(V16x8AllTrue, kMipsV16x8AllTrue) \ V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
V(V8x16AllTrue, kMipsV8x16AllTrue) \ V(I8x16Neg, kMipsI8x16Neg) \
V(I8x16Popcnt, kMipsI8x16Popcnt) \
V(I8x16BitMask, kMipsI8x16BitMask) \
V(S128Not, kMipsS128Not) \
V(V32x4AllTrue, kMipsV32x4AllTrue) \
V(V16x8AllTrue, kMipsV16x8AllTrue) \
V(V8x16AllTrue, kMipsV8x16AllTrue) \
V(V128AnyTrue, kMipsV128AnyTrue) V(V128AnyTrue, kMipsV128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \ #define SIMD_SHIFT_OP_LIST(V) \
......
...@@ -2316,6 +2316,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2316,6 +2316,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kRoundToNearest); kRoundToNearest);
break; break;
} }
// f64x2.convert_low_i32x4_s (mips64): convert the two low 32-bit signed
// integer lanes of the input into two float64 lanes.
case kMips64F64x2ConvertLowI32x4S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
// xor of a register with itself yields an all-zero vector.
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
// Interleave the low (right) words of the input with zero, widening each
// low 32-bit lane into a 64-bit lane (zeros in the other half).
// NOTE(review): kSimd128RegZero is clobbered here and no longer holds
// zero after this sequence — assumes it is re-zeroed before reuse.
__ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
// Shift left then arithmetic-shift right by 32 sign-extends the low
// 32 bits of each 64-bit lane.
__ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
__ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
// Convert the sign-extended 64-bit integers to float64.
__ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
break;
}
// f64x2.convert_low_i32x4_u: same as above, but unsigned — the interleave
// with zero already zero-extends, so no shift pair is needed.
case kMips64F64x2ConvertLowI32x4U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
// Unsigned 64-bit integer to float64 conversion.
__ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
break;
}
// f64x2.promote_low_f32x4: widen the two low float32 lanes to float64
// (fexupr_d operates on the right/low half of the source).
case kMips64F64x2PromoteLowF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I64x2ReplaceLane: { case kMips64I64x2ReplaceLane: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0); Simd128Register src = i.InputSimd128Register(0);
...@@ -2737,6 +2758,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2737,6 +2758,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kRoundToNearest); kRoundToNearest);
break; break;
} }
// f32x4.demote_f64x2_zero (mips64): narrow the two float64 lanes to
// float32 in the low half of the result, with the high two lanes zeroed.
case kMips64F32x4DemoteF64x2Zero: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
// Zero vector supplies the (zeroed) upper half of the packed result.
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
// fexdo_w packs the float64 lanes of both sources down to float32.
__ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I32x4SConvertF32x4: { case kMips64I32x4SConvertF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
...@@ -2811,6 +2839,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2811,6 +2839,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1)); i.InputSimd128Register(1));
break; break;
} }
// i32x4.trunc_sat_f64x2_s_zero (mips64): truncate the two float64 lanes to
// signed 32-bit integers (saturating); results in the low two lanes,
// zeros above.
case kMips64I32x4TruncSatF64x2SZero: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
// Zero vector supplies the zeroed upper half of the packed result.
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
// Truncate float64 lanes to signed 64-bit integers.
__ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
// Saturate each 64-bit value to the signed 32-bit range
// [-2^31, 2^31 - 1] (immediate 31 = bit width minus one).
__ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
// Pack the even (low) words: the two 32-bit results land in the low
// lanes, the zero vector fills the high lanes.
__ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
kSimd128ScratchReg);
break;
}
// i32x4.trunc_sat_f64x2_u_zero: unsigned variant of the case above —
// saturates to [0, 2^32 - 1] instead.
case kMips64I32x4TruncSatF64x2UZero: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
__ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
__ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
kSimd128ScratchReg);
break;
}
case kMips64I16x8Splat: { case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0)); __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
......
...@@ -210,6 +210,9 @@ namespace compiler { ...@@ -210,6 +210,9 @@ namespace compiler {
V(Mips64F64x2Floor) \ V(Mips64F64x2Floor) \
V(Mips64F64x2Trunc) \ V(Mips64F64x2Trunc) \
V(Mips64F64x2NearestInt) \ V(Mips64F64x2NearestInt) \
V(Mips64F64x2ConvertLowI32x4S) \
V(Mips64F64x2ConvertLowI32x4U) \
V(Mips64F64x2PromoteLowF32x4) \
V(Mips64I64x2Splat) \ V(Mips64I64x2Splat) \
V(Mips64I64x2ExtractLane) \ V(Mips64I64x2ExtractLane) \
V(Mips64I64x2ReplaceLane) \ V(Mips64I64x2ReplaceLane) \
...@@ -250,6 +253,7 @@ namespace compiler { ...@@ -250,6 +253,7 @@ namespace compiler {
V(Mips64F32x4Floor) \ V(Mips64F32x4Floor) \
V(Mips64F32x4Trunc) \ V(Mips64F32x4Trunc) \
V(Mips64F32x4NearestInt) \ V(Mips64F32x4NearestInt) \
V(Mips64F32x4DemoteF64x2Zero) \
V(Mips64I32x4SConvertF32x4) \ V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \ V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \ V(Mips64I32x4Neg) \
...@@ -260,6 +264,8 @@ namespace compiler { ...@@ -260,6 +264,8 @@ namespace compiler {
V(Mips64I32x4Abs) \ V(Mips64I32x4Abs) \
V(Mips64I32x4BitMask) \ V(Mips64I32x4BitMask) \
V(Mips64I32x4DotI16x8S) \ V(Mips64I32x4DotI16x8S) \
V(Mips64I32x4TruncSatF64x2SZero) \
V(Mips64I32x4TruncSatF64x2UZero) \
V(Mips64I16x8Splat) \ V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \ V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \ V(Mips64I16x8ExtractLaneS) \
......
...@@ -88,6 +88,9 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -88,6 +88,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F64x2Floor: case kMips64F64x2Floor:
case kMips64F64x2Trunc: case kMips64F64x2Trunc:
case kMips64F64x2NearestInt: case kMips64F64x2NearestInt:
case kMips64F64x2ConvertLowI32x4S:
case kMips64F64x2ConvertLowI32x4U:
case kMips64F64x2PromoteLowF32x4:
case kMips64I64x2Splat: case kMips64I64x2Splat:
case kMips64I64x2ExtractLane: case kMips64I64x2ExtractLane:
case kMips64I64x2ReplaceLane: case kMips64I64x2ReplaceLane:
...@@ -133,6 +136,7 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -133,6 +136,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F32x4Floor: case kMips64F32x4Floor:
case kMips64F32x4Trunc: case kMips64F32x4Trunc:
case kMips64F32x4NearestInt: case kMips64F32x4NearestInt:
case kMips64F32x4DemoteF64x2Zero:
case kMips64F64x2Splat: case kMips64F64x2Splat:
case kMips64F64x2ExtractLane: case kMips64F64x2ExtractLane:
case kMips64F64x2ReplaceLane: case kMips64F64x2ReplaceLane:
...@@ -223,6 +227,8 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -223,6 +227,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4Abs: case kMips64I32x4Abs:
case kMips64I32x4BitMask: case kMips64I32x4BitMask:
case kMips64I32x4DotI16x8S: case kMips64I32x4DotI16x8S:
case kMips64I32x4TruncSatF64x2SZero:
case kMips64I32x4TruncSatF64x2UZero:
case kMips64I8x16Add: case kMips64I8x16Add:
case kMips64I8x16AddSatS: case kMips64I8x16AddSatS:
case kMips64I8x16AddSatU: case kMips64I8x16AddSatU:
......
...@@ -2846,55 +2846,61 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { ...@@ -2846,55 +2846,61 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \ V(I16x8) \
V(I8x16) V(I8x16)
#define SIMD_UNOP_LIST(V) \ #define SIMD_UNOP_LIST(V) \
V(F64x2Abs, kMips64F64x2Abs) \ V(F64x2Abs, kMips64F64x2Abs) \
V(F64x2Neg, kMips64F64x2Neg) \ V(F64x2Neg, kMips64F64x2Neg) \
V(F64x2Sqrt, kMips64F64x2Sqrt) \ V(F64x2Sqrt, kMips64F64x2Sqrt) \
V(F64x2Ceil, kMips64F64x2Ceil) \ V(F64x2Ceil, kMips64F64x2Ceil) \
V(F64x2Floor, kMips64F64x2Floor) \ V(F64x2Floor, kMips64F64x2Floor) \
V(F64x2Trunc, kMips64F64x2Trunc) \ V(F64x2Trunc, kMips64F64x2Trunc) \
V(F64x2NearestInt, kMips64F64x2NearestInt) \ V(F64x2NearestInt, kMips64F64x2NearestInt) \
V(I64x2Neg, kMips64I64x2Neg) \ V(I64x2Neg, kMips64I64x2Neg) \
V(I64x2BitMask, kMips64I64x2BitMask) \ V(I64x2BitMask, kMips64I64x2BitMask) \
V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \ V(F64x2ConvertLowI32x4S, kMips64F64x2ConvertLowI32x4S) \
V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \ V(F64x2ConvertLowI32x4U, kMips64F64x2ConvertLowI32x4U) \
V(F32x4Abs, kMips64F32x4Abs) \ V(F64x2PromoteLowF32x4, kMips64F64x2PromoteLowF32x4) \
V(F32x4Neg, kMips64F32x4Neg) \ V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
V(F32x4Sqrt, kMips64F32x4Sqrt) \ V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
V(F32x4RecipApprox, kMips64F32x4RecipApprox) \ V(F32x4Abs, kMips64F32x4Abs) \
V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \ V(F32x4Neg, kMips64F32x4Neg) \
V(F32x4Ceil, kMips64F32x4Ceil) \ V(F32x4Sqrt, kMips64F32x4Sqrt) \
V(F32x4Floor, kMips64F32x4Floor) \ V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
V(F32x4Trunc, kMips64F32x4Trunc) \ V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
V(F32x4NearestInt, kMips64F32x4NearestInt) \ V(F32x4Ceil, kMips64F32x4Ceil) \
V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low) \ V(F32x4Floor, kMips64F32x4Floor) \
V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \ V(F32x4Trunc, kMips64F32x4Trunc) \
V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low) \ V(F32x4NearestInt, kMips64F32x4NearestInt) \
V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \ V(F32x4DemoteF64x2Zero, kMips64F32x4DemoteF64x2Zero) \
V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \ V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low) \
V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \ V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \
V(I32x4Neg, kMips64I32x4Neg) \ V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low) \
V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \ V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \
V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \ V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \ V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \ V(I32x4Neg, kMips64I32x4Neg) \
V(I32x4Abs, kMips64I32x4Abs) \ V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
V(I32x4BitMask, kMips64I32x4BitMask) \ V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
V(I16x8Neg, kMips64I16x8Neg) \ V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \ V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \ V(I32x4Abs, kMips64I32x4Abs) \
V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \ V(I32x4BitMask, kMips64I32x4BitMask) \
V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \ V(I32x4TruncSatF64x2SZero, kMips64I32x4TruncSatF64x2SZero) \
V(I16x8Abs, kMips64I16x8Abs) \ V(I32x4TruncSatF64x2UZero, kMips64I32x4TruncSatF64x2UZero) \
V(I16x8BitMask, kMips64I16x8BitMask) \ V(I16x8Neg, kMips64I16x8Neg) \
V(I8x16Neg, kMips64I8x16Neg) \ V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
V(I8x16Abs, kMips64I8x16Abs) \ V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
V(I8x16Popcnt, kMips64I8x16Popcnt) \ V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
V(I8x16BitMask, kMips64I8x16BitMask) \ V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
V(S128Not, kMips64S128Not) \ V(I16x8Abs, kMips64I16x8Abs) \
V(V32x4AllTrue, kMips64V32x4AllTrue) \ V(I16x8BitMask, kMips64I16x8BitMask) \
V(V16x8AllTrue, kMips64V16x8AllTrue) \ V(I8x16Neg, kMips64I8x16Neg) \
V(V8x16AllTrue, kMips64V8x16AllTrue) \ V(I8x16Abs, kMips64I8x16Abs) \
V(I8x16Popcnt, kMips64I8x16Popcnt) \
V(I8x16BitMask, kMips64I8x16BitMask) \
V(S128Not, kMips64S128Not) \
V(V32x4AllTrue, kMips64V32x4AllTrue) \
V(V16x8AllTrue, kMips64V16x8AllTrue) \
V(V8x16AllTrue, kMips64V8x16AllTrue) \
V(V128AnyTrue, kMips64V128AnyTrue) V(V128AnyTrue, kMips64V128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \ #define SIMD_SHIFT_OP_LIST(V) \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment