Commit 554a3478 authored by Ng Zhi An, committed by Commit Bot

[wasm-simd] Remove f64x2.convert_i64x2_s and _u

These conversion instructions were removed from the proposal in
https://github.com/WebAssembly/simd/pull/178.

Change-Id: I212ca2f923362bf08e178f6d28cc2338cf6f5927
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2016006
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66015}
parent 60d5f8ce
...@@ -1832,8 +1832,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1832,8 +1832,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0); __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
break; break;
} }
SIMD_UNOP_CASE(kArm64F64x2SConvertI64x2, Scvtf, 2D);
SIMD_UNOP_CASE(kArm64F64x2UConvertI64x2, Ucvtf, 2D);
SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D); SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D); SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D); SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D);
......
...@@ -171,8 +171,6 @@ namespace compiler { ...@@ -171,8 +171,6 @@ namespace compiler {
V(Arm64F64x2Splat) \ V(Arm64F64x2Splat) \
V(Arm64F64x2ExtractLane) \ V(Arm64F64x2ExtractLane) \
V(Arm64F64x2ReplaceLane) \ V(Arm64F64x2ReplaceLane) \
V(Arm64F64x2SConvertI64x2) \
V(Arm64F64x2UConvertI64x2) \
V(Arm64F64x2Abs) \ V(Arm64F64x2Abs) \
V(Arm64F64x2Neg) \ V(Arm64F64x2Neg) \
V(Arm64F64x2Sqrt) \ V(Arm64F64x2Sqrt) \
......
...@@ -141,8 +141,6 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -141,8 +141,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Splat: case kArm64F64x2Splat:
case kArm64F64x2ExtractLane: case kArm64F64x2ExtractLane:
case kArm64F64x2ReplaceLane: case kArm64F64x2ReplaceLane:
case kArm64F64x2SConvertI64x2:
case kArm64F64x2UConvertI64x2:
case kArm64F64x2Abs: case kArm64F64x2Abs:
case kArm64F64x2Neg: case kArm64F64x2Neg:
case kArm64F64x2Sqrt: case kArm64F64x2Sqrt:
......
...@@ -3142,8 +3142,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { ...@@ -3142,8 +3142,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Abs, kArm64F64x2Abs) \ V(F64x2Abs, kArm64F64x2Abs) \
V(F64x2Neg, kArm64F64x2Neg) \ V(F64x2Neg, kArm64F64x2Neg) \
V(F64x2Sqrt, kArm64F64x2Sqrt) \ V(F64x2Sqrt, kArm64F64x2Sqrt) \
V(F64x2SConvertI64x2, kArm64F64x2SConvertI64x2) \
V(F64x2UConvertI64x2, kArm64F64x2UConvertI64x2) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
V(F32x4Abs, kArm64F32x4Abs) \ V(F32x4Abs, kArm64F32x4Abs) \
......
...@@ -1852,10 +1852,6 @@ void InstructionSelector::VisitNode(Node* node) { ...@@ -1852,10 +1852,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitF64x2ExtractLane(node); return MarkAsFloat64(node), VisitF64x2ExtractLane(node);
case IrOpcode::kF64x2ReplaceLane: case IrOpcode::kF64x2ReplaceLane:
return MarkAsSimd128(node), VisitF64x2ReplaceLane(node); return MarkAsSimd128(node), VisitF64x2ReplaceLane(node);
case IrOpcode::kF64x2SConvertI64x2:
return MarkAsSimd128(node), VisitF64x2SConvertI64x2(node);
case IrOpcode::kF64x2UConvertI64x2:
return MarkAsSimd128(node), VisitF64x2UConvertI64x2(node);
case IrOpcode::kF64x2Abs: case IrOpcode::kF64x2Abs:
return MarkAsSimd128(node), VisitF64x2Abs(node); return MarkAsSimd128(node), VisitF64x2Abs(node);
case IrOpcode::kF64x2Neg: case IrOpcode::kF64x2Neg:
...@@ -2626,12 +2622,6 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) { ...@@ -2626,12 +2622,6 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
#if !V8_TARGET_ARCH_X64 #if !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM64 #if !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitF64x2SConvertI64x2(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitF64x2UConvertI64x2(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
......
...@@ -2283,40 +2283,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2283,40 +2283,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
break; break;
} }
case kX64F64x2SConvertI64x2: {
XMMRegister dst = i.OutputSimd128Register();
Register tmp1 = i.TempRegister(0);
Register tmp2 = i.TempRegister(1);
DCHECK_EQ(dst, i.InputSimd128Register(0));
// Move low quardword into tmp1, high quadword into tmp2.
__ movq(tmp1, dst);
__ pextrq(tmp2, dst, 1);
// Convert tmp2, then copy from low to high quadword of dst.
__ cvtqsi2sd(dst, tmp2);
__ movlhps(dst, dst);
// Finally convert tmp1.
__ cvtqsi2sd(dst, tmp1);
break;
}
case kX64F64x2UConvertI64x2: {
XMMRegister dst = i.OutputSimd128Register();
Register tmp = i.TempRegister(0);
XMMRegister tmp_xmm = i.TempSimd128Register(1);
DCHECK_EQ(dst, i.InputSimd128Register(0));
// Extract high quardword.
__ Pextrq(tmp, dst, static_cast<int8_t>(1));
// We cannot convert directly into dst, as the next call to Cvtqui2sd will
// zero it out, so be careful to make sure dst is unique to tmp_xmm.
__ Cvtqui2sd(tmp_xmm, tmp);
// Extract low quadword and convert.
__ Movq(tmp, dst);
__ Cvtqui2sd(dst, tmp);
// Move converted high quadword to top of dst.
__ Movlhps(dst, tmp_xmm);
break;
}
case kX64F64x2ExtractLane: { case kX64F64x2ExtractLane: {
CpuFeatureScope sse_scope(tasm(), SSE4_1); CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1)); __ Pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
......
...@@ -155,8 +155,6 @@ namespace compiler { ...@@ -155,8 +155,6 @@ namespace compiler {
V(X64F64x2Splat) \ V(X64F64x2Splat) \
V(X64F64x2ExtractLane) \ V(X64F64x2ExtractLane) \
V(X64F64x2ReplaceLane) \ V(X64F64x2ReplaceLane) \
V(X64F64x2SConvertI64x2) \
V(X64F64x2UConvertI64x2) \
V(X64F64x2Abs) \ V(X64F64x2Abs) \
V(X64F64x2Neg) \ V(X64F64x2Neg) \
V(X64F64x2Sqrt) \ V(X64F64x2Sqrt) \
......
...@@ -127,8 +127,6 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -127,8 +127,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2Splat: case kX64F64x2Splat:
case kX64F64x2ExtractLane: case kX64F64x2ExtractLane:
case kX64F64x2ReplaceLane: case kX64F64x2ReplaceLane:
case kX64F64x2SConvertI64x2:
case kX64F64x2UConvertI64x2:
case kX64F64x2Abs: case kX64F64x2Abs:
case kX64F64x2Neg: case kX64F64x2Neg:
case kX64F64x2Sqrt: case kX64F64x2Sqrt:
......
...@@ -2913,21 +2913,6 @@ void InstructionSelector::VisitF64x2Neg(Node* node) { ...@@ -2913,21 +2913,6 @@ void InstructionSelector::VisitF64x2Neg(Node* node) {
arraysize(temps), temps); arraysize(temps), temps);
} }
void InstructionSelector::VisitF64x2SConvertI64x2(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
Emit(kX64F64x2SConvertI64x2, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
void InstructionSelector::VisitF64x2UConvertI64x2(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
// Need dst to be unique to temp because Cvtqui2sd will zero temp.
Emit(kX64F64x2UConvertI64x2, g.DefineSameAsFirst(node),
g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
X64OperandGenerator g(this); X64OperandGenerator g(this);
Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node), Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
......
...@@ -309,8 +309,6 @@ MachineType AtomicOpType(Operator const* op) { ...@@ -309,8 +309,6 @@ MachineType AtomicOpType(Operator const* op) {
V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \ V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \ V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \ V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(F64x2SConvertI64x2, Operator::kNoProperties, 1, 0, 1) \
V(F64x2UConvertI64x2, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \ V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \ V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Sqrt, Operator::kNoProperties, 1, 0, 1) \ V(F64x2Sqrt, Operator::kNoProperties, 1, 0, 1) \
......
...@@ -536,8 +536,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final ...@@ -536,8 +536,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// SIMD operators. // SIMD operators.
const Operator* F64x2Splat(); const Operator* F64x2Splat();
const Operator* F64x2SConvertI64x2();
const Operator* F64x2UConvertI64x2();
const Operator* F64x2Abs(); const Operator* F64x2Abs();
const Operator* F64x2Neg(); const Operator* F64x2Neg();
const Operator* F64x2Sqrt(); const Operator* F64x2Sqrt();
......
...@@ -742,8 +742,6 @@ ...@@ -742,8 +742,6 @@
#define MACHINE_SIMD_OP_LIST(V) \ #define MACHINE_SIMD_OP_LIST(V) \
V(F64x2Splat) \ V(F64x2Splat) \
V(F64x2SConvertI64x2) \
V(F64x2UConvertI64x2) \
V(F64x2ExtractLane) \ V(F64x2ExtractLane) \
V(F64x2ReplaceLane) \ V(F64x2ReplaceLane) \
V(F64x2Abs) \ V(F64x2Abs) \
......
...@@ -4068,12 +4068,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { ...@@ -4068,12 +4068,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
switch (opcode) { switch (opcode) {
case wasm::kExprF64x2Splat: case wasm::kExprF64x2Splat:
return graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]); return graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]);
case wasm::kExprF64x2SConvertI64x2:
return graph()->NewNode(mcgraph()->machine()->F64x2SConvertI64x2(),
inputs[0]);
case wasm::kExprF64x2UConvertI64x2:
return graph()->NewNode(mcgraph()->machine()->F64x2UConvertI64x2(),
inputs[0]);
case wasm::kExprF64x2Abs: case wasm::kExprF64x2Abs:
return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]); return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]);
case wasm::kExprF64x2Neg: case wasm::kExprF64x2Neg:
......
...@@ -2534,10 +2534,6 @@ class ThreadImpl { ...@@ -2534,10 +2534,6 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \ Push(WasmValue(Simd128(res))); \
return true; \ return true; \
} }
CONVERT_CASE(F64x2SConvertI64x2, int2, i64x2, float2, 2, 0, int64_t,
static_cast<double>(a))
CONVERT_CASE(F64x2UConvertI64x2, int2, i64x2, float2, 2, 0, uint64_t,
static_cast<double>(a))
CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t, CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
static_cast<float>(a)) static_cast<float>(a))
CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t, CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
......
...@@ -261,7 +261,6 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) { ...@@ -261,7 +261,6 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_F32x4_OP(Le, "le") CASE_F32x4_OP(Le, "le")
CASE_F32x4_OP(Gt, "gt") CASE_F32x4_OP(Gt, "gt")
CASE_F32x4_OP(Ge, "ge") CASE_F32x4_OP(Ge, "ge")
CASE_CONVERT_OP(Convert, F64x2, I64x2, "i64", "convert")
CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert") CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert") CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert") CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert")
......
...@@ -420,8 +420,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&); ...@@ -420,8 +420,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F32x4SConvertI32x4, 0xfdaf, s_s) \ V(F32x4SConvertI32x4, 0xfdaf, s_s) \
V(F32x4UConvertI32x4, 0xfdb0, s_s) \ V(F32x4UConvertI32x4, 0xfdb0, s_s) \
V(S8x16Swizzle, 0xfdc0, s_ss) \ V(S8x16Swizzle, 0xfdc0, s_ss) \
V(F64x2SConvertI64x2, 0xfdb1, s_s) \
V(F64x2UConvertI64x2, 0xfdb2, s_s) \
V(S8x16LoadSplat, 0xfdc2, s_i) \ V(S8x16LoadSplat, 0xfdc2, s_i) \
V(S16x8LoadSplat, 0xfdc3, s_i) \ V(S16x8LoadSplat, 0xfdc3, s_i) \
V(S32x4LoadSplat, 0xfdc4, s_i) \ V(S32x4LoadSplat, 0xfdc4, s_i) \
......
...@@ -1548,32 +1548,6 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) { ...@@ -1548,32 +1548,6 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) {
} }
} }
} }
WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertI64x2) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
// Create two output vectors to hold signed and unsigned results.
double* g0 = r.builder().AddGlobal<double>(kWasmS128);
double* g1 = r.builder().AddGlobal<double>(kWasmS128);
// Build fn to splat test value, perform conversions, and write the results.
byte value = 0;
byte temp1 = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
WASM_SET_GLOBAL(
0, WASM_SIMD_UNOP(kExprF64x2SConvertI64x2, WASM_GET_LOCAL(temp1))),
WASM_SET_GLOBAL(
1, WASM_SIMD_UNOP(kExprF64x2UConvertI64x2, WASM_GET_LOCAL(temp1))),
WASM_ONE);
FOR_INT64_INPUTS(x) {
r.Call(x);
double expected_signed = static_cast<double>(x);
double expected_unsigned = static_cast<double>(static_cast<uint64_t>(x));
for (int i = 0; i < 2; i++) {
CHECK_EQ(expected_signed, ReadLittleEndianValue<double>(&g0[i]));
CHECK_EQ(expected_unsigned, ReadLittleEndianValue<double>(&g1[i]));
}
}
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 #endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST(I32x4Splat) { WASM_SIMD_TEST(I32x4Splat) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment