Commit ea06b01e authored by Ng Zhi An, committed by Commit Bot

[wasm-simd] Implement i64x2 add sub for arm

Also some cleanup reordering of instruction codes.

Bug: v8:9813
Change-Id: I35caad0b84dd5824090046cba964454eac45d5d8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1925613
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65088}
parent aafbc138
...@@ -1923,6 +1923,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1923,6 +1923,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ReplaceLane(dst, dst, i.InputRegister(3), NeonS32, lane * 2 + 1); __ ReplaceLane(dst, dst, i.InputRegister(3), NeonS32, lane * 2 + 1);
break; break;
} }
case kArmI64x2Add: {
__ vadd(Neon64, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmI64x2Sub: {
__ vsub(Neon64, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmI64x2Neg: { case kArmI64x2Neg: {
Simd128Register dst = i.OutputSimd128Register(); Simd128Register dst = i.OutputSimd128Register();
__ vmov(dst, static_cast<uint64_t>(0)); __ vmov(dst, static_cast<uint64_t>(0));
......
...@@ -142,12 +142,6 @@ namespace compiler { ...@@ -142,12 +142,6 @@ namespace compiler {
V(ArmF64x2Ne) \ V(ArmF64x2Ne) \
V(ArmF64x2Lt) \ V(ArmF64x2Lt) \
V(ArmF64x2Le) \ V(ArmF64x2Le) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
V(ArmI64x2Neg) \
V(ArmI64x2Shl) \
V(ArmI64x2ShrS) \
V(ArmI64x2ShrU) \
V(ArmF32x4Splat) \ V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \ V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \ V(ArmF32x4ReplaceLane) \
...@@ -169,6 +163,14 @@ namespace compiler { ...@@ -169,6 +163,14 @@ namespace compiler {
V(ArmF32x4Ne) \ V(ArmF32x4Ne) \
V(ArmF32x4Lt) \ V(ArmF32x4Lt) \
V(ArmF32x4Le) \ V(ArmF32x4Le) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
V(ArmI64x2Neg) \
V(ArmI64x2Shl) \
V(ArmI64x2ShrS) \
V(ArmI64x2Add) \
V(ArmI64x2Sub) \
V(ArmI64x2ShrU) \
V(ArmI32x4Splat) \ V(ArmI32x4Splat) \
V(ArmI32x4ExtractLane) \ V(ArmI32x4ExtractLane) \
V(ArmI32x4ReplaceLane) \ V(ArmI32x4ReplaceLane) \
......
...@@ -122,12 +122,6 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -122,12 +122,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF64x2Ne: case kArmF64x2Ne:
case kArmF64x2Lt: case kArmF64x2Lt:
case kArmF64x2Le: case kArmF64x2Le:
case kArmI64x2SplatI32Pair:
case kArmI64x2ReplaceLaneI32Pair:
case kArmI64x2Neg:
case kArmI64x2Shl:
case kArmI64x2ShrS:
case kArmI64x2ShrU:
case kArmF32x4Splat: case kArmF32x4Splat:
case kArmF32x4ExtractLane: case kArmF32x4ExtractLane:
case kArmF32x4ReplaceLane: case kArmF32x4ReplaceLane:
...@@ -149,6 +143,14 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -149,6 +143,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4Ne: case kArmF32x4Ne:
case kArmF32x4Lt: case kArmF32x4Lt:
case kArmF32x4Le: case kArmF32x4Le:
case kArmI64x2SplatI32Pair:
case kArmI64x2ReplaceLaneI32Pair:
case kArmI64x2Neg:
case kArmI64x2Shl:
case kArmI64x2ShrS:
case kArmI64x2Add:
case kArmI64x2Sub:
case kArmI64x2ShrU:
case kArmI32x4Splat: case kArmI32x4Splat:
case kArmI32x4ExtractLane: case kArmI32x4ExtractLane:
case kArmI32x4ReplaceLane: case kArmI32x4ReplaceLane:
......
...@@ -2463,6 +2463,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { ...@@ -2463,6 +2463,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(F32x4Ne, kArmF32x4Ne) \ V(F32x4Ne, kArmF32x4Ne) \
V(F32x4Lt, kArmF32x4Lt) \ V(F32x4Lt, kArmF32x4Lt) \
V(F32x4Le, kArmF32x4Le) \ V(F32x4Le, kArmF32x4Le) \
V(I64x2Add, kArmI64x2Add) \
V(I64x2Sub, kArmI64x2Sub) \
V(I32x4Add, kArmI32x4Add) \ V(I32x4Add, kArmI32x4Add) \
V(I32x4AddHoriz, kArmI32x4AddHoriz) \ V(I32x4AddHoriz, kArmI32x4AddHoriz) \
V(I32x4Sub, kArmI32x4Sub) \ V(I32x4Sub, kArmI32x4Sub) \
......
...@@ -2625,8 +2625,6 @@ void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); } ...@@ -2625,8 +2625,6 @@ void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); }
#if !V8_TARGET_ARCH_IA32 #if !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_IA32 #endif // !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
......
...@@ -4395,8 +4395,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) { ...@@ -4395,8 +4395,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
case Neon32: case Neon32:
Add<uint32_t, kSimd128Size>(this, Vd, Vm, Vn); Add<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
break; break;
default: case Neon64:
UNREACHABLE(); Add<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
break; break;
} }
} else { } else {
...@@ -4828,8 +4828,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) { ...@@ -4828,8 +4828,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
case Neon32: case Neon32:
Sub<uint32_t, kSimd128Size>(this, Vd, Vm, Vn); Sub<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
break; break;
default: case Neon64:
UNREACHABLE(); Sub<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
break; break;
} }
} else { } else {
......
...@@ -1024,7 +1024,6 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) { ...@@ -1024,7 +1024,6 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
LogicalShiftRight); LogicalShiftRight);
} }
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd, void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int64BinOp expected_op) { WasmOpcode opcode, Int64BinOp expected_op) {
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd); WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
...@@ -1104,7 +1103,6 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) { ...@@ -1104,7 +1103,6 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
UnsignedGreaterEqual); UnsignedGreaterEqual);
} }
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 #endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST_NO_LOWERING(F64x2Splat) { WASM_SIMD_TEST_NO_LOWERING(F64x2Splat) {
WasmRunner<int32_t, double> r(execution_tier, lower_simd); WasmRunner<int32_t, double> r(execution_tier, lower_simd);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment