Commit c8c03c15 authored by gdeepti's avatar gdeepti Committed by Commit bot

[wasm] Implement wasm x64 I16x8 Ops

 - Add I16x8 Splat, ExtractLane, ReplaceLane, shift ops, some binops, and compare ops
 - Add pshufhw, pshuflw in the assembler, disassembler
 - Fix an incorrect ModRM encoding for pextrw; the bug disregarded the allocated register and always made pextrw use rax.
 - Fix the pextrw disassembly to mask the lane immediate to the range 0-7 instead of 0-3.
 - Pextrw and pinsrw in the assembler use 128-bit encodings, while pextrw and pinsrw in the disassembler used legacy encodings; fix these inconsistencies, which caused weird disassembly output when --print-code is used.

Review-Url: https://codereview.chromium.org/2767983002
Cr-Commit-Position: refs/heads/master@{#44664}
parent 5b9b7765
......@@ -2198,31 +2198,19 @@ void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
void InstructionSelector::VisitI32x4LtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4LeU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
......@@ -2245,35 +2233,51 @@ void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8LtU(Node* node) { UNIMPLEMENTED(); }
......@@ -2370,9 +2374,11 @@ void InstructionSelector::VisitS32x4Select(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
// !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS16x8Select(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS8x16Select(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4And(Node* node) { UNIMPLEMENTED(); }
......
......@@ -2235,7 +2235,103 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, dst);
break;
}
case kX64S32x4Select: {
case kX64I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
__ pshuflw(dst, dst, 0x0);
__ pshufhw(dst, dst, 0x0);
__ pshufd(dst, dst, 0x0);
break;
}
case kX64I16x8ExtractLane: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
Register dst = i.OutputRegister();
__ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsxwl(dst, dst);
break;
}
case kX64I16x8ReplaceLane: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
} else {
__ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
}
break;
}
case kX64I16x8Shl: {
__ psllw(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kX64I16x8ShrS: {
__ psraw(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kX64I16x8Add: {
__ paddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8AddSaturateS: {
__ paddsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Sub: {
__ psubw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8SubSaturateS: {
__ psubsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Mul: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MinS: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MaxS: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Eq: {
__ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8Ne: {
__ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kX64I16x8ShrU: {
__ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kX64I16x8AddSaturateU: {
__ paddusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8SubSaturateU: {
__ psubusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MinU: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MaxU: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64S128Select: {
// Mask used here is stored in dst.
XMMRegister dst = i.OutputSimd128Register();
__ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
......
......@@ -158,7 +158,26 @@ namespace compiler {
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
V(X64S32x4Select) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLane) \
V(X64I16x8ReplaceLane) \
V(X64I16x8Shl) \
V(X64I16x8ShrS) \
V(X64I16x8Add) \
V(X64I16x8AddSaturateS) \
V(X64I16x8Sub) \
V(X64I16x8SubSaturateS) \
V(X64I16x8Mul) \
V(X64I16x8MinS) \
V(X64I16x8MaxS) \
V(X64I16x8Eq) \
V(X64I16x8Ne) \
V(X64I16x8ShrU) \
V(X64I16x8AddSaturateU) \
V(X64I16x8SubSaturateU) \
V(X64I16x8MinU) \
V(X64I16x8MaxU) \
V(X64S128Select) \
V(X64S128Zero)
// Addressing modes represent the "shape" of inputs to an instruction.
......
......@@ -138,8 +138,27 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4ShrU:
case kX64I32x4MinU:
case kX64I32x4MaxU:
case kX64I16x8Splat:
case kX64I16x8ExtractLane:
case kX64I16x8ReplaceLane:
case kX64I16x8Shl:
case kX64I16x8ShrS:
case kX64I16x8Add:
case kX64I16x8AddSaturateS:
case kX64I16x8Sub:
case kX64I16x8SubSaturateS:
case kX64I16x8Mul:
case kX64I16x8MinS:
case kX64I16x8MaxS:
case kX64I16x8Eq:
case kX64I16x8Ne:
case kX64I16x8ShrU:
case kX64I16x8AddSaturateU:
case kX64I16x8SubSaturateU:
case kX64I16x8MinU:
case kX64I16x8MaxU:
case kX64S128Select:
case kX64S128Zero:
case kX64S32x4Select:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
......
......@@ -2438,7 +2438,13 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) V(I32x4)
#define SIMD_TYPES(V) \
V(I32x4) \
V(I16x8)
#define SIMD_FORMAT_LIST(V) \
V(32x4) \
V(16x8)
#define SIMD_ZERO_OP_LIST(V) \
V(S128Zero) \
......@@ -2446,11 +2452,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(S1x8Zero) \
V(S1x16Zero)
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU)
#define SIMD_BINOP_LIST(V) \
V(I32x4Add) \
V(I32x4Sub) \
......@@ -2460,7 +2461,28 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4MinU) \
V(I32x4MaxU)
V(I32x4MaxU) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
V(I16x8Eq) \
V(I16x8Ne) \
V(I16x8AddSaturateU) \
V(I16x8SubSaturateU) \
V(I16x8MinU) \
V(I16x8MaxU)
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU) \
V(I16x8Shl) \
V(I16x8ShrS) \
V(I16x8ShrU)
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
......@@ -2519,12 +2541,15 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
void InstructionSelector::VisitS32x4Select(Node* node) {
X64OperandGenerator g(this);
Emit(kX64S32x4Select, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
g.UseRegister(node->InputAt(2)));
}
#define SIMD_VISIT_SELECT_OP(format) \
void InstructionSelector::VisitS##format##Select(Node* node) { \
X64OperandGenerator g(this); \
Emit(kX64S128Select, g.DefineSameAsFirst(node), \
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
g.UseRegister(node->InputAt(2))); \
}
SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
#undef SIMD_VISIT_SELECT_OP
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
......
......@@ -935,7 +935,6 @@ void Assembler::cld() {
emit(0xFC);
}
void Assembler::cdq() {
EnsureSpace ensure_space(this);
emit(0x99);
......@@ -2892,11 +2891,11 @@ void Assembler::pextrw(Register dst, XMMRegister src, int8_t imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x3A);
emit(0x15);
emit_sse_operand(dst, src);
emit_sse_operand(src, dst);
emit(imm8);
}
......@@ -4636,6 +4635,26 @@ void Assembler::psrldq(XMMRegister dst, uint8_t shift) {
emit(shift);
}
// Emits PSHUFHW xmm(dst), xmm(src), imm8 — encoding F3 0F 70 /r ib.
// Shuffles the four high 16-bit words of src into the high quadword of dst
// using the 2-bit selectors packed in `shuffle`; the low quadword is copied
// through unchanged (per the Intel SDM PSHUFHW reference).
void Assembler::pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0xF3);  // mandatory F3 prefix selecting the PSHUFHW form of 0F 70
emit_optional_rex_32(dst, src);  // REX only emitted if xmm8-xmm15 involved
emit(0x0F);
emit(0x70);
emit_sse_operand(dst, src);  // ModRM: dst in reg field, src in r/m field
emit(shuffle);  // imm8 word selectors, 2 bits per destination word
}
// Emits PSHUFLW xmm(dst), xmm(src), imm8 — encoding F2 0F 70 /r ib.
// Shuffles the four low 16-bit words of src into the low quadword of dst
// using the 2-bit selectors packed in `shuffle`; the high quadword is copied
// through unchanged (per the Intel SDM PSHUFLW reference).
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0xF2);  // mandatory F2 prefix selecting the PSHUFLW form of 0F 70
emit_optional_rex_32(dst, src);  // REX only emitted if xmm8-xmm15 involved
emit(0x0F);
emit(0x70);
emit_sse_operand(dst, src);  // ModRM: dst in reg field, src in r/m field
emit(shuffle);  // imm8 word selectors, 2 bits per destination word
}
void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0x66);
......
......@@ -1305,6 +1305,8 @@ class Assembler : public AssemblerBase {
void psrldq(XMMRegister dst, uint8_t shift);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void cvtdq2ps(XMMRegister dst, XMMRegister src);
void cvtdq2ps(XMMRegister dst, const Operand& src);
......
......@@ -1682,7 +1682,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pextrw "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 7);
current += 1;
} else if (third_byte == 0x16) {
get_modrm(*current, &mod, &regop, &rm);
......@@ -1788,6 +1788,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (opcode == 0xB1) {
current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
} else if (opcode == 0xC4) {
AppendToBuffer("pinsrw %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", (*current) & 7);
current += 1;
} else {
const char* mnemonic = "?";
if (opcode == 0x54) {
......@@ -1824,10 +1829,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "punpckhdq";
} else if (opcode == 0x6B) {
mnemonic = "packssdw";
} else if (opcode == 0xC4) {
mnemonic = "pinsrw";
} else if (opcode == 0xC5) {
mnemonic = "pextrw";
} else if (opcode == 0xD1) {
mnemonic = "psrlw";
} else if (opcode == 0xD2) {
......@@ -1941,6 +1942,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x70) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pshuflw %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(", %d", (*current) & 7);
current += 1;
} else if (opcode == 0xC2) {
// Intel manual 2A, Table 3-18.
int mod, regop, rm;
......@@ -1996,6 +2004,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("cvttss2si%c %s,",
operand_size_code(), NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x70) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pshufhw %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(", %d", (*current) & 7);
current += 1;
} else if (opcode == 0x7E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
......
......@@ -469,6 +469,9 @@ TEST(DisasmX64) {
__ punpckldq(xmm5, Operand(rdx, 4));
__ punpckhdq(xmm8, xmm15);
__ pshuflw(xmm2, xmm4, 3);
__ pshufhw(xmm1, xmm9, 6);
#define EMIT_SSE2_INSTR(instruction, notUsed1, notUsed2, notUsed3) \
__ instruction(xmm5, xmm1); \
__ instruction(xmm5, Operand(rdx, 4));
......@@ -521,6 +524,7 @@ TEST(DisasmX64) {
__ insertps(xmm5, xmm1, 123);
__ extractps(rax, xmm1, 0);
__ pextrw(rbx, xmm2, 1);
__ pinsrw(xmm2, rcx, 1);
__ pextrd(rbx, xmm15, 0);
__ pextrd(r12, xmm0, 1);
__ pinsrd(xmm9, r9, 0);
......
......@@ -686,8 +686,7 @@ WASM_EXEC_COMPILED_TEST(I32x4ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
#if V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
WASM_EXEC_COMPILED_TEST(I16x8Splat) {
FLAG_wasm_simd_prototype = true;
......@@ -750,7 +749,9 @@ WASM_EXEC_COMPILED_TEST(I16x8ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM
WASM_EXEC_COMPILED_TEST(I8x16Splat) {
FLAG_wasm_simd_prototype = true;
......@@ -1204,7 +1205,9 @@ WASM_EXEC_COMPILED_TEST(I16x8ConvertI32x4) {
CHECK_EQ(1, r.Call(*i, packed_signed, packed_unsigned));
}
}
#endif // V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
void RunI16x8BinOpTest(WasmOpcode simd_op, Int16BinOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -1288,7 +1291,9 @@ WASM_EXEC_COMPILED_TEST(I16x8Eq) { RunI16x8CompareOpTest(kExprI16x8Eq, Equal); }
WASM_EXEC_COMPILED_TEST(I16x8Ne) {
RunI16x8CompareOpTest(kExprI16x8Ne, NotEqual);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM
WASM_EXEC_COMPILED_TEST(I16x8LtS) {
RunI16x8CompareOpTest(kExprI16x8LtS, Less);
}
......@@ -1320,7 +1325,9 @@ WASM_EXEC_COMPILED_TEST(I16x8LtU) {
WASM_EXEC_COMPILED_TEST(I16x8LeU) {
RunI16x8CompareOpTest(kExprI16x8LeU, UnsignedLessEqual);
}
#endif // V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
void RunI16x8ShiftOpTest(WasmOpcode simd_op, Int16ShiftOp expected_op,
int shift) {
FLAG_wasm_simd_prototype = true;
......@@ -1347,7 +1354,9 @@ WASM_EXEC_COMPILED_TEST(I16x8ShrS) {
WASM_EXEC_COMPILED_TEST(I16x8ShrU) {
RunI16x8ShiftOpTest(kExprI16x8ShrU, LogicalShiftRight, 1);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM
void RunI8x16UnOpTest(WasmOpcode simd_op, Int8UnOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -1577,8 +1586,11 @@ WASM_SIMD_SELECT_TEST(32x4)
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
WASM_SIMD_SELECT_TEST(16x8)
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM
WASM_SIMD_SELECT_TEST(8x16)
// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment