Commit 23c2edd4 authored by jing.bao, committed by Commit Bot

[ia32][wasm] Add I8x16/I16x8 Splat/ExtractLane/ReplaceLane

Add Pxor, Pshuflw, Pshufb, Pextrb, Pextrw macros
Rework SIMD opcode handling to use macros

BUG=

Review-Url: https://codereview.chromium.org/2937653002
Cr-Commit-Position: refs/heads/master@{#46400}
parent 7b4e940a
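
The new MacroAssembler helpers (the capitalized Pxor, Pshuflw, Pshufb, Pextrb, Pextrw seen below) all share one dispatch shape: emit the AVX form if available, otherwise the SSE level that introduced the instruction, otherwise hit UNREACHABLE(). A minimal standalone sketch of that shape, with hypothetical stand-ins for CpuFeatures::IsSupported and the emitters (the real helpers are MacroAssembler members that wrap CpuFeatureScope):

```cpp
#include <cstdio>

// Hypothetical stand-ins, not V8 API.
enum CpuFeature { AVX, SSE4_1 };
static bool IsSupported(CpuFeature f) { return f == SSE4_1; }  // assumed host

// Shape of the new Pextrb/Pextrw/Pshufb helpers: AVX first, then the SSE
// level that introduced the instruction, with no scalar fallback below that.
void Pextrb(/* Register dst, XMMRegister src, int8_t imm8 */) {
  if (IsSupported(AVX)) {
    printf("emit vpextrb\n");  // AVX encoding
    return;
  }
  if (IsSupported(SSE4_1)) {
    printf("emit pextrb\n");   // SSE4.1 encoding
    return;
  }
  // UNREACHABLE() in the real helper.
}

int main() { Pextrb(); }  // prints: emit pextrb
```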
@@ -1907,32 +1907,79 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
       break;
     }
-    case kSSEI32x4Add: {
-      __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
-      break;
-    }
-    case kSSEI32x4Sub: {
-      __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
-      break;
-    }
     case kAVXI32x4ReplaceLane: {
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vpinsrd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputOperand(2), i.InputInt8(1));
       break;
     }
+    case kSSEI32x4Add: {
+      __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
+      break;
+    }
     case kAVXI32x4Add: {
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputOperand(1));
       break;
     }
+    case kSSEI32x4Sub: {
+      __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+      break;
+    }
     case kAVXI32x4Sub: {
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
                 i.InputOperand(1));
       break;
     }
+    case kIA32I16x8Splat: {
+      XMMRegister dst = i.OutputSimd128Register();
+      __ Movd(dst, i.InputOperand(0));
+      __ Pshuflw(dst, dst, 0x0);
+      __ Pshufd(dst, dst, 0x0);
+      break;
+    }
+    case kIA32I16x8ExtractLane: {
+      Register dst = i.OutputRegister();
+      __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+      __ movsx_w(dst, dst);
+      break;
+    }
+    case kSSEI16x8ReplaceLane: {
+      __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+      break;
+    }
+    case kAVXI16x8ReplaceLane: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vpinsrw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                 i.InputOperand(2), i.InputInt8(1));
+      break;
+    }
+    case kIA32I8x16Splat: {
+      XMMRegister dst = i.OutputSimd128Register();
+      __ Movd(dst, i.InputOperand(0));
+      __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+      __ Pshufb(dst, kScratchDoubleReg);
+      break;
+    }
+    case kIA32I8x16ExtractLane: {
+      Register dst = i.OutputRegister();
+      __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+      __ movsx_b(dst, dst);
+      break;
+    }
+    case kSSEI8x16ReplaceLane: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+      break;
+    }
+    case kAVXI8x16ReplaceLane: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vpinsrb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                 i.InputOperand(2), i.InputInt8(1));
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
       break;
...
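
For reference, the two new splat lowerings map directly onto SSE intrinsics. A standalone sketch (not V8 code; build with -mssse3): I16x8Splat broadcasts word 0 with pshuflw then pshufd, and I8x16Splat broadcasts byte 0 with pshufb driven by an all-zero mask, exactly as the cases above emit.

```cpp
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  // kIA32I16x8Splat: movd, pshuflw(0) broadcasts word 0 across the low four
  // words, pshufd(0) then broadcasts dword 0 across the whole register.
  __m128i w = _mm_cvtsi32_si128(0x1234);         // movd
  w = _mm_shufflelo_epi16(w, 0x0);               // pshuflw dst, dst, 0
  w = _mm_shuffle_epi32(w, 0x0);                 // pshufd dst, dst, 0

  // kIA32I8x16Splat: movd, then pshufb with a zeroed mask register makes
  // every byte lane select byte 0 of the source.
  __m128i b = _mm_cvtsi32_si128(42);             // movd
  b = _mm_shuffle_epi8(b, _mm_setzero_si128());  // pxor mask; pshufb

  uint16_t w_lanes[8];
  uint8_t b_lanes[16];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(w_lanes), w);
  _mm_storeu_si128(reinterpret_cast<__m128i*>(b_lanes), b);
  printf("%x %x\n", w_lanes[0], w_lanes[7]);     // 1234 1234
  printf("%u %u\n", b_lanes[0], b_lanes[15]);    // 42 42
  return 0;
}
```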
@@ -114,11 +114,19 @@ namespace compiler {
   V(IA32I32x4Splat)       \
   V(IA32I32x4ExtractLane) \
   V(SSEI32x4ReplaceLane)  \
-  V(SSEI32x4Add)          \
-  V(SSEI32x4Sub)          \
   V(AVXI32x4ReplaceLane)  \
+  V(SSEI32x4Add)          \
   V(AVXI32x4Add)          \
-  V(AVXI32x4Sub)
+  V(SSEI32x4Sub)          \
+  V(AVXI32x4Sub)          \
+  V(IA32I16x8Splat)       \
+  V(IA32I16x8ExtractLane) \
+  V(SSEI16x8ReplaceLane)  \
+  V(AVXI16x8ReplaceLane)  \
+  V(IA32I8x16Splat)       \
+  V(IA32I8x16ExtractLane) \
+  V(SSEI8x16ReplaceLane)  \
+  V(AVXI8x16ReplaceLane)

 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
...
@@ -100,11 +100,19 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kIA32I32x4Splat:
     case kIA32I32x4ExtractLane:
     case kSSEI32x4ReplaceLane:
-    case kSSEI32x4Add:
-    case kSSEI32x4Sub:
     case kAVXI32x4ReplaceLane:
+    case kSSEI32x4Add:
     case kAVXI32x4Add:
+    case kSSEI32x4Sub:
     case kAVXI32x4Sub:
+    case kIA32I16x8Splat:
+    case kIA32I16x8ExtractLane:
+    case kSSEI16x8ReplaceLane:
+    case kAVXI16x8ReplaceLane:
+    case kIA32I8x16Splat:
+    case kIA32I8x16ExtractLane:
+    case kSSEI8x16ReplaceLane:
+    case kAVXI8x16ReplaceLane:
       return (instr->addressing_mode() == kMode_None)
                  ? kNoOpcodeFlags
                  : kIsLoadOperation | kHasSideEffect;
...
@@ -888,9 +888,7 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
   V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
   V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
   V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
-  V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
-  V(I32x4Add, kAVXI32x4Add, kSSEI32x4Add)       \
-  V(I32x4Sub, kAVXI32x4Sub, kSSEI32x4Sub)
+  V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)

 #define FLOAT_UNOP_LIST(V)                      \
   V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
@@ -1893,30 +1891,55 @@ VISIT_ATOMIC_BINOP(Or)
 VISIT_ATOMIC_BINOP(Xor)
 #undef VISIT_ATOMIC_BINOP

-void InstructionSelector::VisitI32x4Splat(Node* node) {
-  VisitRO(this, node, kIA32I32x4Splat);
-}
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
-  IA32OperandGenerator g(this);
-  int32_t lane = OpParameter<int32_t>(node);
-  Emit(kIA32I32x4ExtractLane, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
-}
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
-  IA32OperandGenerator g(this);
-  InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
-  InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
-  InstructionOperand operand2 = g.Use(node->InputAt(1));
-  if (IsSupported(AVX)) {
-    Emit(kAVXI32x4ReplaceLane, g.DefineAsRegister(node), operand0, operand1,
-         operand2);
-  } else {
-    Emit(kSSEI32x4ReplaceLane, g.DefineSameAsFirst(node), operand0, operand1,
-         operand2);
-  }
-}
+#define SIMD_TYPES(V) \
+  V(I32x4)            \
+  V(I16x8)            \
+  V(I8x16)
+
+#define SIMD_BINOP_LIST(V) \
+  V(I32x4Add)              \
+  V(I32x4Sub)
+
+#define VISIT_SIMD_SPLAT(Type)                               \
+  void InstructionSelector::Visit##Type##Splat(Node* node) { \
+    VisitRO(this, node, kIA32##Type##Splat);                 \
+  }
+SIMD_TYPES(VISIT_SIMD_SPLAT)
+#undef VISIT_SIMD_SPLAT
+
+#define VISIT_SIMD_EXTRACT_LANE(Type)                              \
+  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+    IA32OperandGenerator g(this);                                  \
+    int32_t lane = OpParameter<int32_t>(node);                     \
+    Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node),       \
+         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));   \
+  }
+SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
+#undef VISIT_SIMD_EXTRACT_LANE
+
+#define VISIT_SIMD_REPLACE_LANE(Type)                                         \
+  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) {            \
+    IA32OperandGenerator g(this);                                             \
+    InstructionOperand operand0 = g.UseRegister(node->InputAt(0));            \
+    InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node)); \
+    InstructionOperand operand2 = g.Use(node->InputAt(1));                    \
+    if (IsSupported(AVX)) {                                                   \
+      Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0,       \
+           operand1, operand2);                                               \
+    } else {                                                                  \
+      Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0,      \
+           operand1, operand2);                                               \
+    }                                                                         \
+  }
+SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+#undef VISIT_SIMD_REPLACE_LANE
+
+#define VISIT_SIMD_BINOP(Opcode)                           \
+  void InstructionSelector::Visit##Opcode(Node* node) {    \
+    VisitRROFloat(this, node, kAVX##Opcode, kSSE##Opcode); \
+  }
+SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
+#undef VISIT_SIMD_BINOP

 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
   UNREACHABLE();
...
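
The hunk above replaces the three hand-written I32x4 visitors with X macros, so each visitor is now stamped out once per SIMD type. A standalone sketch of that expansion pattern, with hypothetical stub visitors in place of the real InstructionSelector members:

```cpp
#include <cstdio>

// Mirrors the SIMD_TYPES/VISIT_SIMD_SPLAT pairing from the patch.
#define SIMD_TYPES(V) \
  V(I32x4)            \
  V(I16x8)            \
  V(I8x16)

#define VISIT_SIMD_SPLAT(Type)              \
  void Visit##Type##Splat() {               \
    printf("select kIA32" #Type "Splat\n"); \
  }
SIMD_TYPES(VISIT_SIMD_SPLAT)  // defines VisitI32x4Splat, VisitI16x8Splat,
#undef VISIT_SIMD_SPLAT       // and VisitI8x16Splat

int main() {
  VisitI16x8Splat();  // prints: select kIA32I16x8Splat
  return 0;
}
```

Note the AVX/SSE asymmetry in VISIT_SIMD_REPLACE_LANE: the AVX form takes a separate destination register (DefineAsRegister), while the destructive SSE form must write over its first input (DefineSameAsFirst).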
@@ -2192,13 +2192,22 @@ void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }

 void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }

 void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+#endif  // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+        // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+    !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
 void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }

 void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }

 void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+#endif  // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+        // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+        // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+    !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
 void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }

 void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
...
@@ -2292,14 +2301,15 @@ void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
        // && !V8_TARGET_ARCH_MIPS64

 #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
-    !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+    !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
 void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }

 void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }

 void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
 #endif  // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
-        // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+        // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+        // !V8_TARGET_ARCH_MIPS64

 #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
     !V8_TARGET_ARCH_MIPS64
...
@@ -2088,6 +2088,25 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
   }
 }

+void MacroAssembler::Pxor(XMMRegister dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpxor(dst, dst, src);
+  } else {
+    pxor(dst, src);
+  }
+}
+
+void MacroAssembler::Pshuflw(XMMRegister dst, const Operand& src,
+                             uint8_t shuffle) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpshuflw(dst, src, shuffle);
+  } else {
+    pshuflw(dst, src, shuffle);
+  }
+}
+
 void MacroAssembler::Pshufd(XMMRegister dst, const Operand& src,
                             uint8_t shuffle) {
   if (CpuFeatures::IsSupported(AVX)) {
...
@@ -2098,6 +2117,48 @@ void MacroAssembler::Pshufd(XMMRegister dst, const Operand& src,
   }
 }

+void MacroAssembler::Pshufb(XMMRegister dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpshufb(dst, dst, src);
+    return;
+  }
+  if (CpuFeatures::IsSupported(SSSE3)) {
+    CpuFeatureScope sse_scope(this, SSSE3);
+    pshufb(dst, src);
+    return;
+  }
+  UNREACHABLE();
+}
+
+void MacroAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpextrb(dst, src, imm8);
+    return;
+  }
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatureScope sse_scope(this, SSE4_1);
+    pextrb(dst, src, imm8);
+    return;
+  }
+  UNREACHABLE();
+}
+
+void MacroAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpextrw(dst, src, imm8);
+    return;
+  }
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatureScope sse_scope(this, SSE4_1);
+    pextrw(dst, src, imm8);
+    return;
+  }
+  UNREACHABLE();
+}
+
 void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
   if (imm8 == 0) {
     Movd(dst, src);
...
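
Pextrb and Pextrw zero-extend the extracted lane into the 32-bit register, which is why the code generator pairs them with movsx_b/movsx_w for the signed ExtractLane opcodes. A standalone intrinsics sketch of that pairing (not V8 code; build with -msse4.1):

```cpp
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  __m128i v = _mm_set1_epi8(-3);            // every i8x16 lane holds -3
  int raw = _mm_extract_epi8(v, 5);         // pextrb zero-extends: 0xFD == 253
  int32_t lane = static_cast<int8_t>(raw);  // movsx_b equivalent: back to -3
  printf("raw=%d lane=%d\n", raw, lane);    // raw=253 lane=-3
  return 0;
}
```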
@@ -723,12 +723,24 @@ class MacroAssembler: public Assembler {
 #undef AVX_OP2_WITH_TYPE

+  void Pxor(XMMRegister dst, XMMRegister src) { Pxor(dst, Operand(src)); }
+  void Pxor(XMMRegister dst, const Operand& src);
+
+  void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+    Pshuflw(dst, Operand(src), shuffle);
+  }
+  void Pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+
   void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
     Pshufd(dst, Operand(src), shuffle);
   }
   void Pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);

   // Non-SSE2 instructions.
+  void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
+  void Pshufb(XMMRegister dst, const Operand& src);
+
+  void Pextrb(Register dst, XMMRegister src, int8_t imm8);
+  void Pextrw(Register dst, XMMRegister src, int8_t imm8);
   void Pextrd(Register dst, XMMRegister src, int8_t imm8);
   void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
               bool is_64_bits = false) {
...
@@ -650,8 +650,6 @@ WASM_SIMD_TEST(I32x4ReplaceLane) {
   CHECK_EQ(1, r.Call(1, 2));
 }

-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
-    SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
 WASM_SIMD_TEST(I16x8Splat) {
   WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
   byte lane_val = 0;
...
@@ -711,11 +709,7 @@ WASM_SIMD_TEST(I16x8ReplaceLane) {
   CHECK_EQ(1, r.Call(1, 2));
 }

-#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
-        // SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
-    V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || SIMD_LOWERING_TARGET
 WASM_SIMD_TEST(I8x16Splat) {
   WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
   byte lane_val = 0;
...
@@ -830,8 +824,6 @@ WASM_SIMD_TEST(I8x16ReplaceLane) {
   CHECK_EQ(1, r.Call(1, 2));
 }

-#endif  // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
-        // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || SIMD_LOWERING_TARGET
-
 #if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || SIMD_LOWERING_TARGET || \
     V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
...