Commit 8ef2f799 authored by Kong, Fanchen, committed by Commit Bot

[wasm-simd] [liftoff] Implement not/and/or/xor/andnot on x64 and ia32

Bug: v8:9909
Change-Id: Ic71a2a012cbd538d65afb047fe73e98be6454aed
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2150157
Commit-Queue: Fanchen Kong <fanchen.kong@intel.com>
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67191}
parent a8b789fc
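For reference, the new opcodes are plain bitwise operations on the 128-bit value: s128.not, s128.and, s128.or, s128.xor, and s128.andnot, where andnot computes lhs & ~rhs. Since the SSE andnps instruction computes ~dst & src, the x64 and ia32 implementations below pass the operands in swapped order. A minimal scalar sketch of the intended semantics (illustrative only; these helper names are not part of the patch):

// Scalar model of the per-bit semantics of the new s128 operations.
#include <cstdint>
uint64_t S128Not(uint64_t src) { return ~src; }
uint64_t S128And(uint64_t lhs, uint64_t rhs) { return lhs & rhs; }
uint64_t S128Or(uint64_t lhs, uint64_t rhs) { return lhs | rhs; }
uint64_t S128Xor(uint64_t lhs, uint64_t rhs) { return lhs ^ rhs; }
// Note: andnot negates the second operand, not the first.
uint64_t S128AndNot(uint64_t lhs, uint64_t rhs) { return lhs & ~rhs; }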
@@ -2223,6 +2223,31 @@ void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
  bailout(kSimd, "i16x8_uconvert_i32x4");
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
  bailout(kSimd, "s128_not");
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  bailout(kSimd, "s128_and");
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  bailout(kSimd, "s128_or");
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  bailout(kSimd, "s128_xor");
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  bailout(kSimd, "s128_and_not");
}
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
...
@@ -1558,6 +1558,31 @@ void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
  bailout(kSimd, "i16x8_uconvert_i32x4");
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
  bailout(kSimd, "s128_not");
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  bailout(kSimd, "s128_and");
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  bailout(kSimd, "s128_or");
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  bailout(kSimd, "s128_xor");
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  bailout(kSimd, "s128_and_not");
}
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
...
@@ -2049,6 +2049,34 @@ void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
      this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
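  // There is no SSE instruction for a 128-bit bitwise NOT, so xor the source
  // with an all-ones mask (produced by pcmpeqd of a register with itself).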
  if (dst.fp() != src.fp()) {
    Pcmpeqd(dst.fp(), dst.fp());
    Pxor(dst.fp(), src.fp());
  } else {
    Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
    Pxor(dst.fp(), liftoff::kScratchDoubleReg);
  }
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  liftoff::EmitSimdCommutativeBinOp<&Assembler::vpand, &Assembler::pand>(
      this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitSimdCommutativeBinOp<&Assembler::vpor, &Assembler::por>(
      this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  liftoff::EmitSimdCommutativeBinOp<&Assembler::vpxor, &Assembler::pxor>(
      this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
                                      LiftoffRegister src) {
  if (dst.fp() == src.fp()) {
@@ -2529,6 +2557,13 @@ void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
      rhs, SSE4_1);
}
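// Wasm s128.andnot computes lhs & ~rhs, while SSE andnps computes ~dst & src,
// so the operands are passed to the helper in swapped order.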
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vandnps, &Assembler::andnps>(
      this, dst, rhs, lhs);
}
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
...
@@ -739,6 +739,13 @@ class LiftoffAssembler : public TurboAssembler {
                            LiftoffRegister rhs);
  inline void emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs);
  inline void emit_s128_not(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs);
  inline void emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs);
  inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
                             LiftoffRegister rhs);
@@ -849,6 +856,8 @@ class LiftoffAssembler : public TurboAssembler {
  inline void emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
                                        LiftoffRegister lhs,
                                        LiftoffRegister rhs);
  inline void emit_s128_and_not(LiftoffRegister dst, LiftoffRegister lhs,
                                LiftoffRegister rhs);
  inline void emit_i8x16_rounding_average_u(LiftoffRegister dst,
                                            LiftoffRegister lhs,
                                            LiftoffRegister rhs);
...
@@ -2352,6 +2352,14 @@ class LiftoffCompiler {
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_eq);
      case wasm::kExprF64x2Eq:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_eq);
      case wasm::kExprS128Not:
        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_s128_not);
      case wasm::kExprS128And:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and);
      case wasm::kExprS128Or:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_or);
      case wasm::kExprS128Xor:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_xor);
      case wasm::kExprI8x16Neg:
        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
      case wasm::kExprI8x16Add:
@@ -2472,6 +2480,8 @@ class LiftoffCompiler {
      case wasm::kExprI16x8UConvertI32x4:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i16x8_uconvert_i32x4);
      case wasm::kExprS128AndNot:
        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and_not);
      case wasm::kExprI8x16RoundingAverageU:
        return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i8x16_rounding_average_u);
...
@@ -2012,6 +2012,34 @@ void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
      this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
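  // There is no SSE instruction for a 128-bit bitwise NOT, so xor the source
  // with an all-ones mask (produced by pcmpeqd of a register with itself).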
  if (dst.fp() != src.fp()) {
    Pcmpeqd(dst.fp(), dst.fp());
    Pxor(dst.fp(), src.fp());
  } else {
    Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
    Pxor(dst.fp(), kScratchDoubleReg);
  }
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  liftoff::EmitSimdCommutativeBinOp<&Assembler::vpand, &Assembler::pand>(
      this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitSimdCommutativeBinOp<&Assembler::vpor, &Assembler::por>(
      this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  liftoff::EmitSimdCommutativeBinOp<&Assembler::vpxor, &Assembler::pxor>(
      this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
                                      LiftoffRegister src) {
  if (dst.fp() == src.fp()) {
@@ -2491,6 +2519,13 @@ void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
      rhs, SSE4_1);
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vandnps, &Assembler::andnps>(
      this, dst, rhs, lhs);
}
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
...