Commit f5794378 authored by Deepti Gandluri, committed by Commit Bot

[wasm-simd] Add flag for post-MVP SIMD, gate opcodes with it

Some opcodes are introduced in V8 for prototyping and performance
measurements; they are not officially part of the current SIMD proposal
but may be included in the future. Gate these behind a separate flag.

Change-Id: Icc6a9e89c6196c8ff144d2e0193d707e1f60c38b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2079539
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66542}
parent c9a27038
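For context before the diff, here is a minimal standalone sketch (not part of this commit, and not V8's actual types) of the pattern the change introduces: a classifier marks opcodes that fall outside the SIMD MVP, and the decoder rejects them unless the prototype flag is set. All names below are stand-ins for the real ones that appear in the hunks that follow.

#include <cstdio>

// Hypothetical stand-ins for the real WasmOpcode values and command-line flag.
enum WasmOpcode { kExprI32x4Add, kExprF32x4Qfma };
static bool FLAG_wasm_simd_post_mvp = false;

// Plays the role of WasmOpcodes::IsSimdPostMvpOpcode() in the diff below.
bool IsSimdPostMvpOpcode(WasmOpcode opcode) {
  switch (opcode) {
    case kExprF32x4Qfma:
      return true;
    default:
      return false;
  }
}

// Plays the role of the decoder check: reject post-MVP opcodes unless the flag is on.
bool CheckSimdOpcode(WasmOpcode opcode) {
  if (!FLAG_wasm_simd_post_mvp && IsSimdPostMvpOpcode(opcode)) {
    std::puts("simd opcode not available, enable with --wasm-simd-post-mvp");
    return false;
  }
  return true;
}

int main() {
  CheckSimdOpcode(kExprF32x4Qfma);   // rejected: the flag is off by default
  FLAG_wasm_simd_post_mvp = true;    // as if d8 were run with --wasm-simd-post-mvp
  return CheckSimdOpcode(kExprF32x4Qfma) ? 0 : 1;  // now accepted
}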
......@@ -773,12 +773,20 @@ DEFINE_BOOL(wasm_lazy_compilation, false,
"enable lazy compilation for all wasm modules")
DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
"trace lazy compilation of wasm functions")
DEFINE_BOOL(wasm_grow_shared_memory, true,
"allow growing shared WebAssembly memory objects")
DEFINE_BOOL(wasm_atomics_on_non_shared_memory, false,
"allow atomic operations on non-shared WebAssembly memory")
DEFINE_BOOL(wasm_lazy_validation, false,
"enable lazy validation for lazily compiled wasm functions")
// Flags for wasm prototyping that are not strictly features, i.e., not part
// of an existing proposal that may be conditionally enabled.
DEFINE_BOOL(wasm_atomics_on_non_shared_memory, false,
"allow atomic operations on non-shared WebAssembly memory")
DEFINE_BOOL(wasm_grow_shared_memory, true,
"allow growing shared WebAssembly memory objects")
DEFINE_BOOL(wasm_simd_post_mvp, false,
"allow experimental SIMD operations for prototyping that are not "
"included in the current proposal")
DEFINE_IMPLICATION(wasm_simd_post_mvp, experimental_wasm_simd)
// wasm-interpret-all resets {asm-,}wasm-lazy-compilation.
DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
......
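The DEFINE_IMPLICATION line above means that passing --wasm-simd-post-mvp also turns on --experimental-wasm-simd, so the prototype flag never has to be combined with the feature flag by hand. A rough standalone sketch of that behaviour (this is not V8's flag machinery, just an illustration of the implication):

#include <cassert>

struct Flags {
  bool experimental_wasm_simd = false;
  bool wasm_simd_post_mvp = false;
};

// Hypothetical helper applying the single implication added above:
// wasm_simd_post_mvp => experimental_wasm_simd.
void ApplyImplications(Flags& f) {
  if (f.wasm_simd_post_mvp) f.experimental_wasm_simd = true;
}

int main() {
  Flags f;
  f.wasm_simd_post_mvp = true;  // e.g. d8 --wasm-simd-post-mvp
  ApplyImplications(f);
  assert(f.experimental_wasm_simd);  // SIMD itself is enabled implicitly
  return 0;
}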
......@@ -2779,6 +2779,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
            LoadTransformationKind::kExtend);
        break;
      default: {
        if (!FLAG_wasm_simd_post_mvp &&
            WasmOpcodes::IsSimdPostMvpOpcode(opcode)) {
          this->error(
              "simd opcode not available, enable with --wasm-simd-post-mvp");
          break;
        }
        const FunctionSig* sig = WasmOpcodes::Signature(opcode);
        if (!VALIDATE(sig != nullptr)) {
          this->error("invalid simd opcode");
......
......@@ -429,6 +429,17 @@ bool WasmOpcodes::IsThrowingOpcode(WasmOpcode opcode) {
}
}
bool WasmOpcodes::IsSimdPostMvpOpcode(WasmOpcode opcode) {
  switch (opcode) {
#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
    FOREACH_SIMD_POST_MVP_OPCODE(CHECK_OPCODE)
#undef CHECK_OPCODE
    return true;
    default:
      return false;
  }
}
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
......
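IsSimdPostMvpOpcode uses the usual X-macro trick: FOREACH_SIMD_POST_MVP_OPCODE invokes its argument once per opcode, so passing it CHECK_OPCODE stamps out one case label per post-MVP opcode. A small self-contained illustration of the same pattern (the toy opcode list and values here are made up):

#include <cstdio>

// A toy opcode list in the FOREACH_* style used by wasm-opcodes.h.
#define FOREACH_TOY_POST_MVP_OPCODE(V) \
  V(F32x4Qfma, 0x98, s_sss)            \
  V(I64x2MinS, 0x8e, s_ss)

enum ToyOpcode { kExprF32x4Qfma = 0x98, kExprI64x2MinS = 0x8e, kExprI32x4Add = 0x6e };

bool IsToyPostMvpOpcode(ToyOpcode opcode) {
  switch (opcode) {
    // Expands to: case kExprF32x4Qfma: case kExprI64x2MinS:
#define CHECK_OPCODE(name, opcode, sig) case kExpr##name:
    FOREACH_TOY_POST_MVP_OPCODE(CHECK_OPCODE)
#undef CHECK_OPCODE
    return true;
    default:
      return false;
  }
}

int main() {
  std::printf("%d %d\n", IsToyPostMvpOpcode(kExprF32x4Qfma),   // 1
                         IsToyPostMvpOpcode(kExprI32x4Add));   // 0
  return 0;
}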
......@@ -279,7 +279,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(S8x16Shuffle, 0xfd03, s_ss)
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
#define FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
V(I8x16Splat, 0xfd04, s_i) \
V(I16x8Splat, 0xfd08, s_i) \
V(I32x4Splat, 0xfd0c, s_i) \
......@@ -316,16 +316,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I32x4LeU, 0xfd33, s_ss) \
V(I32x4GeS, 0xfd34, s_ss) \
V(I32x4GeU, 0xfd35, s_ss) \
V(I64x2Eq, 0xfd36, s_ss) \
V(I64x2Ne, 0xfd37, s_ss) \
V(I64x2LtS, 0xfd38, s_ss) \
V(I64x2LtU, 0xfd39, s_ss) \
V(I64x2GtS, 0xfd3a, s_ss) \
V(I64x2GtU, 0xfd3b, s_ss) \
V(I64x2LeS, 0xfd3c, s_ss) \
V(I64x2LeU, 0xfd3d, s_ss) \
V(I64x2GeS, 0xfd3e, s_ss) \
V(I64x2GeU, 0xfd3f, s_ss) \
V(F32x4Eq, 0xfd40, s_ss) \
V(F32x4Ne, 0xfd41, s_ss) \
V(F32x4Lt, 0xfd42, s_ss) \
......@@ -391,23 +381,15 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I32x4MaxS, 0xfd82, s_ss) \
V(I32x4MaxU, 0xfd83, s_ss) \
V(I64x2Neg, 0xfd84, s_s) \
V(S1x2AnyTrue, 0xfd85, i_s) \
V(S1x2AllTrue, 0xfd86, i_s) \
V(I64x2Shl, 0xfd87, s_si) \
V(I64x2ShrS, 0xfd88, s_si) \
V(I64x2ShrU, 0xfd89, s_si) \
V(I64x2Add, 0xfd8a, s_ss) \
V(I64x2Sub, 0xfd8d, s_ss) \
V(I64x2Mul, 0xfd8c, s_ss) \
V(I64x2MinS, 0xfd8e, s_ss) \
V(I64x2MinU, 0xfd8f, s_ss) \
V(I64x2MaxS, 0xfd90, s_ss) \
V(I64x2MaxU, 0xfd91, s_ss) \
V(F32x4Abs, 0xfd95, s_s) \
V(F32x4Neg, 0xfd96, s_s) \
V(F32x4Sqrt, 0xfd97, s_s) \
V(F32x4Qfma, 0xfd98, s_sss) \
V(F32x4Qfms, 0xfd99, s_sss) \
V(F32x4Add, 0xfd9a, s_ss) \
V(F32x4Sub, 0xfd9b, s_ss) \
V(F32x4Mul, 0xfd9c, s_ss) \
......@@ -417,8 +399,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F64x2Abs, 0xfda0, s_s) \
V(F64x2Neg, 0xfda1, s_s) \
V(F64x2Sqrt, 0xfda2, s_s) \
V(F64x2Qfma, 0xfda3, s_sss) \
V(F64x2Qfms, 0xfda4, s_sss) \
V(F64x2Add, 0xfda5, s_ss) \
V(F64x2Sub, 0xfda6, s_ss) \
V(F64x2Mul, 0xfda7, s_ss) \
......@@ -445,12 +425,34 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(S128AndNot, 0xfdd8, s_ss) \
V(I8x16RoundingAverageU, 0xfdd9, s_ss) \
V(I16x8RoundingAverageU, 0xfdda, s_ss) \
V(I8x16Abs, 0xfde1, s_s) \
V(I16x8Abs, 0xfde2, s_s) \
V(I32x4Abs, 0xfde3, s_s)
#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
V(I64x2Eq, 0xfd36, s_ss) \
V(I64x2Ne, 0xfd37, s_ss) \
V(I64x2LtS, 0xfd38, s_ss) \
V(I64x2LtU, 0xfd39, s_ss) \
V(I64x2GtS, 0xfd3a, s_ss) \
V(I64x2GtU, 0xfd3b, s_ss) \
V(I64x2LeS, 0xfd3c, s_ss) \
V(I64x2LeU, 0xfd3d, s_ss) \
V(I64x2GeS, 0xfd3e, s_ss) \
V(I64x2GeU, 0xfd3f, s_ss) \
V(S1x2AnyTrue, 0xfd85, i_s) \
V(S1x2AllTrue, 0xfd86, i_s) \
V(I64x2MinS, 0xfd8e, s_ss) \
V(I64x2MinU, 0xfd8f, s_ss) \
V(I64x2MaxS, 0xfd90, s_ss) \
V(I64x2MaxU, 0xfd91, s_ss) \
V(F32x4Qfma, 0xfd98, s_sss) \
V(F32x4Qfms, 0xfd99, s_sss) \
V(F64x2Qfma, 0xfda3, s_sss) \
V(F64x2Qfms, 0xfda4, s_sss) \
V(I16x8AddHoriz, 0xfdbd, s_ss) \
V(I32x4AddHoriz, 0xfdbe, s_ss) \
V(F32x4AddHoriz, 0xfdbf, s_ss) \
V(I8x16Abs, 0xfde1, s_s) \
V(I16x8Abs, 0xfde2, s_s) \
V(I32x4Abs, 0xfde3, s_s) \
V(F32x4RecipApprox, 0xfdee, s_s) \
V(F32x4RecipSqrtApprox, 0xfdef, s_s)
......@@ -472,6 +474,10 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F32x4ReplaceLane, 0xfd14, _) \
V(F64x2ReplaceLane, 0xfd17, _)
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
FOREACH_SIMD_POST_MVP_OPCODE(V)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
......@@ -674,6 +680,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static bool IsControlOpcode(WasmOpcode opcode);
static bool IsAnyRefOpcode(WasmOpcode opcode);
static bool IsThrowingOpcode(WasmOpcode opcode);
static bool IsSimdPostMvpOpcode(WasmOpcode opcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
static bool IsUnconditionalJump(WasmOpcode opcode);
......
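Note how the header keeps a single FOREACH_SIMD_0_OPERAND_OPCODE entry point: it is now just the concatenation of the MVP and post-MVP lists, so existing consumers (signatures, opcode names, instruction selection) still iterate over all opcodes, while only the decoder distinguishes the two groups. A sketch of that composition with made-up names, not the real lists:

#include <cstdio>

#define FOREACH_TOY_MVP_OPCODE(V) \
  V(I32x4Add)                     \
  V(I32x4Sub)

#define FOREACH_TOY_POST_MVP_OPCODE(V) \
  V(F32x4Qfma)                         \
  V(I64x2MinS)

// Consumers iterate one combined list, mirroring FOREACH_SIMD_0_OPERAND_OPCODE.
#define FOREACH_TOY_OPCODE(V)   \
  FOREACH_TOY_MVP_OPCODE(V)     \
  FOREACH_TOY_POST_MVP_OPCODE(V)

int main() {
#define PRINT_NAME(name) std::puts(#name);
  FOREACH_TOY_OPCODE(PRINT_NAME)  // prints all four names, MVP entries first
#undef PRINT_NAME
  return 0;
}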
......@@ -743,11 +743,13 @@ WASM_SIMD_TEST(F32x4Sqrt) {
}
WASM_SIMD_TEST(F32x4RecipApprox) {
FLAG_SCOPE(wasm_simd_post_mvp);
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
base::Recip, false /* !exact */);
}
WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
FLAG_SCOPE(wasm_simd_post_mvp);
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
base::RecipSqrt, false /* !exact */);
}
......@@ -875,6 +877,7 @@ WASM_SIMD_TEST(F32x4Le) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
// Set up global to hold mask output.
float* g = r.builder().AddGlobal<float>(kWasmS128);
......@@ -899,6 +902,7 @@ WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
}
WASM_SIMD_TEST_NO_LOWERING(F32x4Qfms) {
FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
// Set up global to hold mask output.
float* g = r.builder().AddGlobal<float>(kWasmS128);
......@@ -1048,6 +1052,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int64BinOp expected_op) {
FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
// Global to hold output.
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
......@@ -1498,6 +1503,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2MaxU) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
// Set up global to hold mask output.
double* g = r.builder().AddGlobal<double>(kWasmS128);
......@@ -1522,6 +1528,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
}
WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) {
FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
// Set up global to hold mask output.
double* g = r.builder().AddGlobal<double>(kWasmS128);
......@@ -2618,18 +2625,21 @@ void RunBinaryLaneOpTest(
}
WASM_SIMD_TEST(I32x4AddHoriz) {
FLAG_SCOPE(wasm_simd_post_mvp);
// Inputs are [0 1 2 3] and [4 5 6 7].
RunBinaryLaneOpTest<int32_t>(execution_tier, lower_simd, kExprI32x4AddHoriz,
{{1, 5, 9, 13}});
}
WASM_SIMD_TEST(I16x8AddHoriz) {
FLAG_SCOPE(wasm_simd_post_mvp);
// Inputs are [0 1 2 3 4 5 6 7] and [8 9 10 11 12 13 14 15].
RunBinaryLaneOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8AddHoriz,
{{1, 5, 9, 13, 17, 21, 25, 29}});
}
WASM_SIMD_TEST(F32x4AddHoriz) {
FLAG_SCOPE(wasm_simd_post_mvp);
// Inputs are [0.0f 1.0f 2.0f 3.0f] and [4.0f 5.0f 6.0f 7.0f].
RunBinaryLaneOpTest<float>(execution_tier, lower_simd, kExprF32x4AddHoriz,
{{1.0f, 5.0f, 9.0f, 13.0f}});
......@@ -2965,6 +2975,7 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
// test inputs. Test inputs with all true, all false, one true, and one false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type) \
WASM_SIMD_TEST(ReductionTest##lanes) { \
FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte zero = r.AllocateLocal(kWasmS128); \
......@@ -3426,6 +3437,7 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2Load32x2S) {
V8_TARGET_ARCH_ARM
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AnyTrue) { \
FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
......@@ -3446,6 +3458,7 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AllTrue) { \
FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
......
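The tests wrap each body in FLAG_SCOPE(wasm_simd_post_mvp) so the prototype flag is enabled only for the duration of that test. In V8's test harness this is an RAII-style scoped override; a rough standalone sketch of the idea (not the actual FlagScope implementation):

#include <cassert>

static bool FLAG_wasm_simd_post_mvp = false;  // stand-in for the real flag

// Scoped flag override: set on construction, restore the old value on destruction.
class ScopedFlag {
 public:
  ScopedFlag(bool* flag, bool value) : flag_(flag), old_(*flag) { *flag = value; }
  ~ScopedFlag() { *flag_ = old_; }

 private:
  bool* flag_;
  bool old_;
};

int main() {
  {
    ScopedFlag scope(&FLAG_wasm_simd_post_mvp, true);
    assert(FLAG_wasm_simd_post_mvp);  // post-MVP opcodes allowed inside the test body
  }
  assert(!FLAG_wasm_simd_post_mvp);   // restored once the test body ends
  return 0;
}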