Commit d39d75b5 authored by Jakob Kummerow, committed by V8 LUCI CQ

[wasm][cleanup] Merge opcode names into main macros

This merges the separate opcode name definitions from wasm-opcodes-inl.h
into the main opcode-defining macros in wasm-opcodes.h. This is simpler
(avoids a bunch of fairly complex macros) and easier to update when we
add new opcodes in the future.
The tests become obsolete because they would simply repeat the implementation.

Change-Id: Ib6421da5670079e7725659c1f4008251f8ff7aed
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3714244
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81280}
parent d750358a
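
A note on the shape of the change before the diff: each entry in an opcode list gains its wat-format name as a fourth macro argument, and every consumer macro that does not need the trailing arguments becomes variadic. Below is a minimal, self-contained C++ sketch of that pattern; FOREACH_DEMO_OPCODE and the Demo* names are illustrative stand-ins, not V8's actual macros.

// Before this CL an entry was V(Name, byte, sig) and the wat names lived
// in hand-written CASE_* macros in wasm-opcodes-inl.h; after it, the name
// is simply the fourth list argument.
#define FOREACH_DEMO_OPCODE(V)     \
  V(I32Add, 0x6a, i_ii, "i32.add") \
  V(I32Sub, 0x6b, i_ii, "i32.sub")

enum DemoOpcode {
#define DECL(name, opcode, sig, wat) kExpr##name = opcode,
  FOREACH_DEMO_OPCODE(DECL)
#undef DECL
};

// A consumer that wants the name binds all four arguments...
constexpr const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define CASE(name, opcode, sig, wat) \
  case kExpr##name:                  \
    return wat;
    FOREACH_DEMO_OPCODE(CASE)
#undef CASE
  }
  return "invalid opcode";
}

// ...while a consumer that only needs the enumerator absorbs the new
// trailing argument through __VA_ARGS__, like DECLARE_OPCODE_CASE below.
constexpr bool IsDemoOpcode(int op) {
  switch (op) {
#define CASE(name, ...) case kExpr##name:
    FOREACH_DEMO_OPCODE(CASE)
#undef CASE
    return true;
    default:
      return false;
  }
}

static_assert(IsDemoOpcode(0x6b), "generated case labels cover the list");

The same two kinds of consumers appear throughout the diff: OpcodeName() binds all four arguments, while case-label generators take (name, ...).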
@@ -2064,7 +2064,7 @@ class WasmDecoder : public Decoder {
// Prefixed opcodes (already handled, included here for completeness of
// switch)
FOREACH_SIMD_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_NUMERIC_OPCODE(DECLARE_OPCODE_CASE, DECLARE_OPCODE_CASE)
FOREACH_NUMERIC_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_GC_OPCODE(DECLARE_OPCODE_CASE)
@@ -2087,7 +2087,7 @@ class WasmDecoder : public Decoder {
if (!sig) sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) return {sig->parameter_count(), sig->return_count()};
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
#define DECLARE_OPCODE_CASE(name, ...) case kExpr##name:
// clang-format off
switch (opcode) {
case kExprSelect:
@@ -2743,12 +2743,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 1;
}
#define BUILD_SIMPLE_OPCODE(op, _, sig) \
#define BUILD_SIMPLE_OPCODE(op, _, sig, ...) \
DECODE(op) { return BuildSimpleOperator_##sig(kExpr##op); }
FOREACH_SIMPLE_NON_CONST_OPCODE(BUILD_SIMPLE_OPCODE)
#undef BUILD_SIMPLE_OPCODE
#define BUILD_SIMPLE_OPCODE(op, _, sig) \
#define BUILD_SIMPLE_OPCODE(op, _, sig, ...) \
DECODE(op) { \
if (decoding_mode == kConstantExpression) { \
if (!VALIDATE(this->enabled_.has_extended_const())) { \
@@ -3657,7 +3657,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
}
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
#define SIMPLE_PROTOTYPE_CASE(name, ...) \
DECODE(name) { return BuildSimplePrototypeOperator(opcode); }
FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
#undef SIMPLE_PROTOTYPE_CASE
@@ -3704,10 +3704,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
static constexpr OpcodeHandler GetOpcodeHandlerTableEntry(size_t idx) {
DECODE_IMPL(Nop);
#define BUILD_SIMPLE_OPCODE(op, _, sig) DECODE_IMPL(op);
#define BUILD_SIMPLE_OPCODE(op, ...) DECODE_IMPL(op);
FOREACH_SIMPLE_NON_CONST_OPCODE(BUILD_SIMPLE_OPCODE)
#undef BUILD_SIMPLE_OPCODE
#define BUILD_SIMPLE_EXTENDED_CONST_OPCODE(op, _, sig) DECODE_IMPL_CONST(op);
#define BUILD_SIMPLE_EXTENDED_CONST_OPCODE(op, ...) DECODE_IMPL_CONST(op);
FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(BUILD_SIMPLE_EXTENDED_CONST_OPCODE)
#undef BUILD_SIMPLE_EXTENDED_CONST_OPCODE
DECODE_IMPL(Block);
@@ -3766,7 +3766,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
DECODE_IMPL_CONST2(kSimdPrefix, Simd);
DECODE_IMPL2(kAtomicPrefix, Atomic);
DECODE_IMPL_CONST2(kGCPrefix, GC);
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) DECODE_IMPL(name);
#define SIMPLE_PROTOTYPE_CASE(name, ...) DECODE_IMPL(name);
FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
#undef SIMPLE_PROTOTYPE_CASE
return &WasmFullDecoder::DecodeUnknownOrAsmJs;
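
The decoder edits above all follow one mechanical rule: any macro that spelled out (name, opcode, sig) but consumed only a prefix of the arguments now takes a trailing "..." so the newly added name string is absorbed unexpanded. A hedged sketch of the BUILD_SIMPLE_OPCODE-style token-pasting dispatch follows; the helpers and enumerators are stand-ins for the real WasmFullDecoder members.

#include <cstdio>

// Stand-ins for the per-signature builders that the real macro selects
// (BuildSimpleOperator_<sig> in WasmFullDecoder).
void BuildSimpleOperator_i_ii(unsigned opcode) { std::printf("i_ii 0x%x\n", opcode); }
void BuildSimpleOperator_l_ll(unsigned opcode) { std::printf("l_ll 0x%x\n", opcode); }

enum { kExprI32Add = 0x6a, kExprI64Add = 0x7c };

// Only (op, sig) are consumed: the opcode byte lands in the unused "_"
// parameter, and the wat name added by this CL is swallowed by "...".
#define BUILD_SIMPLE_OPCODE(op, _, sig, ...) \
  void Decode##op() { BuildSimpleOperator_##sig(kExpr##op); }

BUILD_SIMPLE_OPCODE(I32Add, 0x6a, i_ii, "i32.add")
BUILD_SIMPLE_OPCODE(I64Add, 0x7c, l_ll, "i64.add")
#undef BUILD_SIMPLE_OPCODE

int main() {
  DecodeI32Add();  // prints "i_ii 0x6a"
  DecodeI64Add();  // prints "l_ll 0x7c"
  return 0;
}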
@@ -21,473 +21,20 @@ namespace v8 {
namespace internal {
namespace wasm {
#define CASE_OP(name, str) \
case kExpr##name: \
return str;
#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
#define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
#define CASE_S128_OP(name, str) CASE_OP(S128##name, "v128." str)
#define CASE_V128_OP(name, str) CASE_OP(V128##name, "v128." str)
#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
#define CASE_SIMD_OP(name, str) \
CASE_F64x2_OP(name, str) CASE_I64x2_OP(name, str) CASE_F32x4_OP(name, str) \
CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
CASE_I8x16_OP(name, str)
#define CASE_SIMDF_OP(name, str) \
CASE_F32x4_OP(name, str) CASE_F64x2_OP(name, str)
#define CASE_SIMDI_OP(name, str) \
CASE_I64x2_OP(name, str) CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
CASE_I8x16_OP(name, str)
#define CASE_SIMDI_NO64X2_OP(name, str) \
CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
#define CASE_SIGN_OP(TYPE, name, str) \
CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
#define CASE_UNSIGNED_OP(TYPE, name, str) CASE_##TYPE##_OP(name##U, str "_u")
#define CASE_ALL_SIGN_OP(name, str) \
CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
CASE_##RES##_OP(U##name##SRC, str "_" src_suffix "_u") \
CASE_##RES##_OP(S##name##SRC, str "_" src_suffix "_s")
#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
CASE_##RES##_OP(U##name##Sat##SRC, str "_sat_" src_suffix "_u") \
CASE_##RES##_OP(S##name##Sat##SRC, str "_sat_" src_suffix "_s")
#define CASE_L32_OP(name, str) \
CASE_SIGN_OP(I32, name##8, str "8") \
CASE_SIGN_OP(I32, name##16, str "16") \
CASE_I32_OP(name, str "32")
#define CASE_ATOMIC_LOAD(name, str) \
CASE_INT_OP(name, str) \
CASE_INT_OP(name##8U, str "8_u") \
CASE_INT_OP(name##16U, str "16_u") \
CASE_I64_OP(name##32U, str "32_u")
#define CASE_ATOMIC_STORE(name, str) \
CASE_INT_OP(name, str) \
CASE_INT_OP(name##8U, str "8") \
CASE_INT_OP(name##16U, str "16") \
CASE_I64_OP(name##32U, str "32")
#define CASE_ATOMIC_RMW(Name, str) \
CASE_INT_OP(Name, "atomic.rmw." str) \
CASE_INT_OP(Name##8U, "atomic.rmw8." str "_u") \
CASE_INT_OP(Name##16U, "atomic.rmw16." str "_u") \
CASE_I64_OP(Name##32U, "atomic.rmw32." str "_u")
// static
constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
switch (opcode) {
// clang-format off
// Standard opcodes
CASE_INT_OP(Eqz, "eqz")
CASE_ALL_OP(Eq, "eq")
CASE_ALL_OP(Ne, "ne")
CASE_ALL_OP(Add, "add")
CASE_ALL_OP(Sub, "sub")
CASE_ALL_OP(Mul, "mul")
CASE_ALL_SIGN_OP(Lt, "lt")
CASE_ALL_SIGN_OP(Gt, "gt")
CASE_ALL_SIGN_OP(Le, "le")
CASE_ALL_SIGN_OP(Ge, "ge")
CASE_INT_OP(Clz, "clz")
CASE_INT_OP(Ctz, "ctz")
CASE_INT_OP(Popcnt, "popcnt")
CASE_ALL_SIGN_OP(Div, "div")
CASE_SIGN_OP(INT, Rem, "rem")
CASE_INT_OP(And, "and")
CASE_INT_OP(Ior, "or")
CASE_INT_OP(Xor, "xor")
CASE_INT_OP(Shl, "shl")
CASE_SIGN_OP(INT, Shr, "shr")
CASE_INT_OP(Rol, "rol")
CASE_INT_OP(Ror, "ror")
CASE_FLOAT_OP(Abs, "abs")
CASE_FLOAT_OP(Neg, "neg")
CASE_FLOAT_OP(Ceil, "ceil")
CASE_FLOAT_OP(Floor, "floor")
CASE_FLOAT_OP(Trunc, "trunc")
CASE_FLOAT_OP(NearestInt, "nearest")
CASE_FLOAT_OP(Sqrt, "sqrt")
CASE_FLOAT_OP(Min, "min")
CASE_FLOAT_OP(Max, "max")
CASE_FLOAT_OP(CopySign, "copysign")
CASE_REF_OP(Null, "null")
CASE_REF_OP(IsNull, "is_null")
CASE_REF_OP(Func, "func")
CASE_REF_OP(AsNonNull, "as_non_null")
CASE_I32_OP(ConvertI64, "wrap_i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
CASE_F32_OP(ConvertF64, "demote_f64")
CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
CASE_F64_OP(ConvertF32, "promote_f32")
CASE_I32_OP(ReinterpretF32, "reinterpret_f32")
CASE_I64_OP(ReinterpretF64, "reinterpret_f64")
CASE_F32_OP(ReinterpretI32, "reinterpret_i32")
CASE_F64_OP(ReinterpretI64, "reinterpret_i64")
CASE_INT_OP(SExtendI8, "extend8_s")
CASE_INT_OP(SExtendI16, "extend16_s")
CASE_I64_OP(SExtendI32, "extend32_s")
CASE_OP(Unreachable, "unreachable")
CASE_OP(Nop, "nop")
CASE_OP(NopForTestingUnsupportedInLiftoff, "nop_for_testing")
CASE_OP(Block, "block")
CASE_OP(Loop, "loop")
CASE_OP(If, "if")
CASE_OP(Else, "else")
CASE_OP(End, "end")
CASE_OP(Br, "br")
CASE_OP(BrIf, "br_if")
CASE_OP(BrTable, "br_table")
CASE_OP(Return, "return")
CASE_OP(CallFunction, "call")
CASE_OP(CallIndirect, "call_indirect")
CASE_OP(ReturnCall, "return_call")
CASE_OP(ReturnCallIndirect, "return_call_indirect")
CASE_OP(CallRef, "call_ref")
CASE_OP(ReturnCallRef, "return_call_ref")
CASE_OP(BrOnNull, "br_on_null")
CASE_OP(BrOnNonNull, "br_on_non_null")
CASE_OP(Drop, "drop")
CASE_OP(Select, "select")
CASE_OP(SelectWithType, "select")
CASE_OP(LocalGet, "local.get")
CASE_OP(LocalSet, "local.set")
CASE_OP(LocalTee, "local.tee")
CASE_OP(GlobalGet, "global.get")
CASE_OP(GlobalSet, "global.set")
CASE_OP(TableGet, "table.get")
CASE_OP(TableSet, "table.set")
CASE_ALL_OP(Const, "const")
CASE_OP(MemorySize, "memory.size")
CASE_OP(MemoryGrow, "memory.grow")
CASE_ALL_OP(LoadMem, "load")
CASE_SIGN_OP(INT, LoadMem8, "load8")
CASE_SIGN_OP(INT, LoadMem16, "load16")
CASE_SIGN_OP(I64, LoadMem32, "load32")
CASE_S128_OP(LoadMem, "load")
CASE_S128_OP(Const, "const")
CASE_ALL_OP(StoreMem, "store")
CASE_INT_OP(StoreMem8, "store8")
CASE_INT_OP(StoreMem16, "store16")
CASE_I64_OP(StoreMem32, "store32")
CASE_S128_OP(StoreMem, "store")
CASE_OP(RefEq, "ref.eq")
CASE_OP(Let, "let")
// Exception handling opcodes.
CASE_OP(Try, "try")
CASE_OP(Catch, "catch")
CASE_OP(Delegate, "delegate")
CASE_OP(Throw, "throw")
CASE_OP(Rethrow, "rethrow")
CASE_OP(CatchAll, "catch-all")
// asm.js-only opcodes.
CASE_F64_OP(Acos, "acos")
CASE_F64_OP(Asin, "asin")
CASE_F64_OP(Atan, "atan")
CASE_F64_OP(Cos, "cos")
CASE_F64_OP(Sin, "sin")
CASE_F64_OP(Tan, "tan")
CASE_F64_OP(Exp, "exp")
CASE_F64_OP(Log, "log")
CASE_F64_OP(Atan2, "atan2")
CASE_F64_OP(Pow, "pow")
CASE_F64_OP(Mod, "mod")
CASE_F32_OP(AsmjsLoadMem, "asmjs_load")
CASE_F64_OP(AsmjsLoadMem, "asmjs_load")
CASE_L32_OP(AsmjsLoadMem, "asmjs_load")
CASE_I32_OP(AsmjsStoreMem, "asmjs_store")
CASE_F32_OP(AsmjsStoreMem, "asmjs_store")
CASE_F64_OP(AsmjsStoreMem, "asmjs_store")
CASE_I32_OP(AsmjsStoreMem8, "asmjs_store8")
CASE_I32_OP(AsmjsStoreMem16, "asmjs_store16")
CASE_SIGN_OP(I32, AsmjsDiv, "asmjs_div")
CASE_SIGN_OP(I32, AsmjsRem, "asmjs_rem")
CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_f32_s")
CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_f32_u")
CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_f64_s")
CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_f64_u")
// Numeric Opcodes.
CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
CASE_OP(MemoryInit, "memory.init")
CASE_OP(DataDrop, "data.drop")
CASE_OP(MemoryCopy, "memory.copy")
CASE_OP(MemoryFill, "memory.fill")
CASE_OP(TableInit, "table.init")
CASE_OP(ElemDrop, "elem.drop")
CASE_OP(TableCopy, "table.copy")
CASE_OP(TableGrow, "table.grow")
CASE_OP(TableSize, "table.size")
CASE_OP(TableFill, "table.fill")
// SIMD opcodes.
CASE_SIMD_OP(Splat, "splat")
CASE_SIMD_OP(Neg, "neg")
CASE_SIMDF_OP(Sqrt, "sqrt")
CASE_SIMD_OP(Eq, "eq")
CASE_SIMD_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
CASE_I16x8_OP(Mul, "mul")
CASE_I32x4_OP(Mul, "mul")
CASE_I64x2_OP(Mul, "mul")
CASE_SIMDF_OP(Mul, "mul")
CASE_SIMDF_OP(Div, "div")
CASE_SIMDF_OP(Lt, "lt")
CASE_SIMDF_OP(Le, "le")
CASE_SIMDF_OP(Gt, "gt")
CASE_SIMDF_OP(Ge, "ge")
CASE_SIMDF_OP(Abs, "abs")
CASE_SIMDF_OP(Min, "min")
CASE_SIMDF_OP(Max, "max")
CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32x4", "convert")
CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32x4", "trunc_sat")
CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i16x8", "extend_low")
CASE_CONVERT_OP(Convert, I32x4, I16x8High, "i16x8", "extend_high")
CASE_CONVERT_OP(Convert, I16x8, I32x4, "i32x4", "narrow")
CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i8x16", "extend_low")
CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i8x16", "extend_high")
CASE_CONVERT_OP(Convert, I8x16, I16x8, "i16x8", "narrow")
CASE_SIMDF_OP(ExtractLane, "extract_lane")
CASE_SIMDF_OP(ReplaceLane, "replace_lane")
CASE_I64x2_OP(ExtractLane, "extract_lane")
CASE_I32x4_OP(ExtractLane, "extract_lane")
CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane")
CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI_NO64X2, Min, "min")
CASE_SIGN_OP(SIMDI_NO64X2, Max, "max")
CASE_SIGN_OP(SIMDI_NO64X2, Lt, "lt")
CASE_I64x2_OP(LtS, "lt_s")
CASE_I64x2_OP(GtS, "gt_s")
CASE_I64x2_OP(LeS, "le_s")
CASE_I64x2_OP(GeS, "ge_s")
CASE_SIGN_OP(SIMDI_NO64X2, Le, "le")
CASE_SIGN_OP(SIMDI_NO64X2, Gt, "gt")
CASE_SIGN_OP(SIMDI_NO64X2, Ge, "ge")
CASE_CONVERT_OP(Convert, I64x2, I32x4Low, "i32x4", "extend_low")
CASE_CONVERT_OP(Convert, I64x2, I32x4High, "i32x4", "extend_high")
CASE_SIGN_OP(SIMDI, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
CASE_SIGN_OP(I16x8, AddSat, "add_sat")
CASE_SIGN_OP(I8x16, AddSat, "add_sat")
CASE_SIGN_OP(I16x8, SubSat, "sub_sat")
CASE_SIGN_OP(I8x16, SubSat, "sub_sat")
CASE_S128_OP(And, "and")
CASE_S128_OP(Or, "or")
CASE_S128_OP(Xor, "xor")
CASE_S128_OP(Not, "not")
CASE_S128_OP(Select, "bitselect")
CASE_S128_OP(AndNot, "andnot")
CASE_I8x16_OP(Swizzle, "swizzle")
CASE_I8x16_OP(Shuffle, "shuffle")
CASE_V128_OP(AnyTrue, "any_true")
CASE_SIMDI_OP(AllTrue, "all_true")
CASE_S128_OP(Load32Zero, "load32_zero")
CASE_S128_OP(Load64Zero, "load64_zero")
CASE_S128_OP(Load8Splat, "load8_splat")
CASE_S128_OP(Load16Splat, "load16_splat")
CASE_S128_OP(Load32Splat, "load32_splat")
CASE_S128_OP(Load64Splat, "load64_splat")
CASE_S128_OP(Load8x8S, "load8x8_s")
CASE_S128_OP(Load8x8U, "load8x8_u")
CASE_S128_OP(Load16x4S, "load16x4_s")
CASE_S128_OP(Load16x4U, "load16x4_u")
CASE_S128_OP(Load32x2S, "load32x2_s")
CASE_S128_OP(Load32x2U, "load32x2_u")
CASE_S128_OP(Load8Lane, "load8_lane")
CASE_S128_OP(Load16Lane, "load16_lane")
CASE_S128_OP(Load32Lane, "load32_lane")
CASE_S128_OP(Load64Lane, "load64_lane")
CASE_S128_OP(Store8Lane, "store8_lane")
CASE_S128_OP(Store16Lane, "store16_lane")
CASE_S128_OP(Store32Lane, "store32_lane")
CASE_S128_OP(Store64Lane, "store64_lane")
CASE_I8x16_OP(RoundingAverageU, "avgr_u")
CASE_I16x8_OP(RoundingAverageU, "avgr_u")
CASE_I16x8_OP(Q15MulRSatS, "q15mulr_sat_s")
CASE_SIMDI_OP(Abs, "abs")
CASE_SIMDI_OP(BitMask, "bitmask")
CASE_I8x16_OP(Popcnt, "popcnt")
CASE_SIMDF_OP(Pmin, "pmin")
CASE_SIMDF_OP(Pmax, "pmax")
CASE_SIMDF_OP(Ceil, "ceil")
CASE_SIMDF_OP(Floor, "floor")
CASE_SIMDF_OP(Trunc, "trunc")
CASE_SIMDF_OP(NearestInt, "nearest")
CASE_I32x4_OP(DotI16x8S, "dot_i16x8_s")
CASE_SIGN_OP(I16x8, ExtMulLowI8x16, "extmul_low_i8x16")
CASE_SIGN_OP(I16x8, ExtMulHighI8x16, "extmul_high_i8x16")
CASE_SIGN_OP(I32x4, ExtMulLowI16x8, "extmul_low_i16x8")
CASE_SIGN_OP(I32x4, ExtMulHighI16x8, "extmul_high_i16x8")
CASE_SIGN_OP(I64x2, ExtMulLowI32x4, "extmul_low_i32x4")
CASE_SIGN_OP(I64x2, ExtMulHighI32x4, "extmul_high_i32x4")
CASE_SIGN_OP(I32x4, ExtAddPairwiseI16x8, "extadd_pairwise_i16x8")
CASE_SIGN_OP(I16x8, ExtAddPairwiseI8x16, "extadd_pairwise_i8x16")
CASE_F64x2_OP(ConvertLowI32x4S, "convert_low_i32x4_s")
CASE_F64x2_OP(ConvertLowI32x4U, "convert_low_i32x4_u")
CASE_I32x4_OP(TruncSatF64x2SZero, "trunc_sat_f64x2_s_zero")
CASE_I32x4_OP(TruncSatF64x2UZero, "trunc_sat_f64x2_u_zero")
CASE_F32x4_OP(DemoteF64x2Zero, "demote_f64x2_zero")
CASE_F64x2_OP(PromoteLowF32x4, "promote_low_f32x4")
// Relaxed SIMD opcodes.
CASE_SIMDF_OP(Qfma, "qfma")
CASE_SIMDF_OP(Qfms, "qfms")
CASE_I8x16_OP(RelaxedSwizzle, "relaxed_swizzle");
CASE_I8x16_OP(RelaxedLaneSelect, "relaxed_laneselect");
CASE_I16x8_OP(RelaxedLaneSelect, "relaxed_laneselect");
CASE_I32x4_OP(RelaxedLaneSelect, "relaxed_laneselect");
CASE_I64x2_OP(RelaxedLaneSelect, "relaxed_laneselect");
CASE_SIMDF_OP(RelaxedMin, "relaxed_min");
CASE_SIMDF_OP(RelaxedMax, "relaxed_max");
CASE_I32x4_OP(RelaxedTruncF32x4S, "relaxed_trunc_f32x4_s");
CASE_I32x4_OP(RelaxedTruncF32x4U, "relaxed_trunc_f32x4_u");
CASE_I32x4_OP(RelaxedTruncF64x2SZero, "relaxed_trunc_f64x2_s_zero");
CASE_I32x4_OP(RelaxedTruncF64x2UZero, "relaxed_trunc_f64x2_u_zero");
CASE_I16x8_OP(RelaxedQ15MulRS, "relaxed_q15mulr_s")
CASE_I16x8_OP(DotI8x16I7x16S, "dot_i8x16_i7x16_s")
CASE_I32x4_OP(DotI8x16I7x16AddS, "dot_i8x16_i7x16_add_s")
// Atomic operations.
CASE_OP(AtomicNotify, "memory.atomic.notify")
CASE_OP(I32AtomicWait, "memory.atomic.wait32")
CASE_OP(I64AtomicWait, "memory.atomic.wait64")
CASE_OP(AtomicFence, "atomic.fence")
CASE_ATOMIC_LOAD(AtomicLoad, "atomic.load")
CASE_ATOMIC_STORE(AtomicStore, "atomic.store")
CASE_ATOMIC_RMW(AtomicAdd, "add")
CASE_ATOMIC_RMW(AtomicSub, "sub")
CASE_ATOMIC_RMW(AtomicAnd, "and")
CASE_ATOMIC_RMW(AtomicOr, "or")
CASE_ATOMIC_RMW(AtomicXor, "xor")
CASE_ATOMIC_RMW(AtomicExchange, "xchg")
CASE_ATOMIC_RMW(AtomicCompareExchange, "cmpxchg")
// GC operations.
CASE_OP(StructNewWithRtt, "struct.new_with_rtt")
CASE_OP(StructNewDefaultWithRtt, "struct.new_default_with_rtt")
CASE_OP(StructNew, "struct.new")
CASE_OP(StructNewDefault, "struct.new_default")
CASE_OP(StructGet, "struct.get")
CASE_OP(StructGetS, "struct.get_s")
CASE_OP(StructGetU, "struct.get_u")
CASE_OP(StructSet, "struct.set")
CASE_OP(ArrayNewWithRtt, "array.new_with_rtt")
CASE_OP(ArrayNewDefaultWithRtt, "array.new_default_with_rtt")
CASE_OP(ArrayNew, "array.new")
CASE_OP(ArrayNewDefault, "array.new_default")
CASE_OP(ArrayGet, "array.get")
CASE_OP(ArrayGetS, "array.get_s")
CASE_OP(ArrayGetU, "array.get_u")
CASE_OP(ArraySet, "array.set")
CASE_OP(ArrayLen, "array.len")
CASE_OP(ArrayCopy, "array.copy")
CASE_OP(ArrayInit, "array.init")
CASE_OP(ArrayInitStatic, "array.init_static")
CASE_OP(ArrayInitFromData, "array.init_from_data")
CASE_OP(ArrayInitFromDataStatic, "array.init_from_data_static")
CASE_OP(ArrayInitFromElemStatic, "array.init_from_elem_static")
CASE_OP(I31New, "i31.new")
CASE_OP(I31GetS, "i31.get_s")
CASE_OP(I31GetU, "i31.get_u")
CASE_OP(RttCanon, "rtt.canon")
CASE_OP(RefTest, "ref.test")
CASE_OP(RefTestStatic, "ref.test_static")
CASE_OP(RefCast, "ref.cast")
CASE_OP(RefCastStatic, "ref.cast_static")
CASE_OP(RefCastNopStatic, "ref.cast_nop_static")
CASE_OP(BrOnCast, "br_on_cast")
CASE_OP(BrOnCastStatic, "br_on_cast_static")
CASE_OP(BrOnCastFail, "br_on_cast_fail")
CASE_OP(BrOnCastStaticFail, "br_on_cast_static_fail")
CASE_OP(RefIsFunc, "ref.is_func")
CASE_OP(RefIsData, "ref.is_data")
CASE_OP(RefIsI31, "ref.is_i31")
CASE_OP(RefIsArray, "ref.is_array")
CASE_OP(RefAsFunc, "ref.as_func")
CASE_OP(RefAsData, "ref.as_data")
CASE_OP(RefAsI31, "ref.as_i31")
CASE_OP(RefAsArray, "ref.as_array")
CASE_OP(BrOnFunc, "br_on_func")
CASE_OP(BrOnData, "br_on_data")
CASE_OP(BrOnI31, "br_on_i31")
CASE_OP(BrOnArray, "br_on_array")
CASE_OP(BrOnNonFunc, "br_on_non_func")
CASE_OP(BrOnNonData, "br_on_non_data")
CASE_OP(BrOnNonI31, "br_on_non_i31")
CASE_OP(BrOnNonArray, "br_on_non_array")
CASE_OP(StringNewWtf8, "string.new_wtf8")
CASE_OP(StringNewWtf16, "string.new_wtf16")
CASE_OP(StringConst, "string.const")
CASE_OP(StringMeasureUtf8, "string.measure_utf8")
CASE_OP(StringMeasureWtf8, "string.measure_wtf8")
CASE_OP(StringMeasureWtf16, "string.measure_wtf16")
CASE_OP(StringEncodeWtf8, "string.encode_wtf8")
CASE_OP(StringEncodeWtf16, "string.encode_wtf16")
CASE_OP(StringConcat, "string.concat")
CASE_OP(StringEq, "string.eq")
CASE_OP(StringIsUSVSequence, "string.is_usv_sequence")
CASE_OP(StringAsWtf8, "string.as_wtf8")
CASE_OP(StringViewWtf8Advance, "stringview_wtf8.advance")
CASE_OP(StringViewWtf8Encode, "stringview_wtf8.encode")
CASE_OP(StringViewWtf8Slice, "stringview_wtf8.slice")
CASE_OP(StringAsWtf16, "string.as_wtf16")
CASE_OP(StringViewWtf16Length, "stringview_wtf16.length")
CASE_OP(StringViewWtf16GetCodeUnit, "stringview_wtf16.get_codeunit")
CASE_OP(StringViewWtf16Encode, "stringview_wtf16.encode")
CASE_OP(StringViewWtf16Slice, "stringview_wtf16.slice")
CASE_OP(StringAsIter, "string.as_iter")
CASE_OP(StringViewIterCur, "stringview_iter.cur")
CASE_OP(StringViewIterAdvance, "stringview_iter.advance")
CASE_OP(StringViewIterRewind, "stringview_iter.rewind")
CASE_OP(StringViewIterSlice, "stringview_iter.slice")
#define CASE(opcode, binary, sig, name) \
case kExpr##opcode: \
return name;
FOREACH_OPCODE(CASE)
#undef CASE
case kNumericPrefix:
case kSimdPrefix:
case kAtomicPrefix:
case kGCPrefix:
return "unknown";
// clang-format on
}
// Even though the switch above handles all well-defined enum values,
// random modules (e.g. fuzzer generated) can call this function with
@@ -495,36 +42,6 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
return "invalid opcode";
}
#undef CASE_OP
#undef CASE_I32_OP
#undef CASE_I64_OP
#undef CASE_F32_OP
#undef CASE_F64_OP
#undef CASE_REF_OP
#undef CASE_F64x2_OP
#undef CASE_F32x4_OP
#undef CASE_I64x2_OP
#undef CASE_I32x4_OP
#undef CASE_I16x8_OP
#undef CASE_I8x16_OP
#undef CASE_S128_OP
#undef CASE_V128_OP
#undef CASE_INT_OP
#undef CASE_FLOAT_OP
#undef CASE_ALL_OP
#undef CASE_SIMD_OP
#undef CASE_SIMDI_OP
#undef CASE_SIMDI_NO64X2_OP
#undef CASE_SIGN_OP
#undef CASE_UNSIGNED_OP
#undef CASE_ALL_SIGN_OP
#undef CASE_CONVERT_OP
#undef CASE_CONVERT_SAT_OP
#undef CASE_L32_OP
#undef CASE_ATOMIC_LOAD
#undef CASE_ATOMIC_STORE
#undef CASE_ATOMIC_RMW
// static
constexpr bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
switch (opcode) {
@@ -540,7 +57,7 @@ constexpr bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
// static
constexpr bool WasmOpcodes::IsControlOpcode(WasmOpcode opcode) {
switch (opcode) {
#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
#define CHECK_OPCODE(name, ...) case kExpr##name:
FOREACH_CONTROL_OPCODE(CHECK_OPCODE)
#undef CHECK_OPCODE
return true;
@@ -612,7 +129,7 @@ constexpr bool WasmOpcodes::IsRelaxedSimdOpcode(WasmOpcode opcode) {
// Relaxed SIMD opcodes have the SIMD prefix (0xfd) shifted by 12 bits, and
// nibble 3 must be 0x1. I.e. their encoded opcode is in [0xfd100, 0xfd1ff].
static_assert(kSimdPrefix == 0xfd);
#define CHECK_OPCODE(name, opcode, _) \
#define CHECK_OPCODE(name, opcode, ...) \
static_assert((opcode & 0xfff00) == 0xfd100);
FOREACH_RELAXED_SIMD_OPCODE(CHECK_OPCODE)
#undef CHECK_OPCODE
@@ -648,44 +165,42 @@ constexpr const FunctionSig* kCachedSigs[] = {
#undef DECLARE_SIG_ENTRY
constexpr WasmOpcodeSig GetShortOpcodeSigIndex(byte opcode) {
#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
#define CASE(name, opc, sig, ...) opcode == opc ? kSigEnum_##sig:
return FOREACH_SIMPLE_OPCODE(CASE) FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE)
kSigEnum_None;
#undef CASE
}
constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
#define CASE(name, opc, sig, ...) opcode == opc ? kSigEnum_##sig:
return FOREACH_ASMJS_COMPAT_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
#define CASE(name, opc, sig, ...) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_SIMD_MVP_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
FOREACH_SIMD_MEM_1_OPERAND_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
constexpr WasmOpcodeSig GetRelaxedSimdOpcodeSigIndex(byte opcode) {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
#define CASE(name, opc, sig, ...) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_RELAXED_SIMD_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
constexpr WasmOpcodeSig GetAtomicOpcodeSigIndex(byte opcode) {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
#define CASE(name, opc, sig, ...) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
kSigEnum_None;
#undef CASE
}
constexpr WasmOpcodeSig GetNumericOpcodeSigIndex(byte opcode) {
#define CASE_SIG(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
#define CASE_VARIADIC(name, opc)
return FOREACH_NUMERIC_OPCODE(CASE_SIG, CASE_VARIADIC) kSigEnum_None;
#undef CASE_SIG
#undef CASE_VARIADIC
#define CASE(name, opc, sig, ...) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_NUMERIC_OPCODE_WITH_SIG(CASE) kSigEnum_None;
#undef CASE
}
constexpr std::array<WasmOpcodeSig, 256> kShortSigTable =
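
A note on the constexpr helpers above: each CASE expands to "opcode == opc ? kSigEnum_sig :", so the whole FOREACH expansion folds into one right-nested conditional expression terminated by kSigEnum_None, keeping every lookup a single-return constexpr function. A reduced sketch with a made-up two-entry list (FOREACH_DEMO_SIG_OPCODE and the DemoSig enum are illustrative):

enum DemoSig : unsigned char { kSigEnum_None, kSigEnum_i_ii, kSigEnum_l_ll };

#define FOREACH_DEMO_SIG_OPCODE(V) \
  V(I32Add, 0x6a, i_ii, "i32.add") \
  V(I64Add, 0x7c, l_ll, "i64.add")

constexpr DemoSig GetDemoSigIndex(unsigned char opcode) {
#define CASE(name, opc, sig, ...) opcode == opc ? kSigEnum_##sig:
  // Expands to: opcode == 0x6a ? kSigEnum_i_ii
  //           : opcode == 0x7c ? kSigEnum_l_ll
  //           : kSigEnum_None;
  return FOREACH_DEMO_SIG_OPCODE(CASE) kSigEnum_None;
#undef CASE
}

static_assert(GetDemoSigIndex(0x6a) == kSigEnum_i_ii, "first entry matches");
static_assert(GetDemoSigIndex(0x00) == kSigEnum_None, "unknown bytes fall through");

Note also that GetNumericOpcodeSigIndex drops its second macro argument: the new FOREACH_NUMERIC_OPCODE_WITH_SIG list contains only entries that carry a signature, so a single CASE macro suffices.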
@@ -30,538 +30,543 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
const WasmModule* module,
const WasmFeatures&);
// Format of all opcode macros: kExprName, binary, signature, wat name
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
V(Unreachable, 0x00, _) \
V(Nop, 0x01, _) \
V(Block, 0x02, _) \
V(Loop, 0x03, _) \
V(If, 0x04, _) \
V(Else, 0x05, _) \
V(Try, 0x06, _ /* eh_prototype */) \
V(Catch, 0x07, _ /* eh_prototype */) \
V(Throw, 0x08, _ /* eh_prototype */) \
V(Rethrow, 0x09, _ /* eh_prototype */) \
V(End, 0x0b, _) \
V(Br, 0x0c, _) \
V(BrIf, 0x0d, _) \
V(BrTable, 0x0e, _) \
V(Return, 0x0f, _) \
V(Let, 0x17, _ /* typed_funcref prototype */) \
V(Delegate, 0x18, _ /* eh_prototype */) \
V(CatchAll, 0x19, _ /* eh_prototype */) \
V(BrOnNull, 0xd4, _ /* gc prototype */) \
V(BrOnNonNull, 0xd6, _ /* gc prototype */) \
V(NopForTestingUnsupportedInLiftoff, 0x16, _)
#define FOREACH_CONTROL_OPCODE(V) \
V(Unreachable, 0x00, _, "unreachable") \
V(Nop, 0x01, _, "nop") \
V(Block, 0x02, _, "block") \
V(Loop, 0x03, _, "loop") \
V(If, 0x04, _, "if") \
V(Else, 0x05, _, "else") \
V(Try, 0x06, _, "try") /* eh_prototype */ \
V(Catch, 0x07, _, "catch") /* eh_prototype */ \
V(Throw, 0x08, _, "throw") /* eh_prototype */ \
V(Rethrow, 0x09, _, "rethrow") /* eh_prototype */ \
V(End, 0x0b, _, "end") \
V(Br, 0x0c, _, "br") \
V(BrIf, 0x0d, _, "br_if") \
V(BrTable, 0x0e, _, "br_table") \
V(Return, 0x0f, _, "return") \
V(Let, 0x17, _, "let") /* typed_funcref prototype */ \
V(Delegate, 0x18, _, "delegate") /* eh_prototype */ \
V(CatchAll, 0x19, _, "catch_all") /* eh_prototype */ \
V(BrOnNull, 0xd4, _, "br_on_null") /* gc prototype */ \
V(BrOnNonNull, 0xd6, _, "br_on_non_null") /* gc prototype */ \
V(NopForTestingUnsupportedInLiftoff, 0x16, _, "nop_for_testing")
// Constants, locals, globals, and calls.
#define FOREACH_MISC_OPCODE(V) \
V(CallFunction, 0x10, _) \
V(CallIndirect, 0x11, _) \
V(ReturnCall, 0x12, _) \
V(ReturnCallIndirect, 0x13, _) \
V(CallRef, 0x14, _ /* typed_funcref prototype */) \
V(ReturnCallRef, 0x15, _ /* typed_funcref prototype */) \
V(Drop, 0x1a, _) \
V(Select, 0x1b, _) \
V(SelectWithType, 0x1c, _) \
V(LocalGet, 0x20, _) \
V(LocalSet, 0x21, _) \
V(LocalTee, 0x22, _) \
V(GlobalGet, 0x23, _) \
V(GlobalSet, 0x24, _) \
V(TableGet, 0x25, _) \
V(TableSet, 0x26, _) \
V(I32Const, 0x41, _) \
V(I64Const, 0x42, _) \
V(F32Const, 0x43, _) \
V(F64Const, 0x44, _) \
V(RefNull, 0xd0, _) \
V(RefIsNull, 0xd1, _) \
V(RefFunc, 0xd2, _) \
V(RefAsNonNull, 0xd3, _ /* typed_funcref prototype */)
#define FOREACH_MISC_OPCODE(V) \
V(CallFunction, 0x10, _, "call") \
V(CallIndirect, 0x11, _, "call_indirect") \
V(ReturnCall, 0x12, _, "return_call") \
V(ReturnCallIndirect, 0x13, _, "return_call_indirect") \
V(CallRef, 0x14, _, "call_ref") /* typed_funcref prototype */ \
V(ReturnCallRef, 0x15, _, "return_call_ref") /* typed_funcref prototype */ \
V(Drop, 0x1a, _, "drop") \
V(Select, 0x1b, _, "select") \
V(SelectWithType, 0x1c, _, "select") \
V(LocalGet, 0x20, _, "local.get") \
V(LocalSet, 0x21, _, "local.set") \
V(LocalTee, 0x22, _, "local.tee") \
V(GlobalGet, 0x23, _, "global.get") \
V(GlobalSet, 0x24, _, "global.set") \
V(TableGet, 0x25, _, "table.get") \
V(TableSet, 0x26, _, "table.set") \
V(I32Const, 0x41, _, "i32.const") \
V(I64Const, 0x42, _, "i64.const") \
V(F32Const, 0x43, _, "f32.const") \
V(F64Const, 0x44, _, "f64.const") \
V(RefNull, 0xd0, _, "ref.null") \
V(RefIsNull, 0xd1, _, "ref.is_null") \
V(RefFunc, 0xd2, _, "ref.func") \
V(RefAsNonNull, 0xd3, _, "ref.as_non_null") /* typed_funcref prototype */
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
V(I32LoadMem, 0x28, i_i) \
V(I64LoadMem, 0x29, l_i) \
V(F32LoadMem, 0x2a, f_i) \
V(F64LoadMem, 0x2b, d_i) \
V(I32LoadMem8S, 0x2c, i_i) \
V(I32LoadMem8U, 0x2d, i_i) \
V(I32LoadMem16S, 0x2e, i_i) \
V(I32LoadMem16U, 0x2f, i_i) \
V(I64LoadMem8S, 0x30, l_i) \
V(I64LoadMem8U, 0x31, l_i) \
V(I64LoadMem16S, 0x32, l_i) \
V(I64LoadMem16U, 0x33, l_i) \
V(I64LoadMem32S, 0x34, l_i) \
V(I64LoadMem32U, 0x35, l_i)
#define FOREACH_LOAD_MEM_OPCODE(V) \
V(I32LoadMem, 0x28, i_i, "i32.load") \
V(I64LoadMem, 0x29, l_i, "i64.load") \
V(F32LoadMem, 0x2a, f_i, "f32.load") \
V(F64LoadMem, 0x2b, d_i, "f64.load") \
V(I32LoadMem8S, 0x2c, i_i, "i32.load8_s") \
V(I32LoadMem8U, 0x2d, i_i, "i32.load8_u") \
V(I32LoadMem16S, 0x2e, i_i, "i32.load16_s") \
V(I32LoadMem16U, 0x2f, i_i, "i32.load16_u") \
V(I64LoadMem8S, 0x30, l_i, "i64.load8_s") \
V(I64LoadMem8U, 0x31, l_i, "i64.load8_u") \
V(I64LoadMem16S, 0x32, l_i, "i64.load16_s") \
V(I64LoadMem16U, 0x33, l_i, "i64.load16_u") \
V(I64LoadMem32S, 0x34, l_i, "i64.load32_s") \
V(I64LoadMem32U, 0x35, l_i, "i64.load32_u")
// Store memory expressions.
#define FOREACH_STORE_MEM_OPCODE(V) \
V(I32StoreMem, 0x36, v_ii) \
V(I64StoreMem, 0x37, v_il) \
V(F32StoreMem, 0x38, v_if) \
V(F64StoreMem, 0x39, v_id) \
V(I32StoreMem8, 0x3a, v_ii) \
V(I32StoreMem16, 0x3b, v_ii) \
V(I64StoreMem8, 0x3c, v_il) \
V(I64StoreMem16, 0x3d, v_il) \
V(I64StoreMem32, 0x3e, v_il)
#define FOREACH_STORE_MEM_OPCODE(V) \
V(I32StoreMem, 0x36, v_ii, "i32.store") \
V(I64StoreMem, 0x37, v_il, "i64.store") \
V(F32StoreMem, 0x38, v_if, "f32.store") \
V(F64StoreMem, 0x39, v_id, "f64.store") \
V(I32StoreMem8, 0x3a, v_ii, "i32.store8") \
V(I32StoreMem16, 0x3b, v_ii, "i32.store16") \
V(I64StoreMem8, 0x3c, v_il, "i64.store8") \
V(I64StoreMem16, 0x3d, v_il, "i64.store16") \
V(I64StoreMem32, 0x3e, v_il, "i64.store32")
// Miscellaneous memory expressions
#define FOREACH_MISC_MEM_OPCODE(V) \
V(MemorySize, 0x3f, i_v) \
V(MemoryGrow, 0x40, i_i)
#define FOREACH_MISC_MEM_OPCODE(V) \
V(MemorySize, 0x3f, i_v, "memory.size") \
V(MemoryGrow, 0x40, i_i, "memory.grow")
// Expressions with signatures.
// The following opcodes can be used as constant expressions under
// --experimental-wasm-extended-const.
#define FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(V) \
V(I32Add, 0x6a, i_ii) \
V(I32Sub, 0x6b, i_ii) \
V(I32Mul, 0x6c, i_ii) \
V(I64Add, 0x7c, l_ll) \
V(I64Sub, 0x7d, l_ll) \
V(I64Mul, 0x7e, l_ll)
#define FOREACH_SIMPLE_NON_CONST_OPCODE(V) \
V(I32Eqz, 0x45, i_i) \
V(I32Eq, 0x46, i_ii) \
V(I32Ne, 0x47, i_ii) \
V(I32LtS, 0x48, i_ii) \
V(I32LtU, 0x49, i_ii) \
V(I32GtS, 0x4a, i_ii) \
V(I32GtU, 0x4b, i_ii) \
V(I32LeS, 0x4c, i_ii) \
V(I32LeU, 0x4d, i_ii) \
V(I32GeS, 0x4e, i_ii) \
V(I32GeU, 0x4f, i_ii) \
V(I64Eqz, 0x50, i_l) \
V(I64Eq, 0x51, i_ll) \
V(I64Ne, 0x52, i_ll) \
V(I64LtS, 0x53, i_ll) \
V(I64LtU, 0x54, i_ll) \
V(I64GtS, 0x55, i_ll) \
V(I64GtU, 0x56, i_ll) \
V(I64LeS, 0x57, i_ll) \
V(I64LeU, 0x58, i_ll) \
V(I64GeS, 0x59, i_ll) \
V(I64GeU, 0x5a, i_ll) \
V(F32Eq, 0x5b, i_ff) \
V(F32Ne, 0x5c, i_ff) \
V(F32Lt, 0x5d, i_ff) \
V(F32Gt, 0x5e, i_ff) \
V(F32Le, 0x5f, i_ff) \
V(F32Ge, 0x60, i_ff) \
V(F64Eq, 0x61, i_dd) \
V(F64Ne, 0x62, i_dd) \
V(F64Lt, 0x63, i_dd) \
V(F64Gt, 0x64, i_dd) \
V(F64Le, 0x65, i_dd) \
V(F64Ge, 0x66, i_dd) \
V(I32Clz, 0x67, i_i) \
V(I32Ctz, 0x68, i_i) \
V(I32Popcnt, 0x69, i_i) \
V(I32DivS, 0x6d, i_ii) \
V(I32DivU, 0x6e, i_ii) \
V(I32RemS, 0x6f, i_ii) \
V(I32RemU, 0x70, i_ii) \
V(I32And, 0x71, i_ii) \
V(I32Ior, 0x72, i_ii) \
V(I32Xor, 0x73, i_ii) \
V(I32Shl, 0x74, i_ii) \
V(I32ShrS, 0x75, i_ii) \
V(I32ShrU, 0x76, i_ii) \
V(I32Rol, 0x77, i_ii) \
V(I32Ror, 0x78, i_ii) \
V(I64Clz, 0x79, l_l) \
V(I64Ctz, 0x7a, l_l) \
V(I64Popcnt, 0x7b, l_l) \
V(I64DivS, 0x7f, l_ll) \
V(I64DivU, 0x80, l_ll) \
V(I64RemS, 0x81, l_ll) \
V(I64RemU, 0x82, l_ll) \
V(I64And, 0x83, l_ll) \
V(I64Ior, 0x84, l_ll) \
V(I64Xor, 0x85, l_ll) \
V(I64Shl, 0x86, l_ll) \
V(I64ShrS, 0x87, l_ll) \
V(I64ShrU, 0x88, l_ll) \
V(I64Rol, 0x89, l_ll) \
V(I64Ror, 0x8a, l_ll) \
V(F32Abs, 0x8b, f_f) \
V(F32Neg, 0x8c, f_f) \
V(F32Ceil, 0x8d, f_f) \
V(F32Floor, 0x8e, f_f) \
V(F32Trunc, 0x8f, f_f) \
V(F32NearestInt, 0x90, f_f) \
V(F32Sqrt, 0x91, f_f) \
V(F32Add, 0x92, f_ff) \
V(F32Sub, 0x93, f_ff) \
V(F32Mul, 0x94, f_ff) \
V(F32Div, 0x95, f_ff) \
V(F32Min, 0x96, f_ff) \
V(F32Max, 0x97, f_ff) \
V(F32CopySign, 0x98, f_ff) \
V(F64Abs, 0x99, d_d) \
V(F64Neg, 0x9a, d_d) \
V(F64Ceil, 0x9b, d_d) \
V(F64Floor, 0x9c, d_d) \
V(F64Trunc, 0x9d, d_d) \
V(F64NearestInt, 0x9e, d_d) \
V(F64Sqrt, 0x9f, d_d) \
V(F64Add, 0xa0, d_dd) \
V(F64Sub, 0xa1, d_dd) \
V(F64Mul, 0xa2, d_dd) \
V(F64Div, 0xa3, d_dd) \
V(F64Min, 0xa4, d_dd) \
V(F64Max, 0xa5, d_dd) \
V(F64CopySign, 0xa6, d_dd) \
V(I32ConvertI64, 0xa7, i_l) \
V(I32SConvertF32, 0xa8, i_f) \
V(I32UConvertF32, 0xa9, i_f) \
V(I32SConvertF64, 0xaa, i_d) \
V(I32UConvertF64, 0xab, i_d) \
V(I64SConvertI32, 0xac, l_i) \
V(I64UConvertI32, 0xad, l_i) \
V(I64SConvertF32, 0xae, l_f) \
V(I64UConvertF32, 0xaf, l_f) \
V(I64SConvertF64, 0xb0, l_d) \
V(I64UConvertF64, 0xb1, l_d) \
V(F32SConvertI32, 0xb2, f_i) \
V(F32UConvertI32, 0xb3, f_i) \
V(F32SConvertI64, 0xb4, f_l) \
V(F32UConvertI64, 0xb5, f_l) \
V(F32ConvertF64, 0xb6, f_d) \
V(F64SConvertI32, 0xb7, d_i) \
V(F64UConvertI32, 0xb8, d_i) \
V(F64SConvertI64, 0xb9, d_l) \
V(F64UConvertI64, 0xba, d_l) \
V(F64ConvertF32, 0xbb, d_f) \
V(I32ReinterpretF32, 0xbc, i_f) \
V(I64ReinterpretF64, 0xbd, l_d) \
V(F32ReinterpretI32, 0xbe, f_i) \
V(F64ReinterpretI64, 0xbf, d_l) \
V(I32SExtendI8, 0xc0, i_i) \
V(I32SExtendI16, 0xc1, i_i) \
V(I64SExtendI8, 0xc2, l_l) \
V(I64SExtendI16, 0xc3, l_l) \
V(I64SExtendI32, 0xc4, l_l)
V(I32Add, 0x6a, i_ii, "i32.add") \
V(I32Sub, 0x6b, i_ii, "i32.sub") \
V(I32Mul, 0x6c, i_ii, "i32.mul") \
V(I64Add, 0x7c, l_ll, "i64.add") \
V(I64Sub, 0x7d, l_ll, "i64.sub") \
V(I64Mul, 0x7e, l_ll, "i64.mul")
#define FOREACH_SIMPLE_NON_CONST_OPCODE(V) \
V(I32Eqz, 0x45, i_i, "i32.eqz") \
V(I32Eq, 0x46, i_ii, "i32.eq") \
V(I32Ne, 0x47, i_ii, "i32.ne") \
V(I32LtS, 0x48, i_ii, "i32.lt_s") \
V(I32LtU, 0x49, i_ii, "i32.lt_u") \
V(I32GtS, 0x4a, i_ii, "i32.gt_s") \
V(I32GtU, 0x4b, i_ii, "i32.gt_u") \
V(I32LeS, 0x4c, i_ii, "i32.le_s") \
V(I32LeU, 0x4d, i_ii, "i32.le_u") \
V(I32GeS, 0x4e, i_ii, "i32.ge_s") \
V(I32GeU, 0x4f, i_ii, "i32.ge_u") \
V(I64Eqz, 0x50, i_l, "i64.eqz") \
V(I64Eq, 0x51, i_ll, "i64.eq") \
V(I64Ne, 0x52, i_ll, "i64.ne") \
V(I64LtS, 0x53, i_ll, "i64.lt_s") \
V(I64LtU, 0x54, i_ll, "i64.lt_u") \
V(I64GtS, 0x55, i_ll, "i64.gt_s") \
V(I64GtU, 0x56, i_ll, "i64.gt_u") \
V(I64LeS, 0x57, i_ll, "i64.le_s") \
V(I64LeU, 0x58, i_ll, "i64.le_u") \
V(I64GeS, 0x59, i_ll, "i64.ge_s") \
V(I64GeU, 0x5a, i_ll, "i64.ge_u") \
V(F32Eq, 0x5b, i_ff, "f32.eq") \
V(F32Ne, 0x5c, i_ff, "f32.ne") \
V(F32Lt, 0x5d, i_ff, "f32.lt") \
V(F32Gt, 0x5e, i_ff, "f32.gt") \
V(F32Le, 0x5f, i_ff, "f32.le") \
V(F32Ge, 0x60, i_ff, "f32.ge") \
V(F64Eq, 0x61, i_dd, "f64.eq") \
V(F64Ne, 0x62, i_dd, "f64.ne") \
V(F64Lt, 0x63, i_dd, "f64.lt") \
V(F64Gt, 0x64, i_dd, "f64.gt") \
V(F64Le, 0x65, i_dd, "f64.le") \
V(F64Ge, 0x66, i_dd, "f64.ge") \
V(I32Clz, 0x67, i_i, "i32.clz") \
V(I32Ctz, 0x68, i_i, "i32.ctz") \
V(I32Popcnt, 0x69, i_i, "i32.popcnt") \
V(I32DivS, 0x6d, i_ii, "i32.div_s") \
V(I32DivU, 0x6e, i_ii, "i32.div_u") \
V(I32RemS, 0x6f, i_ii, "i32.rem_s") \
V(I32RemU, 0x70, i_ii, "i32.rem_u") \
V(I32And, 0x71, i_ii, "i32.and") \
V(I32Ior, 0x72, i_ii, "i32.or") \
V(I32Xor, 0x73, i_ii, "i32.xor") \
V(I32Shl, 0x74, i_ii, "i32.shl") \
V(I32ShrS, 0x75, i_ii, "i32.shr_s") \
V(I32ShrU, 0x76, i_ii, "i32.shr_u") \
V(I32Rol, 0x77, i_ii, "i32.rol") \
V(I32Ror, 0x78, i_ii, "i32.ror") \
V(I64Clz, 0x79, l_l, "i64.clz") \
V(I64Ctz, 0x7a, l_l, "i64.ctz") \
V(I64Popcnt, 0x7b, l_l, "i64.popcnt") \
V(I64DivS, 0x7f, l_ll, "i64.div_s") \
V(I64DivU, 0x80, l_ll, "i64.div_u") \
V(I64RemS, 0x81, l_ll, "i64.rem_s") \
V(I64RemU, 0x82, l_ll, "i64.rem_u") \
V(I64And, 0x83, l_ll, "i64.and") \
V(I64Ior, 0x84, l_ll, "i64.or") \
V(I64Xor, 0x85, l_ll, "i64.xor") \
V(I64Shl, 0x86, l_ll, "i64.shl") \
V(I64ShrS, 0x87, l_ll, "i64.shr_s") \
V(I64ShrU, 0x88, l_ll, "i64.shr_u") \
V(I64Rol, 0x89, l_ll, "i64.rol") \
V(I64Ror, 0x8a, l_ll, "i64.ror") \
V(F32Abs, 0x8b, f_f, "f32.abs") \
V(F32Neg, 0x8c, f_f, "f32.neg") \
V(F32Ceil, 0x8d, f_f, "f32.ceil") \
V(F32Floor, 0x8e, f_f, "f32.floor") \
V(F32Trunc, 0x8f, f_f, "f32.trunc") \
V(F32NearestInt, 0x90, f_f, "f32.nearest") \
V(F32Sqrt, 0x91, f_f, "f32.sqrt") \
V(F32Add, 0x92, f_ff, "f32.add") \
V(F32Sub, 0x93, f_ff, "f32.sub") \
V(F32Mul, 0x94, f_ff, "f32.mul") \
V(F32Div, 0x95, f_ff, "f32.div") \
V(F32Min, 0x96, f_ff, "f32.min") \
V(F32Max, 0x97, f_ff, "f32.max") \
V(F32CopySign, 0x98, f_ff, "f32.copysign") \
V(F64Abs, 0x99, d_d, "f64.abs") \
V(F64Neg, 0x9a, d_d, "f64.neg") \
V(F64Ceil, 0x9b, d_d, "f64.ceil") \
V(F64Floor, 0x9c, d_d, "f64.floor") \
V(F64Trunc, 0x9d, d_d, "f64.trunc") \
V(F64NearestInt, 0x9e, d_d, "f64.nearest") \
V(F64Sqrt, 0x9f, d_d, "f64.sqrt") \
V(F64Add, 0xa0, d_dd, "f64.add") \
V(F64Sub, 0xa1, d_dd, "f64.sub") \
V(F64Mul, 0xa2, d_dd, "f64.mul") \
V(F64Div, 0xa3, d_dd, "f64.div") \
V(F64Min, 0xa4, d_dd, "f64.min") \
V(F64Max, 0xa5, d_dd, "f64.max") \
V(F64CopySign, 0xa6, d_dd, "f64.copysign") \
V(I32ConvertI64, 0xa7, i_l, "i32.wrap_i64") \
V(I32SConvertF32, 0xa8, i_f, "i32.trunc_f32_s") \
V(I32UConvertF32, 0xa9, i_f, "i32.trunc_f32_u") \
V(I32SConvertF64, 0xaa, i_d, "i32.trunc_f64_s") \
V(I32UConvertF64, 0xab, i_d, "i32.trunc_f64_u") \
V(I64SConvertI32, 0xac, l_i, "i64.extend_i32_s") \
V(I64UConvertI32, 0xad, l_i, "i64.extend_i32_u") \
V(I64SConvertF32, 0xae, l_f, "i64.trunc_f32_s") \
V(I64UConvertF32, 0xaf, l_f, "i64.trunc_f32_u") \
V(I64SConvertF64, 0xb0, l_d, "i64.trunc_f64_s") \
V(I64UConvertF64, 0xb1, l_d, "i64.trunc_f64_u") \
V(F32SConvertI32, 0xb2, f_i, "f32.convert_i32_s") \
V(F32UConvertI32, 0xb3, f_i, "f32.convert_i32_u") \
V(F32SConvertI64, 0xb4, f_l, "f32.convert_i64_s") \
V(F32UConvertI64, 0xb5, f_l, "f32.convert_i64_u") \
V(F32ConvertF64, 0xb6, f_d, "f32.demote_f64") \
V(F64SConvertI32, 0xb7, d_i, "f64.convert_i32_s") \
V(F64UConvertI32, 0xb8, d_i, "f64.convert_i32_u") \
V(F64SConvertI64, 0xb9, d_l, "f64.convert_i64_s") \
V(F64UConvertI64, 0xba, d_l, "f64.convert_i64_u") \
V(F64ConvertF32, 0xbb, d_f, "f64.promote_f32") \
V(I32ReinterpretF32, 0xbc, i_f, "i32.reinterpret_f32") \
V(I64ReinterpretF64, 0xbd, l_d, "i64.reinterpret_f64") \
V(F32ReinterpretI32, 0xbe, f_i, "f32.reinterpret_i32") \
V(F64ReinterpretI64, 0xbf, d_l, "f64.reinterpret_i64") \
V(I32SExtendI8, 0xc0, i_i, "i32.extend8_s") \
V(I32SExtendI16, 0xc1, i_i, "i32.extend16_s") \
V(I64SExtendI8, 0xc2, l_l, "i64.extend8_s") \
V(I64SExtendI16, 0xc3, l_l, "i64.extend16_s") \
V(I64SExtendI32, 0xc4, l_l, "i64.extend32_s")
#define FOREACH_SIMPLE_OPCODE(V) \
FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(V) \
FOREACH_SIMPLE_NON_CONST_OPCODE(V)
#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) V(RefEq, 0xd5, i_qq)
#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) V(RefEq, 0xd5, i_qq, "ref.eq")
// For compatibility with Asm.js.
// These opcodes are not spec'ed (or visible) externally; the idea is
// to use unused ranges for internal purposes.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
V(F64Acos, 0xdc, d_d) \
V(F64Asin, 0xdd, d_d) \
V(F64Atan, 0xde, d_d) \
V(F64Cos, 0xdf, d_d) \
V(F64Sin, 0xe0, d_d) \
V(F64Tan, 0xe1, d_d) \
V(F64Exp, 0xe2, d_d) \
V(F64Log, 0xe3, d_d) \
V(F64Atan2, 0xe4, d_dd) \
V(F64Pow, 0xe5, d_dd) \
V(F64Mod, 0xe6, d_dd) \
V(I32AsmjsDivS, 0xe7, i_ii) \
V(I32AsmjsDivU, 0xe8, i_ii) \
V(I32AsmjsRemS, 0xe9, i_ii) \
V(I32AsmjsRemU, 0xea, i_ii) \
V(I32AsmjsLoadMem8S, 0xeb, i_i) \
V(I32AsmjsLoadMem8U, 0xec, i_i) \
V(I32AsmjsLoadMem16S, 0xed, i_i) \
V(I32AsmjsLoadMem16U, 0xee, i_i) \
V(I32AsmjsLoadMem, 0xef, i_i) \
V(F32AsmjsLoadMem, 0xf0, f_i) \
V(F64AsmjsLoadMem, 0xf1, d_i) \
V(I32AsmjsStoreMem8, 0xf2, i_ii) \
V(I32AsmjsStoreMem16, 0xf3, i_ii) \
V(I32AsmjsStoreMem, 0xf4, i_ii) \
V(F32AsmjsStoreMem, 0xf5, f_if) \
V(F64AsmjsStoreMem, 0xf6, d_id) \
V(I32AsmjsSConvertF32, 0xf7, i_f) \
V(I32AsmjsUConvertF32, 0xf8, i_f) \
V(I32AsmjsSConvertF64, 0xf9, i_d) \
V(I32AsmjsUConvertF64, 0xfa, i_d)
#define FOREACH_SIMD_MEM_OPCODE(V) \
V(S128LoadMem, 0xfd00, s_i) \
V(S128Load8x8S, 0xfd01, s_i) \
V(S128Load8x8U, 0xfd02, s_i) \
V(S128Load16x4S, 0xfd03, s_i) \
V(S128Load16x4U, 0xfd04, s_i) \
V(S128Load32x2S, 0xfd05, s_i) \
V(S128Load32x2U, 0xfd06, s_i) \
V(S128Load8Splat, 0xfd07, s_i) \
V(S128Load16Splat, 0xfd08, s_i) \
V(S128Load32Splat, 0xfd09, s_i) \
V(S128Load64Splat, 0xfd0a, s_i) \
V(S128StoreMem, 0xfd0b, v_is) \
V(S128Load32Zero, 0xfd5c, s_i) \
V(S128Load64Zero, 0xfd5d, s_i)
#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
V(S128Load8Lane, 0xfd54, s_is) \
V(S128Load16Lane, 0xfd55, s_is) \
V(S128Load32Lane, 0xfd56, s_is) \
V(S128Load64Lane, 0xfd57, s_is) \
V(S128Store8Lane, 0xfd58, v_is) \
V(S128Store16Lane, 0xfd59, v_is) \
V(S128Store32Lane, 0xfd5a, v_is) \
V(S128Store64Lane, 0xfd5b, v_is)
#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(I8x16Shuffle, 0xfd0d, s_ss)
#define FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
V(I8x16Swizzle, 0xfd0e, s_ss) \
V(I8x16Splat, 0xfd0f, s_i) \
V(I16x8Splat, 0xfd10, s_i) \
V(I32x4Splat, 0xfd11, s_i) \
V(I64x2Splat, 0xfd12, s_l) \
V(F32x4Splat, 0xfd13, s_f) \
V(F64x2Splat, 0xfd14, s_d) \
V(I8x16Eq, 0xfd23, s_ss) \
V(I8x16Ne, 0xfd24, s_ss) \
V(I8x16LtS, 0xfd25, s_ss) \
V(I8x16LtU, 0xfd26, s_ss) \
V(I8x16GtS, 0xfd27, s_ss) \
V(I8x16GtU, 0xfd28, s_ss) \
V(I8x16LeS, 0xfd29, s_ss) \
V(I8x16LeU, 0xfd2a, s_ss) \
V(I8x16GeS, 0xfd2b, s_ss) \
V(I8x16GeU, 0xfd2c, s_ss) \
V(I16x8Eq, 0xfd2d, s_ss) \
V(I16x8Ne, 0xfd2e, s_ss) \
V(I16x8LtS, 0xfd2f, s_ss) \
V(I16x8LtU, 0xfd30, s_ss) \
V(I16x8GtS, 0xfd31, s_ss) \
V(I16x8GtU, 0xfd32, s_ss) \
V(I16x8LeS, 0xfd33, s_ss) \
V(I16x8LeU, 0xfd34, s_ss) \
V(I16x8GeS, 0xfd35, s_ss) \
V(I16x8GeU, 0xfd36, s_ss) \
V(I32x4Eq, 0xfd37, s_ss) \
V(I32x4Ne, 0xfd38, s_ss) \
V(I32x4LtS, 0xfd39, s_ss) \
V(I32x4LtU, 0xfd3a, s_ss) \
V(I32x4GtS, 0xfd3b, s_ss) \
V(I32x4GtU, 0xfd3c, s_ss) \
V(I32x4LeS, 0xfd3d, s_ss) \
V(I32x4LeU, 0xfd3e, s_ss) \
V(I32x4GeS, 0xfd3f, s_ss) \
V(I32x4GeU, 0xfd40, s_ss) \
V(F32x4Eq, 0xfd41, s_ss) \
V(F32x4Ne, 0xfd42, s_ss) \
V(F32x4Lt, 0xfd43, s_ss) \
V(F32x4Gt, 0xfd44, s_ss) \
V(F32x4Le, 0xfd45, s_ss) \
V(F32x4Ge, 0xfd46, s_ss) \
V(F64x2Eq, 0xfd47, s_ss) \
V(F64x2Ne, 0xfd48, s_ss) \
V(F64x2Lt, 0xfd49, s_ss) \
V(F64x2Gt, 0xfd4a, s_ss) \
V(F64x2Le, 0xfd4b, s_ss) \
V(F64x2Ge, 0xfd4c, s_ss) \
V(S128Not, 0xfd4d, s_s) \
V(S128And, 0xfd4e, s_ss) \
V(S128AndNot, 0xfd4f, s_ss) \
V(S128Or, 0xfd50, s_ss) \
V(S128Xor, 0xfd51, s_ss) \
V(S128Select, 0xfd52, s_sss) \
V(V128AnyTrue, 0xfd53, i_s) \
V(F32x4DemoteF64x2Zero, 0xfd5e, s_s) \
V(F64x2PromoteLowF32x4, 0xfd5f, s_s) \
V(I8x16Abs, 0xfd60, s_s) \
V(I8x16Neg, 0xfd61, s_s) \
V(I8x16Popcnt, 0xfd62, s_s) \
V(I8x16AllTrue, 0xfd63, i_s) \
V(I8x16BitMask, 0xfd64, i_s) \
V(I8x16SConvertI16x8, 0xfd65, s_ss) \
V(I8x16UConvertI16x8, 0xfd66, s_ss) \
V(F32x4Ceil, 0xfd67, s_s) \
V(F32x4Floor, 0xfd68, s_s) \
V(F32x4Trunc, 0xfd69, s_s) \
V(F32x4NearestInt, 0xfd6a, s_s) \
V(I8x16Shl, 0xfd6b, s_si) \
V(I8x16ShrS, 0xfd6c, s_si) \
V(I8x16ShrU, 0xfd6d, s_si) \
V(I8x16Add, 0xfd6e, s_ss) \
V(I8x16AddSatS, 0xfd6f, s_ss) \
V(I8x16AddSatU, 0xfd70, s_ss) \
V(I8x16Sub, 0xfd71, s_ss) \
V(I8x16SubSatS, 0xfd72, s_ss) \
V(I8x16SubSatU, 0xfd73, s_ss) \
V(F64x2Ceil, 0xfd74, s_s) \
V(F64x2Floor, 0xfd75, s_s) \
V(I8x16MinS, 0xfd76, s_ss) \
V(I8x16MinU, 0xfd77, s_ss) \
V(I8x16MaxS, 0xfd78, s_ss) \
V(I8x16MaxU, 0xfd79, s_ss) \
V(F64x2Trunc, 0xfd7a, s_s) \
V(I8x16RoundingAverageU, 0xfd7b, s_ss) \
V(I16x8ExtAddPairwiseI8x16S, 0xfd7c, s_s) \
V(I16x8ExtAddPairwiseI8x16U, 0xfd7d, s_s) \
V(I32x4ExtAddPairwiseI16x8S, 0xfd7e, s_s) \
V(I32x4ExtAddPairwiseI16x8U, 0xfd7f, s_s) \
V(I16x8Abs, 0xfd80, s_s) \
V(I16x8Neg, 0xfd81, s_s) \
V(I16x8Q15MulRSatS, 0xfd82, s_ss) \
V(I16x8AllTrue, 0xfd83, i_s) \
V(I16x8BitMask, 0xfd84, i_s) \
V(I16x8SConvertI32x4, 0xfd85, s_ss) \
V(I16x8UConvertI32x4, 0xfd86, s_ss) \
V(I16x8SConvertI8x16Low, 0xfd87, s_s) \
V(I16x8SConvertI8x16High, 0xfd88, s_s) \
V(I16x8UConvertI8x16Low, 0xfd89, s_s) \
V(I16x8UConvertI8x16High, 0xfd8a, s_s) \
V(I16x8Shl, 0xfd8b, s_si) \
V(I16x8ShrS, 0xfd8c, s_si) \
V(I16x8ShrU, 0xfd8d, s_si) \
V(I16x8Add, 0xfd8e, s_ss) \
V(I16x8AddSatS, 0xfd8f, s_ss) \
V(I16x8AddSatU, 0xfd90, s_ss) \
V(I16x8Sub, 0xfd91, s_ss) \
V(I16x8SubSatS, 0xfd92, s_ss) \
V(I16x8SubSatU, 0xfd93, s_ss) \
V(F64x2NearestInt, 0xfd94, s_s) \
V(I16x8Mul, 0xfd95, s_ss) \
V(I16x8MinS, 0xfd96, s_ss) \
V(I16x8MinU, 0xfd97, s_ss) \
V(I16x8MaxS, 0xfd98, s_ss) \
V(I16x8MaxU, 0xfd99, s_ss) \
V(I16x8RoundingAverageU, 0xfd9b, s_ss) \
V(I16x8ExtMulLowI8x16S, 0xfd9c, s_ss) \
V(I16x8ExtMulHighI8x16S, 0xfd9d, s_ss) \
V(I16x8ExtMulLowI8x16U, 0xfd9e, s_ss) \
V(I16x8ExtMulHighI8x16U, 0xfd9f, s_ss) \
V(I32x4Abs, 0xfda0, s_s) \
V(I32x4Neg, 0xfda1, s_s) \
V(I32x4AllTrue, 0xfda3, i_s) \
V(I32x4BitMask, 0xfda4, i_s) \
V(I32x4SConvertI16x8Low, 0xfda7, s_s) \
V(I32x4SConvertI16x8High, 0xfda8, s_s) \
V(I32x4UConvertI16x8Low, 0xfda9, s_s) \
V(I32x4UConvertI16x8High, 0xfdaa, s_s) \
V(I32x4Shl, 0xfdab, s_si) \
V(I32x4ShrS, 0xfdac, s_si) \
V(I32x4ShrU, 0xfdad, s_si) \
V(I32x4Add, 0xfdae, s_ss) \
V(I32x4Sub, 0xfdb1, s_ss) \
V(I32x4Mul, 0xfdb5, s_ss) \
V(I32x4MinS, 0xfdb6, s_ss) \
V(I32x4MinU, 0xfdb7, s_ss) \
V(I32x4MaxS, 0xfdb8, s_ss) \
V(I32x4MaxU, 0xfdb9, s_ss) \
V(I32x4DotI16x8S, 0xfdba, s_ss) \
V(I32x4ExtMulLowI16x8S, 0xfdbc, s_ss) \
V(I32x4ExtMulHighI16x8S, 0xfdbd, s_ss) \
V(I32x4ExtMulLowI16x8U, 0xfdbe, s_ss) \
V(I32x4ExtMulHighI16x8U, 0xfdbf, s_ss) \
V(I64x2Abs, 0xfdc0, s_s) \
V(I64x2Neg, 0xfdc1, s_s) \
V(I64x2AllTrue, 0xfdc3, i_s) \
V(I64x2BitMask, 0xfdc4, i_s) \
V(I64x2SConvertI32x4Low, 0xfdc7, s_s) \
V(I64x2SConvertI32x4High, 0xfdc8, s_s) \
V(I64x2UConvertI32x4Low, 0xfdc9, s_s) \
V(I64x2UConvertI32x4High, 0xfdca, s_s) \
V(I64x2Shl, 0xfdcb, s_si) \
V(I64x2ShrS, 0xfdcc, s_si) \
V(I64x2ShrU, 0xfdcd, s_si) \
V(I64x2Add, 0xfdce, s_ss) \
V(I64x2Sub, 0xfdd1, s_ss) \
V(I64x2Mul, 0xfdd5, s_ss) \
V(I64x2Eq, 0xfdd6, s_ss) \
V(I64x2Ne, 0xfdd7, s_ss) \
V(I64x2LtS, 0xfdd8, s_ss) \
V(I64x2GtS, 0xfdd9, s_ss) \
V(I64x2LeS, 0xfdda, s_ss) \
V(I64x2GeS, 0xfddb, s_ss) \
V(I64x2ExtMulLowI32x4S, 0xfddc, s_ss) \
V(I64x2ExtMulHighI32x4S, 0xfddd, s_ss) \
V(I64x2ExtMulLowI32x4U, 0xfdde, s_ss) \
V(I64x2ExtMulHighI32x4U, 0xfddf, s_ss) \
V(F32x4Abs, 0xfde0, s_s) \
V(F32x4Neg, 0xfde1, s_s) \
V(F32x4Sqrt, 0xfde3, s_s) \
V(F32x4Add, 0xfde4, s_ss) \
V(F32x4Sub, 0xfde5, s_ss) \
V(F32x4Mul, 0xfde6, s_ss) \
V(F32x4Div, 0xfde7, s_ss) \
V(F32x4Min, 0xfde8, s_ss) \
V(F32x4Max, 0xfde9, s_ss) \
V(F32x4Pmin, 0xfdea, s_ss) \
V(F32x4Pmax, 0xfdeb, s_ss) \
V(F64x2Abs, 0xfdec, s_s) \
V(F64x2Neg, 0xfded, s_s) \
V(F64x2Sqrt, 0xfdef, s_s) \
V(F64x2Add, 0xfdf0, s_ss) \
V(F64x2Sub, 0xfdf1, s_ss) \
V(F64x2Mul, 0xfdf2, s_ss) \
V(F64x2Div, 0xfdf3, s_ss) \
V(F64x2Min, 0xfdf4, s_ss) \
V(F64x2Max, 0xfdf5, s_ss) \
V(F64x2Pmin, 0xfdf6, s_ss) \
V(F64x2Pmax, 0xfdf7, s_ss) \
V(I32x4SConvertF32x4, 0xfdf8, s_s) \
V(I32x4UConvertF32x4, 0xfdf9, s_s) \
V(F32x4SConvertI32x4, 0xfdfa, s_s) \
V(F32x4UConvertI32x4, 0xfdfb, s_s) \
V(I32x4TruncSatF64x2SZero, 0xfdfc, s_s) \
V(I32x4TruncSatF64x2UZero, 0xfdfd, s_s) \
V(F64x2ConvertLowI32x4S, 0xfdfe, s_s) \
V(F64x2ConvertLowI32x4U, 0xfdff, s_s)
#define FOREACH_RELAXED_SIMD_OPCODE(V) \
V(I8x16RelaxedSwizzle, 0xfd100, s_ss) \
V(I32x4RelaxedTruncF32x4S, 0xfd101, s_s) \
V(I32x4RelaxedTruncF32x4U, 0xfd102, s_s) \
V(I32x4RelaxedTruncF64x2SZero, 0xfd103, s_s) \
V(I32x4RelaxedTruncF64x2UZero, 0xfd104, s_s) \
V(F32x4Qfma, 0xfd105, s_sss) \
V(F32x4Qfms, 0xfd106, s_sss) \
V(F64x2Qfma, 0xfd107, s_sss) \
V(F64x2Qfms, 0xfd108, s_sss) \
V(I8x16RelaxedLaneSelect, 0xfd109, s_sss) \
V(I16x8RelaxedLaneSelect, 0xfd10a, s_sss) \
V(I32x4RelaxedLaneSelect, 0xfd10b, s_sss) \
V(I64x2RelaxedLaneSelect, 0xfd10c, s_sss) \
V(F32x4RelaxedMin, 0xfd10d, s_ss) \
V(F32x4RelaxedMax, 0xfd10e, s_ss) \
V(F64x2RelaxedMin, 0xfd10f, s_ss) \
V(F64x2RelaxedMax, 0xfd110, s_ss) \
V(I16x8RelaxedQ15MulRS, 0xfd111, s_ss) \
V(I16x8DotI8x16I7x16S, 0xfd112, s_ss) \
V(I32x4DotI8x16I7x16AddS, 0xfd113, s_sss)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
V(I8x16ExtractLaneS, 0xfd15, _) \
V(I8x16ExtractLaneU, 0xfd16, _) \
V(I16x8ExtractLaneS, 0xfd18, _) \
V(I16x8ExtractLaneU, 0xfd19, _) \
V(I32x4ExtractLane, 0xfd1b, _) \
V(I64x2ExtractLane, 0xfd1d, _) \
V(F32x4ExtractLane, 0xfd1f, _) \
V(F64x2ExtractLane, 0xfd21, _)
#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
V(I8x16ReplaceLane, 0xfd17, _) \
V(I16x8ReplaceLane, 0xfd1a, _) \
V(I32x4ReplaceLane, 0xfd1c, _) \
V(I64x2ReplaceLane, 0xfd1e, _) \
V(F32x4ReplaceLane, 0xfd20, _) \
V(F64x2ReplaceLane, 0xfd22, _)
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
V(F64Acos, 0xdc, d_d, "f64.acos") \
V(F64Asin, 0xdd, d_d, "f64.asin") \
V(F64Atan, 0xde, d_d, "f64.atan") \
V(F64Cos, 0xdf, d_d, "f64.cos") \
V(F64Sin, 0xe0, d_d, "f64.sin") \
V(F64Tan, 0xe1, d_d, "f64.tan") \
V(F64Exp, 0xe2, d_d, "f64.exp") \
V(F64Log, 0xe3, d_d, "f64.log") \
V(F64Atan2, 0xe4, d_dd, "f64.atan2") \
V(F64Pow, 0xe5, d_dd, "f64.pow") \
V(F64Mod, 0xe6, d_dd, "f64.mod") \
V(I32AsmjsDivS, 0xe7, i_ii, "i32.asmjs_div_s") \
V(I32AsmjsDivU, 0xe8, i_ii, "i32.asmjs_div_u") \
V(I32AsmjsRemS, 0xe9, i_ii, "i32.asmjs_rem_s") \
V(I32AsmjsRemU, 0xea, i_ii, "i32.asmjs_rem_u") \
V(I32AsmjsLoadMem8S, 0xeb, i_i, "i32.asmjs_load8_s") \
V(I32AsmjsLoadMem8U, 0xec, i_i, "i32.asmjs_load8_u") \
V(I32AsmjsLoadMem16S, 0xed, i_i, "i32.asmjs_load16_s") \
V(I32AsmjsLoadMem16U, 0xee, i_i, "i32.asmjs_load16_u") \
V(I32AsmjsLoadMem, 0xef, i_i, "i32.asmjs_load32") \
V(F32AsmjsLoadMem, 0xf0, f_i, "f32.asmjs_load") \
V(F64AsmjsLoadMem, 0xf1, d_i, "f64.asmjs_load") \
V(I32AsmjsStoreMem8, 0xf2, i_ii, "i32.asmjs_store8") \
V(I32AsmjsStoreMem16, 0xf3, i_ii, "i32.asmjs_store16") \
V(I32AsmjsStoreMem, 0xf4, i_ii, "i32.asmjs_store") \
V(F32AsmjsStoreMem, 0xf5, f_if, "f32.asmjs_store") \
V(F64AsmjsStoreMem, 0xf6, d_id, "f64.asmjs_store") \
V(I32AsmjsSConvertF32, 0xf7, i_f, "i32.asmjs_convert_f32_s") \
V(I32AsmjsUConvertF32, 0xf8, i_f, "i32.asmjs_convert_f32_u") \
V(I32AsmjsSConvertF64, 0xf9, i_d, "i32.asmjs_convert_f64_s") \
V(I32AsmjsUConvertF64, 0xfa, i_d, "i32.asmjs_convert_f64_u")
#define FOREACH_SIMD_MEM_OPCODE(V) \
V(S128LoadMem, 0xfd00, s_i, "v128.load") \
V(S128Load8x8S, 0xfd01, s_i, "v128.load8x8_s") \
V(S128Load8x8U, 0xfd02, s_i, "v128.load8x8_u") \
V(S128Load16x4S, 0xfd03, s_i, "v128.load16x4_s") \
V(S128Load16x4U, 0xfd04, s_i, "v128.load16x4_u") \
V(S128Load32x2S, 0xfd05, s_i, "v128.load32x2_s") \
V(S128Load32x2U, 0xfd06, s_i, "v128.load32x2_u") \
V(S128Load8Splat, 0xfd07, s_i, "v128.load8_splat") \
V(S128Load16Splat, 0xfd08, s_i, "v128.load16_splat") \
V(S128Load32Splat, 0xfd09, s_i, "v128.load32_splat") \
V(S128Load64Splat, 0xfd0a, s_i, "v128.load64_splat") \
V(S128StoreMem, 0xfd0b, v_is, "v128.store") \
V(S128Load32Zero, 0xfd5c, s_i, "v128.load32_zero") \
V(S128Load64Zero, 0xfd5d, s_i, "v128.load64_zero")
#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
V(S128Load8Lane, 0xfd54, s_is, "v128.load8_lane") \
V(S128Load16Lane, 0xfd55, s_is, "v128.load16_lane") \
V(S128Load32Lane, 0xfd56, s_is, "v128.load32_lane") \
V(S128Load64Lane, 0xfd57, s_is, "v128.load64_lane") \
V(S128Store8Lane, 0xfd58, v_is, "v128.store8_lane") \
V(S128Store16Lane, 0xfd59, v_is, "v128.store16_lane") \
V(S128Store32Lane, 0xfd5a, v_is, "v128.store32_lane") \
V(S128Store64Lane, 0xfd5b, v_is, "v128.store64_lane")
#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _, "v128.const")
#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
V(I8x16Shuffle, 0xfd0d, s_ss, "i8x16.shuffle")
#define FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
V(I8x16Swizzle, 0xfd0e, s_ss, "i8x16.swizzle") \
V(I8x16Splat, 0xfd0f, s_i, "i8x16.splat") \
V(I16x8Splat, 0xfd10, s_i, "i16x8.splat") \
V(I32x4Splat, 0xfd11, s_i, "i32x4.splat") \
V(I64x2Splat, 0xfd12, s_l, "i64x2.splat") \
V(F32x4Splat, 0xfd13, s_f, "f32x4.splat") \
V(F64x2Splat, 0xfd14, s_d, "f64x2.splat") \
V(I8x16Eq, 0xfd23, s_ss, "i8x16.eq") \
V(I8x16Ne, 0xfd24, s_ss, "i8x16.ne") \
V(I8x16LtS, 0xfd25, s_ss, "i8x16.lt_s") \
V(I8x16LtU, 0xfd26, s_ss, "i8x16.lt_u") \
V(I8x16GtS, 0xfd27, s_ss, "i8x16.gt_s") \
V(I8x16GtU, 0xfd28, s_ss, "i8x16.gt_u") \
V(I8x16LeS, 0xfd29, s_ss, "i8x16.le_s") \
V(I8x16LeU, 0xfd2a, s_ss, "i8x16.le_u") \
V(I8x16GeS, 0xfd2b, s_ss, "i8x16.ge_s") \
V(I8x16GeU, 0xfd2c, s_ss, "i8x16.ge_u") \
V(I16x8Eq, 0xfd2d, s_ss, "i16x8.eq") \
V(I16x8Ne, 0xfd2e, s_ss, "i16x8.ne") \
V(I16x8LtS, 0xfd2f, s_ss, "i16x8.lt_s") \
V(I16x8LtU, 0xfd30, s_ss, "i16x8.lt_u") \
V(I16x8GtS, 0xfd31, s_ss, "i16x8.gt_s") \
V(I16x8GtU, 0xfd32, s_ss, "i16x8.gt_u") \
V(I16x8LeS, 0xfd33, s_ss, "i16x8.le_s") \
V(I16x8LeU, 0xfd34, s_ss, "i16x8.le_u") \
V(I16x8GeS, 0xfd35, s_ss, "i16x8.ge_s") \
V(I16x8GeU, 0xfd36, s_ss, "i16x8.ge_u") \
V(I32x4Eq, 0xfd37, s_ss, "i32x4.eq") \
V(I32x4Ne, 0xfd38, s_ss, "i32x4.ne") \
V(I32x4LtS, 0xfd39, s_ss, "i32x4.lt_s") \
V(I32x4LtU, 0xfd3a, s_ss, "i32x4.lt_u") \
V(I32x4GtS, 0xfd3b, s_ss, "i32x4.gt_s") \
V(I32x4GtU, 0xfd3c, s_ss, "i32x4.gt_u") \
V(I32x4LeS, 0xfd3d, s_ss, "i32x4.le_s") \
V(I32x4LeU, 0xfd3e, s_ss, "i32x4.le_u") \
V(I32x4GeS, 0xfd3f, s_ss, "i32x4.ge_s") \
V(I32x4GeU, 0xfd40, s_ss, "i32x4.ge_u") \
V(F32x4Eq, 0xfd41, s_ss, "f32x4.eq") \
V(F32x4Ne, 0xfd42, s_ss, "f32x4.ne") \
V(F32x4Lt, 0xfd43, s_ss, "f32x4.lt") \
V(F32x4Gt, 0xfd44, s_ss, "f32x4.gt") \
V(F32x4Le, 0xfd45, s_ss, "f32x4.le") \
V(F32x4Ge, 0xfd46, s_ss, "f32x4.ge") \
V(F64x2Eq, 0xfd47, s_ss, "f64x2.eq") \
V(F64x2Ne, 0xfd48, s_ss, "f64x2.ne") \
V(F64x2Lt, 0xfd49, s_ss, "f64x2.lt") \
V(F64x2Gt, 0xfd4a, s_ss, "f64x2.gt") \
V(F64x2Le, 0xfd4b, s_ss, "f64x2.le") \
V(F64x2Ge, 0xfd4c, s_ss, "f64x2.ge") \
V(S128Not, 0xfd4d, s_s, "v128.not") \
V(S128And, 0xfd4e, s_ss, "v128.and") \
V(S128AndNot, 0xfd4f, s_ss, "v128.andnot") \
V(S128Or, 0xfd50, s_ss, "v128.or") \
V(S128Xor, 0xfd51, s_ss, "v128.xor") \
V(S128Select, 0xfd52, s_sss, "v128.bitselect") \
V(V128AnyTrue, 0xfd53, i_s, "v128.any_true") \
V(F32x4DemoteF64x2Zero, 0xfd5e, s_s, "f32x4.demote_f64x2_zero") \
V(F64x2PromoteLowF32x4, 0xfd5f, s_s, "f64x2.promote_low_f32x4") \
V(I8x16Abs, 0xfd60, s_s, "i8x16.abs") \
V(I8x16Neg, 0xfd61, s_s, "i8x16.neg") \
V(I8x16Popcnt, 0xfd62, s_s, "i8x16.popcnt") \
V(I8x16AllTrue, 0xfd63, i_s, "i8x16.all_true") \
V(I8x16BitMask, 0xfd64, i_s, "i8x16.bitmask") \
V(I8x16SConvertI16x8, 0xfd65, s_ss, "i8x16.narrow_i16x8_s") \
V(I8x16UConvertI16x8, 0xfd66, s_ss, "i8x16.narrow_i16x8_u") \
V(F32x4Ceil, 0xfd67, s_s, "f32x4.ceil") \
V(F32x4Floor, 0xfd68, s_s, "f32x4.floor") \
V(F32x4Trunc, 0xfd69, s_s, "f32x4.trunc") \
V(F32x4NearestInt, 0xfd6a, s_s, "f32x4.nearest") \
V(I8x16Shl, 0xfd6b, s_si, "i8x16.shl") \
V(I8x16ShrS, 0xfd6c, s_si, "i8x16.shr_s") \
V(I8x16ShrU, 0xfd6d, s_si, "i8x16.shr_u") \
V(I8x16Add, 0xfd6e, s_ss, "i8x16.add") \
V(I8x16AddSatS, 0xfd6f, s_ss, "i8x16.add_sat_s") \
V(I8x16AddSatU, 0xfd70, s_ss, "i8x16.add_sat_u") \
V(I8x16Sub, 0xfd71, s_ss, "i8x16.sub") \
V(I8x16SubSatS, 0xfd72, s_ss, "i8x16.sub_sat_s") \
V(I8x16SubSatU, 0xfd73, s_ss, "i8x16.sub_sat_u") \
V(F64x2Ceil, 0xfd74, s_s, "f64x2.ceil") \
V(F64x2Floor, 0xfd75, s_s, "f64x2.floor") \
V(I8x16MinS, 0xfd76, s_ss, "i8x16.min_s") \
V(I8x16MinU, 0xfd77, s_ss, "i8x16.min_u") \
V(I8x16MaxS, 0xfd78, s_ss, "i8x16.max_s") \
V(I8x16MaxU, 0xfd79, s_ss, "i8x16.max_u") \
V(F64x2Trunc, 0xfd7a, s_s, "f64x2.trunc") \
V(I8x16RoundingAverageU, 0xfd7b, s_ss, "i8x16.avgr_u") \
V(I16x8ExtAddPairwiseI8x16S, 0xfd7c, s_s, "i16x8.extadd_pairwise_i8x16_s") \
V(I16x8ExtAddPairwiseI8x16U, 0xfd7d, s_s, "i16x8.extadd_pairwise_i8x16_u") \
V(I32x4ExtAddPairwiseI16x8S, 0xfd7e, s_s, "i32x4.extadd_pairwise_i16x8_s") \
V(I32x4ExtAddPairwiseI16x8U, 0xfd7f, s_s, "i32x4.extadd_pairwise_i16x8_u") \
V(I16x8Abs, 0xfd80, s_s, "i16x8.abs") \
V(I16x8Neg, 0xfd81, s_s, "i16x8.neg") \
V(I16x8Q15MulRSatS, 0xfd82, s_ss, "i16x8.q15mulr_sat_s") \
V(I16x8AllTrue, 0xfd83, i_s, "i16x8.all_true") \
V(I16x8BitMask, 0xfd84, i_s, "i16x8.bitmask") \
V(I16x8SConvertI32x4, 0xfd85, s_ss, "i16x8.narrow_i32x4_s") \
V(I16x8UConvertI32x4, 0xfd86, s_ss, "i16x8.narrow_i32x4_u") \
V(I16x8SConvertI8x16Low, 0xfd87, s_s, "i16x8.extend_low_i8x16_s") \
V(I16x8SConvertI8x16High, 0xfd88, s_s, "i16x8.extend_high_i8x16_s") \
V(I16x8UConvertI8x16Low, 0xfd89, s_s, "i16x8.extend_low_i8x16_u") \
V(I16x8UConvertI8x16High, 0xfd8a, s_s, "i16x8.extend_high_i8x16_u") \
V(I16x8Shl, 0xfd8b, s_si, "i16x8.shl") \
V(I16x8ShrS, 0xfd8c, s_si, "i16x8.shr_s") \
V(I16x8ShrU, 0xfd8d, s_si, "i16x8.shr_u") \
V(I16x8Add, 0xfd8e, s_ss, "i16x8.add") \
V(I16x8AddSatS, 0xfd8f, s_ss, "i16x8.add_sat_s") \
V(I16x8AddSatU, 0xfd90, s_ss, "i16x8.add_sat_u") \
V(I16x8Sub, 0xfd91, s_ss, "i16x8.sub") \
V(I16x8SubSatS, 0xfd92, s_ss, "i16x8.sub_sat_s") \
V(I16x8SubSatU, 0xfd93, s_ss, "i16x8.sub_sat_u") \
V(F64x2NearestInt, 0xfd94, s_s, "f64x2.nearest") \
V(I16x8Mul, 0xfd95, s_ss, "i16x8.mul") \
V(I16x8MinS, 0xfd96, s_ss, "i16x8.min_s") \
V(I16x8MinU, 0xfd97, s_ss, "i16x8.min_u") \
V(I16x8MaxS, 0xfd98, s_ss, "i16x8.max_s") \
V(I16x8MaxU, 0xfd99, s_ss, "i16x8.max_u") \
V(I16x8RoundingAverageU, 0xfd9b, s_ss, "i16x8.avgr_u") \
V(I16x8ExtMulLowI8x16S, 0xfd9c, s_ss, "i16x8.extmul_low_i8x16_s") \
V(I16x8ExtMulHighI8x16S, 0xfd9d, s_ss, "i16x8.extmul_high_i8x16_s") \
V(I16x8ExtMulLowI8x16U, 0xfd9e, s_ss, "i16x8.extmul_low_i8x16_u") \
V(I16x8ExtMulHighI8x16U, 0xfd9f, s_ss, "i16x8.extmul_high_i8x16_u") \
V(I32x4Abs, 0xfda0, s_s, "i32x4.abs") \
V(I32x4Neg, 0xfda1, s_s, "i32x4.neg") \
V(I32x4AllTrue, 0xfda3, i_s, "i32x4.all_true") \
V(I32x4BitMask, 0xfda4, i_s, "i32x4.bitmask") \
V(I32x4SConvertI16x8Low, 0xfda7, s_s, "i32x4.extend_low_i16x8_s") \
V(I32x4SConvertI16x8High, 0xfda8, s_s, "i32x4.extend_high_i16x8_s") \
V(I32x4UConvertI16x8Low, 0xfda9, s_s, "i32x4.extend_low_i16x8_u") \
V(I32x4UConvertI16x8High, 0xfdaa, s_s, "i32x4.extend_high_i16x8_u") \
V(I32x4Shl, 0xfdab, s_si, "i32x4.shl") \
V(I32x4ShrS, 0xfdac, s_si, "i32x4.shr_s") \
V(I32x4ShrU, 0xfdad, s_si, "i32x4.shr_u") \
V(I32x4Add, 0xfdae, s_ss, "i32x4.add") \
V(I32x4Sub, 0xfdb1, s_ss, "i32x4.sub") \
V(I32x4Mul, 0xfdb5, s_ss, "i32x4.mul") \
V(I32x4MinS, 0xfdb6, s_ss, "i32x4.min_s") \
V(I32x4MinU, 0xfdb7, s_ss, "i32x4.min_u") \
V(I32x4MaxS, 0xfdb8, s_ss, "i32x4.max_s") \
V(I32x4MaxU, 0xfdb9, s_ss, "i32x4.max_u") \
V(I32x4DotI16x8S, 0xfdba, s_ss, "i32x4.dot_i16x8_s") \
V(I32x4ExtMulLowI16x8S, 0xfdbc, s_ss, "i32x4.extmul_low_i16x8_s") \
V(I32x4ExtMulHighI16x8S, 0xfdbd, s_ss, "i32x4.extmul_high_i16x8_s") \
V(I32x4ExtMulLowI16x8U, 0xfdbe, s_ss, "i32x4.extmul_low_i16x8_u") \
V(I32x4ExtMulHighI16x8U, 0xfdbf, s_ss, "i32x4.extmul_high_i16x8_u") \
V(I64x2Abs, 0xfdc0, s_s, "i64x2.abs") \
V(I64x2Neg, 0xfdc1, s_s, "i64x2.neg") \
V(I64x2AllTrue, 0xfdc3, i_s, "i64x2.all_true") \
V(I64x2BitMask, 0xfdc4, i_s, "i64x2.bitmask") \
V(I64x2SConvertI32x4Low, 0xfdc7, s_s, "i64x2.extend_low_i32x4_s") \
V(I64x2SConvertI32x4High, 0xfdc8, s_s, "i64x2.extend_high_i32x4_s") \
V(I64x2UConvertI32x4Low, 0xfdc9, s_s, "i64x2.extend_low_i32x4_u") \
V(I64x2UConvertI32x4High, 0xfdca, s_s, "i64x2.extend_high_i32x4_u") \
V(I64x2Shl, 0xfdcb, s_si, "i64x2.shl") \
V(I64x2ShrS, 0xfdcc, s_si, "i64x2.shr_s") \
V(I64x2ShrU, 0xfdcd, s_si, "i64x2.shr_u") \
V(I64x2Add, 0xfdce, s_ss, "i64x2.add") \
V(I64x2Sub, 0xfdd1, s_ss, "i64x2.sub") \
V(I64x2Mul, 0xfdd5, s_ss, "i64x2.mul") \
V(I64x2Eq, 0xfdd6, s_ss, "i64x2.eq") \
V(I64x2Ne, 0xfdd7, s_ss, "i64x2.ne") \
V(I64x2LtS, 0xfdd8, s_ss, "i64x2.lt_s") \
V(I64x2GtS, 0xfdd9, s_ss, "i64x2.gt_s") \
V(I64x2LeS, 0xfdda, s_ss, "i64x2.le_s") \
V(I64x2GeS, 0xfddb, s_ss, "i64x2.ge_s") \
V(I64x2ExtMulLowI32x4S, 0xfddc, s_ss, "i64x2.extmul_low_i32x4_s") \
V(I64x2ExtMulHighI32x4S, 0xfddd, s_ss, "i64x2.extmul_high_i32x4_s") \
V(I64x2ExtMulLowI32x4U, 0xfdde, s_ss, "i64x2.extmul_low_i32x4_u") \
V(I64x2ExtMulHighI32x4U, 0xfddf, s_ss, "i64x2.extmul_high_i32x4_u") \
V(F32x4Abs, 0xfde0, s_s, "f32x4.abs") \
V(F32x4Neg, 0xfde1, s_s, "f32x4.neg") \
V(F32x4Sqrt, 0xfde3, s_s, "f32x4.sqrt") \
V(F32x4Add, 0xfde4, s_ss, "f32x4.add") \
V(F32x4Sub, 0xfde5, s_ss, "f32x4.sub") \
V(F32x4Mul, 0xfde6, s_ss, "f32x4.mul") \
V(F32x4Div, 0xfde7, s_ss, "f32x4.div") \
V(F32x4Min, 0xfde8, s_ss, "f32x4.min") \
V(F32x4Max, 0xfde9, s_ss, "f32x4.max") \
V(F32x4Pmin, 0xfdea, s_ss, "f32x4.pmin") \
V(F32x4Pmax, 0xfdeb, s_ss, "f32x4.pmax") \
V(F64x2Abs, 0xfdec, s_s, "f64x2.abs") \
V(F64x2Neg, 0xfded, s_s, "f64x2.neg") \
V(F64x2Sqrt, 0xfdef, s_s, "f64x2.sqrt") \
V(F64x2Add, 0xfdf0, s_ss, "f64x2.add") \
V(F64x2Sub, 0xfdf1, s_ss, "f64x2.sub") \
V(F64x2Mul, 0xfdf2, s_ss, "f64x2.mul") \
V(F64x2Div, 0xfdf3, s_ss, "f64x2.div") \
V(F64x2Min, 0xfdf4, s_ss, "f64x2.min") \
V(F64x2Max, 0xfdf5, s_ss, "f64x2.max") \
V(F64x2Pmin, 0xfdf6, s_ss, "f64x2.pmin") \
V(F64x2Pmax, 0xfdf7, s_ss, "f64x2.pmax") \
V(I32x4SConvertF32x4, 0xfdf8, s_s, "i32x4.trunc_sat_f32x4_s") \
V(I32x4UConvertF32x4, 0xfdf9, s_s, "i32x4.trunc_sat_f32x4_u") \
V(F32x4SConvertI32x4, 0xfdfa, s_s, "f32x4.convert_i32x4_s") \
V(F32x4UConvertI32x4, 0xfdfb, s_s, "f32x4.convert_i32x4_u") \
V(I32x4TruncSatF64x2SZero, 0xfdfc, s_s, "i32x4.trunc_sat_f64x2_s_zero") \
V(I32x4TruncSatF64x2UZero, 0xfdfd, s_s, "i32x4.trunc_sat_f64x2_u_zero") \
V(F64x2ConvertLowI32x4S, 0xfdfe, s_s, "f64x2.convert_low_i32x4_s") \
V(F64x2ConvertLowI32x4U, 0xfdff, s_s, "f64x2.convert_low_i32x4_u")
#define FOREACH_RELAXED_SIMD_OPCODE(V) \
V(I8x16RelaxedSwizzle, 0xfd100, s_ss, "i8x16.relaxed_swizzle") \
V(I32x4RelaxedTruncF32x4S, 0xfd101, s_s, "i32x4.relaxed_trunc_f32x4_s") \
V(I32x4RelaxedTruncF32x4U, 0xfd102, s_s, "i32x4.relaxed_trunc_f32x4_u") \
V(I32x4RelaxedTruncF64x2SZero, 0xfd103, s_s, \
"i32x4.relaxed_trunc_f64x2_s_zero") \
V(I32x4RelaxedTruncF64x2UZero, 0xfd104, s_s, \
"i32x4.relaxed_trunc_f64x2_u_zero") \
V(F32x4Qfma, 0xfd105, s_sss, "f32x4.qfma") \
V(F32x4Qfms, 0xfd106, s_sss, "f32x4.qfms") \
V(F64x2Qfma, 0xfd107, s_sss, "f64x2.qfma") \
V(F64x2Qfms, 0xfd108, s_sss, "f64x2.qfms") \
V(I8x16RelaxedLaneSelect, 0xfd109, s_sss, "i8x16.relaxed_laneselect") \
V(I16x8RelaxedLaneSelect, 0xfd10a, s_sss, "i16x8.relaxed_laneselect") \
V(I32x4RelaxedLaneSelect, 0xfd10b, s_sss, "i32x4.relaxed_laneselect") \
V(I64x2RelaxedLaneSelect, 0xfd10c, s_sss, "i64x2.relaxed_laneselect") \
V(F32x4RelaxedMin, 0xfd10d, s_ss, "f32x4.relaxed_min") \
V(F32x4RelaxedMax, 0xfd10e, s_ss, "f32x4.relaxed_max") \
V(F64x2RelaxedMin, 0xfd10f, s_ss, "f64x2.relaxed_min") \
V(F64x2RelaxedMax, 0xfd110, s_ss, "f64x2.relaxed_max") \
V(I16x8RelaxedQ15MulRS, 0xfd111, s_ss, "i16x8.relaxed_q15mulr_s") \
V(I16x8DotI8x16I7x16S, 0xfd112, s_ss, "i16x8.dot_i8x16_i7x16_s") \
V(I32x4DotI8x16I7x16AddS, 0xfd113, s_sss, "i32x4.dot_i8x16_i7x16_add_s")
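Note that the relaxed sub-opcodes above are two LEB bytes long (>= 0x100), so their fused constants place the 0xfd prefix 12 bits up instead of 8. A minimal sketch of that fusion, assuming only <cstdint> (the FuseSimdOpcode helper is hypothetical and not part of wasm-opcodes.h; the deleted unittest's CheckSimdName further down performs the same arithmetic):
#include <cstdint>
// One-byte sub-opcodes occupy the low 8 bits of the fused constant,
// two-byte (relaxed) sub-opcodes occupy the low 12 bits.
constexpr uint32_t FuseSimdOpcode(uint32_t sub_opcode) {
  return (0xfdu << (sub_opcode <= 0xffu ? 8 : 12)) | sub_opcode;
}
static_assert(FuseSimdOpcode(0x53) == 0xfd53u, "v128.any_true");
static_assert(FuseSimdOpcode(0x100) == 0xfd100u, "i8x16.relaxed_swizzle");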
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
V(I8x16ExtractLaneS, 0xfd15, _, "i8x16.extract_lane_s") \
V(I8x16ExtractLaneU, 0xfd16, _, "i8x16.extract_lane_u") \
V(I16x8ExtractLaneS, 0xfd18, _, "i16x8.extract_lane_s") \
V(I16x8ExtractLaneU, 0xfd19, _, "i16x8.extract_lane_u") \
V(I32x4ExtractLane, 0xfd1b, _, "i32x4.extract_lane") \
V(I64x2ExtractLane, 0xfd1d, _, "i64x2.extract_lane") \
V(F32x4ExtractLane, 0xfd1f, _, "f32x4.extract_lane") \
V(F64x2ExtractLane, 0xfd21, _, "f64x2.extract_lane")
#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
V(I8x16ReplaceLane, 0xfd17, _, "i8x16.replace_lane") \
V(I16x8ReplaceLane, 0xfd1a, _, "i16x8.replace_lane") \
V(I32x4ReplaceLane, 0xfd1c, _, "i32x4.replace_lane") \
V(I64x2ReplaceLane, 0xfd1e, _, "i64x2.replace_lane") \
V(F32x4ReplaceLane, 0xfd20, _, "f32x4.replace_lane") \
V(F64x2ReplaceLane, 0xfd22, _, "f64x2.replace_lane")
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
......@@ -579,180 +584,186 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_CONST_OPCODE(V)
#define FOREACH_NUMERIC_OPCODE(V_SIG, V_VARIADIC) \
V_SIG(I32SConvertSatF32, 0xfc00, i_f) \
V_SIG(I32UConvertSatF32, 0xfc01, i_f) \
V_SIG(I32SConvertSatF64, 0xfc02, i_d) \
V_SIG(I32UConvertSatF64, 0xfc03, i_d) \
V_SIG(I64SConvertSatF32, 0xfc04, l_f) \
V_SIG(I64UConvertSatF32, 0xfc05, l_f) \
V_SIG(I64SConvertSatF64, 0xfc06, l_d) \
V_SIG(I64UConvertSatF64, 0xfc07, l_d) \
V_VARIADIC(MemoryInit, 0xfc08) \
V_SIG(DataDrop, 0xfc09, v_v) \
V_VARIADIC(MemoryCopy, 0xfc0a) \
V_VARIADIC(MemoryFill, 0xfc0b) \
V_SIG(TableInit, 0xfc0c, v_iii) \
V_SIG(ElemDrop, 0xfc0d, v_v) \
V_SIG(TableCopy, 0xfc0e, v_iii) \
#define FOREACH_NUMERIC_OPCODE_WITH_SIG(V) \
V(I32SConvertSatF32, 0xfc00, i_f, "i32.trunc_sat_f32_s") \
V(I32UConvertSatF32, 0xfc01, i_f, "i32.trunc_sat_f32_u") \
V(I32SConvertSatF64, 0xfc02, i_d, "i32.trunc_sat_f64_s") \
V(I32UConvertSatF64, 0xfc03, i_d, "i32.trunc_sat_f64_u") \
V(I64SConvertSatF32, 0xfc04, l_f, "i64.trunc_sat_f32_s") \
V(I64UConvertSatF32, 0xfc05, l_f, "i64.trunc_sat_f32_u") \
V(I64SConvertSatF64, 0xfc06, l_d, "i64.trunc_sat_f64_s") \
V(I64UConvertSatF64, 0xfc07, l_d, "i64.trunc_sat_f64_u") \
V(DataDrop, 0xfc09, v_v, "data.drop") \
V(TableInit, 0xfc0c, v_iii, "table.init") \
V(ElemDrop, 0xfc0d, v_v, "elem.drop") \
V(TableCopy, 0xfc0e, v_iii, "table.copy") \
V(TableSize, 0xfc10, i_v, "table.size")
#define FOREACH_NUMERIC_OPCODE_VARIADIC(V) \
V(MemoryInit, 0xfc08, _, "memory.init") \
V(MemoryCopy, 0xfc0a, _, "memory.copy") \
V(MemoryFill, 0xfc0b, _, "memory.fill") \
/* TableGrow is polymorphic in the first parameter. */ \
/* It's whatever the table type is. */ \
V_VARIADIC(TableGrow, 0xfc0f) \
V_SIG(TableSize, 0xfc10, i_v) \
V(TableGrow, 0xfc0f, _, "table.grow") \
/* TableFill is polymorphic in the second parameter. */ \
/* It's whatever the table type is. */ \
V_VARIADIC(TableFill, 0xfc11)
#define FOREACH_ATOMIC_OPCODE(V) \
V(AtomicNotify, 0xfe00, i_ii) \
V(I32AtomicWait, 0xfe01, i_iil) \
V(I64AtomicWait, 0xfe02, i_ill) \
V(I32AtomicLoad, 0xfe10, i_i) \
V(I64AtomicLoad, 0xfe11, l_i) \
V(I32AtomicLoad8U, 0xfe12, i_i) \
V(I32AtomicLoad16U, 0xfe13, i_i) \
V(I64AtomicLoad8U, 0xfe14, l_i) \
V(I64AtomicLoad16U, 0xfe15, l_i) \
V(I64AtomicLoad32U, 0xfe16, l_i) \
V(I32AtomicStore, 0xfe17, v_ii) \
V(I64AtomicStore, 0xfe18, v_il) \
V(I32AtomicStore8U, 0xfe19, v_ii) \
V(I32AtomicStore16U, 0xfe1a, v_ii) \
V(I64AtomicStore8U, 0xfe1b, v_il) \
V(I64AtomicStore16U, 0xfe1c, v_il) \
V(I64AtomicStore32U, 0xfe1d, v_il) \
V(I32AtomicAdd, 0xfe1e, i_ii) \
V(I64AtomicAdd, 0xfe1f, l_il) \
V(I32AtomicAdd8U, 0xfe20, i_ii) \
V(I32AtomicAdd16U, 0xfe21, i_ii) \
V(I64AtomicAdd8U, 0xfe22, l_il) \
V(I64AtomicAdd16U, 0xfe23, l_il) \
V(I64AtomicAdd32U, 0xfe24, l_il) \
V(I32AtomicSub, 0xfe25, i_ii) \
V(I64AtomicSub, 0xfe26, l_il) \
V(I32AtomicSub8U, 0xfe27, i_ii) \
V(I32AtomicSub16U, 0xfe28, i_ii) \
V(I64AtomicSub8U, 0xfe29, l_il) \
V(I64AtomicSub16U, 0xfe2a, l_il) \
V(I64AtomicSub32U, 0xfe2b, l_il) \
V(I32AtomicAnd, 0xfe2c, i_ii) \
V(I64AtomicAnd, 0xfe2d, l_il) \
V(I32AtomicAnd8U, 0xfe2e, i_ii) \
V(I32AtomicAnd16U, 0xfe2f, i_ii) \
V(I64AtomicAnd8U, 0xfe30, l_il) \
V(I64AtomicAnd16U, 0xfe31, l_il) \
V(I64AtomicAnd32U, 0xfe32, l_il) \
V(I32AtomicOr, 0xfe33, i_ii) \
V(I64AtomicOr, 0xfe34, l_il) \
V(I32AtomicOr8U, 0xfe35, i_ii) \
V(I32AtomicOr16U, 0xfe36, i_ii) \
V(I64AtomicOr8U, 0xfe37, l_il) \
V(I64AtomicOr16U, 0xfe38, l_il) \
V(I64AtomicOr32U, 0xfe39, l_il) \
V(I32AtomicXor, 0xfe3a, i_ii) \
V(I64AtomicXor, 0xfe3b, l_il) \
V(I32AtomicXor8U, 0xfe3c, i_ii) \
V(I32AtomicXor16U, 0xfe3d, i_ii) \
V(I64AtomicXor8U, 0xfe3e, l_il) \
V(I64AtomicXor16U, 0xfe3f, l_il) \
V(I64AtomicXor32U, 0xfe40, l_il) \
V(I32AtomicExchange, 0xfe41, i_ii) \
V(I64AtomicExchange, 0xfe42, l_il) \
V(I32AtomicExchange8U, 0xfe43, i_ii) \
V(I32AtomicExchange16U, 0xfe44, i_ii) \
V(I64AtomicExchange8U, 0xfe45, l_il) \
V(I64AtomicExchange16U, 0xfe46, l_il) \
V(I64AtomicExchange32U, 0xfe47, l_il) \
V(I32AtomicCompareExchange, 0xfe48, i_iii) \
V(I64AtomicCompareExchange, 0xfe49, l_ill) \
V(I32AtomicCompareExchange8U, 0xfe4a, i_iii) \
V(I32AtomicCompareExchange16U, 0xfe4b, i_iii) \
V(I64AtomicCompareExchange8U, 0xfe4c, l_ill) \
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
V(TableFill, 0xfc11, _, "table.fill")
#define FOREACH_NUMERIC_OPCODE(V) \
FOREACH_NUMERIC_OPCODE_WITH_SIG(V) FOREACH_NUMERIC_OPCODE_VARIADIC(V)
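Because each entry now carries its text-format name as the last macro argument, a name table can be generated straight from the list. A minimal sketch of such a consumer (illustrative only; it assumes the kExpr* enum and WasmOpcode type from this header, and V8's actual OpcodeName lookup may be organized differently):
// Maps a numeric-prefixed opcode to its text-format name; the cases and
// strings come verbatim from FOREACH_NUMERIC_OPCODE above.
inline const char* NumericOpcodeNameSketch(WasmOpcode opcode) {
  switch (opcode) {
#define NAME_CASE(name, opc, sig, wasm_name) \
  case kExpr##name:                          \
    return wasm_name;
    FOREACH_NUMERIC_OPCODE(NAME_CASE)
#undef NAME_CASE
    default:
      return "unknown";
  }
}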
#define FOREACH_ATOMIC_OPCODE(V) \
V(AtomicNotify, 0xfe00, i_ii, "memory.atomic.notify") \
V(I32AtomicWait, 0xfe01, i_iil, "memory.atomic.wait32") \
V(I64AtomicWait, 0xfe02, i_ill, "memory.atomic.wait64") \
V(I32AtomicLoad, 0xfe10, i_i, "i32.atomic.load") \
V(I64AtomicLoad, 0xfe11, l_i, "i64.atomic.load") \
V(I32AtomicLoad8U, 0xfe12, i_i, "i32.atomic.load8_u") \
V(I32AtomicLoad16U, 0xfe13, i_i, "i32.atomic.load16_u") \
V(I64AtomicLoad8U, 0xfe14, l_i, "i64.atomic.load8_u") \
V(I64AtomicLoad16U, 0xfe15, l_i, "i64.atomic.load16_u") \
V(I64AtomicLoad32U, 0xfe16, l_i, "i64.atomic.load32_u") \
V(I32AtomicStore, 0xfe17, v_ii, "i32.atomic.store") \
V(I64AtomicStore, 0xfe18, v_il, "i64.atomic.store") \
V(I32AtomicStore8U, 0xfe19, v_ii, "i32.atomic.store8") \
V(I32AtomicStore16U, 0xfe1a, v_ii, "i32.atomic.store16") \
V(I64AtomicStore8U, 0xfe1b, v_il, "i64.atomic.store8") \
V(I64AtomicStore16U, 0xfe1c, v_il, "i64.atomic.store16") \
V(I64AtomicStore32U, 0xfe1d, v_il, "i64.atomic.store32") \
V(I32AtomicAdd, 0xfe1e, i_ii, "i32.atomic.rmw.add") \
V(I64AtomicAdd, 0xfe1f, l_il, "i64.atomic.rmw.add") \
V(I32AtomicAdd8U, 0xfe20, i_ii, "i32.atomic.rmw8.add_u") \
V(I32AtomicAdd16U, 0xfe21, i_ii, "i32.atomic.rmw16.add_u") \
V(I64AtomicAdd8U, 0xfe22, l_il, "i64.atomic.rmw8.add_u") \
V(I64AtomicAdd16U, 0xfe23, l_il, "i64.atomic.rmw16.add_u") \
V(I64AtomicAdd32U, 0xfe24, l_il, "i64.atomic.rmw32.add_u") \
V(I32AtomicSub, 0xfe25, i_ii, "i32.atomic.rmw.sub") \
V(I64AtomicSub, 0xfe26, l_il, "i64.atomic.rmw.sub") \
V(I32AtomicSub8U, 0xfe27, i_ii, "i32.atomic.rmw8.sub_u") \
V(I32AtomicSub16U, 0xfe28, i_ii, "i32.atomic.rmw16.sub_u") \
V(I64AtomicSub8U, 0xfe29, l_il, "i64.atomic.rmw8.sub_u") \
V(I64AtomicSub16U, 0xfe2a, l_il, "i64.atomic.rmw16.sub_u") \
V(I64AtomicSub32U, 0xfe2b, l_il, "i64.atomic.rmw32.sub_u") \
V(I32AtomicAnd, 0xfe2c, i_ii, "i32.atomic.rmw.and") \
V(I64AtomicAnd, 0xfe2d, l_il, "i64.atomic.rmw.and") \
V(I32AtomicAnd8U, 0xfe2e, i_ii, "i32.atomic.rmw8.and_u") \
V(I32AtomicAnd16U, 0xfe2f, i_ii, "i32.atomic.rmw16.and_u") \
V(I64AtomicAnd8U, 0xfe30, l_il, "i64.atomic.rmw8.and_u") \
V(I64AtomicAnd16U, 0xfe31, l_il, "i64.atomic.rmw16.and_u") \
V(I64AtomicAnd32U, 0xfe32, l_il, "i64.atomic.rmw32.and_u") \
V(I32AtomicOr, 0xfe33, i_ii, "i32.atomic.rmw.or") \
V(I64AtomicOr, 0xfe34, l_il, "i64.atomic.rmw.or") \
V(I32AtomicOr8U, 0xfe35, i_ii, "i32.atomic.rmw8.or_u") \
V(I32AtomicOr16U, 0xfe36, i_ii, "i32.atomic.rmw16.or_u") \
V(I64AtomicOr8U, 0xfe37, l_il, "i64.atomic.rmw8.or_u") \
V(I64AtomicOr16U, 0xfe38, l_il, "i64.atomic.rmw16.or_u") \
V(I64AtomicOr32U, 0xfe39, l_il, "i64.atomic.rmw32.or_u") \
V(I32AtomicXor, 0xfe3a, i_ii, "i32.atomic.rmw.xor") \
V(I64AtomicXor, 0xfe3b, l_il, "i64.atomic.rmw.xor") \
V(I32AtomicXor8U, 0xfe3c, i_ii, "i32.atomic.rmw8.xor_u") \
V(I32AtomicXor16U, 0xfe3d, i_ii, "i32.atomic.rmw16.xor_u") \
V(I64AtomicXor8U, 0xfe3e, l_il, "i64.atomic.rmw8.xor_u") \
V(I64AtomicXor16U, 0xfe3f, l_il, "i64.atomic.rmw16.xor_u") \
V(I64AtomicXor32U, 0xfe40, l_il, "i64.atomic.rmw32.xor_u") \
V(I32AtomicExchange, 0xfe41, i_ii, "i32.atomic.rmw.xchg") \
V(I64AtomicExchange, 0xfe42, l_il, "i64.atomic.rmw.xchg") \
V(I32AtomicExchange8U, 0xfe43, i_ii, "i32.atomic.rmw8.xchg_u") \
V(I32AtomicExchange16U, 0xfe44, i_ii, "i32.atomic.rmw16.xchg_u") \
V(I64AtomicExchange8U, 0xfe45, l_il, "i64.atomic.rmw8.xchg_u") \
V(I64AtomicExchange16U, 0xfe46, l_il, "i64.atomic.rmw16.xchg_u") \
V(I64AtomicExchange32U, 0xfe47, l_il, "i64.atomic.rmw32.xchg_u") \
V(I32AtomicCompareExchange, 0xfe48, i_iii, "i32.atomic.rmw.cmpxchg") \
V(I64AtomicCompareExchange, 0xfe49, l_ill, "i64.atomic.rmw.cmpxchg") \
V(I32AtomicCompareExchange8U, 0xfe4a, i_iii, "i32.atomic.rmw8.cmpxchg_u") \
V(I32AtomicCompareExchange16U, 0xfe4b, i_iii, "i32.atomic.rmw16.cmpxchg_u") \
V(I64AtomicCompareExchange8U, 0xfe4c, l_ill, "i64.atomic.rmw8.cmpxchg_u") \
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill, "i64.atomic.rmw16.cmpxchg_u") \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill, "i64.atomic.rmw32.cmpxchg_u")
#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
/* AtomicFence does not target a particular linear memory. */ \
V(AtomicFence, 0xfe03, v_v)
V(AtomicFence, 0xfe03, v_v, "atomic.fence")
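Conversely, a consumer that needs only some of the columns can bind the leading parameters and swallow the rest with a variadic macro, which is how the added name column avoids touching every existing use site. A hedged sketch (hypothetical consumer; it assumes per-signature kSig_* constants generated from the signature list further down):
// Expands to a case yielding the atomic op's signature, ignoring the name.
#define SIG_CASE(name, opc, sig, ...) \
  case kExpr##name:                   \
    return &kSig_##sig;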
#define FOREACH_GC_OPCODE(V) /* Force 80 columns */ \
V(StructNewWithRtt, 0xfb01, _) \
V(StructNewDefaultWithRtt, 0xfb02, _) \
V(StructGet, 0xfb03, _) \
V(StructGetS, 0xfb04, _) \
V(StructGetU, 0xfb05, _) \
V(StructSet, 0xfb06, _) \
V(StructNew, 0xfb07, _) \
V(StructNewDefault, 0xfb08, _) \
V(ArrayNewWithRtt, 0xfb11, _) \
V(ArrayNewDefaultWithRtt, 0xfb12, _) \
V(ArrayGet, 0xfb13, _) \
V(ArrayGetS, 0xfb14, _) \
V(ArrayGetU, 0xfb15, _) \
V(ArraySet, 0xfb16, _) \
V(ArrayLen, 0xfb17, _) \
V(ArrayCopy, 0xfb18, _) /* not standardized - V8 experimental */ \
V(ArrayInit, 0xfb19, _) \
V(ArrayInitStatic, 0xfb1a, _) \
V(ArrayNew, 0xfb1b, _) \
V(ArrayNewDefault, 0xfb1c, _) \
V(ArrayInitFromData, 0xfb1e, _) \
V(ArrayInitFromDataStatic, 0xfb1d, _) \
V(ArrayInitFromElemStatic, 0xfb1f, _) \
V(I31New, 0xfb20, _) \
V(I31GetS, 0xfb21, _) \
V(I31GetU, 0xfb22, _) \
V(RttCanon, 0xfb30, _) \
V(RefTest, 0xfb40, _) \
V(RefCast, 0xfb41, _) \
V(BrOnCast, 0xfb42, _) \
V(BrOnCastFail, 0xfb43, _) \
V(RefTestStatic, 0xfb44, _) \
V(RefCastStatic, 0xfb45, _) \
V(BrOnCastStatic, 0xfb46, _) \
V(BrOnCastStaticFail, 0xfb47, _) \
V(RefCastNopStatic, 0xfb48, _) \
V(RefIsFunc, 0xfb50, _) \
V(RefIsData, 0xfb51, _) \
V(RefIsI31, 0xfb52, _) \
V(RefIsArray, 0xfb53, _) \
V(RefAsFunc, 0xfb58, _) \
V(RefAsData, 0xfb59, _) \
V(RefAsI31, 0xfb5a, _) \
V(RefAsArray, 0xfb5b, _) \
V(BrOnFunc, 0xfb60, _) \
V(BrOnData, 0xfb61, _) \
V(BrOnI31, 0xfb62, _) \
V(BrOnArray, 0xfb66, _) \
V(BrOnNonFunc, 0xfb63, _) \
V(BrOnNonData, 0xfb64, _) \
V(BrOnNonI31, 0xfb65, _) \
V(BrOnNonArray, 0xfb67, _) \
V(StringNewWtf8, 0xfb80, _) \
V(StringNewWtf16, 0xfb81, _) \
V(StringConst, 0xfb82, _) \
V(StringMeasureUtf8, 0xfb83, _) \
V(StringMeasureWtf8, 0xfb84, _) \
V(StringMeasureWtf16, 0xfb85, _) \
V(StringEncodeWtf8, 0xfb86, _) \
V(StringEncodeWtf16, 0xfb87, _) \
V(StringConcat, 0xfb88, _) \
V(StringEq, 0xfb89, _) \
V(StringIsUSVSequence, 0xfb8a, _) \
V(StringAsWtf8, 0xfb90, _) \
V(StringViewWtf8Advance, 0xfb91, _) \
V(StringViewWtf8Encode, 0xfb92, _) \
V(StringViewWtf8Slice, 0xfb93, _) \
V(StringAsWtf16, 0xfb98, _) \
V(StringViewWtf16Length, 0xfb99, _) \
V(StringViewWtf16GetCodeUnit, 0xfb9a, _) \
V(StringViewWtf16Encode, 0xfb9b, _) \
V(StringViewWtf16Slice, 0xfb9c, _) \
V(StringAsIter, 0xfba0, _) \
V(StringViewIterCur, 0xfba1, _) \
V(StringViewIterAdvance, 0xfba2, _) \
V(StringViewIterRewind, 0xfba3, _) \
V(StringViewIterSlice, 0xfba4, _)
V(StructNewWithRtt, 0xfb01, _, "struct.new_with_rtt") \
V(StructNewDefaultWithRtt, 0xfb02, _, "struct.new_default_with_rtt") \
V(StructGet, 0xfb03, _, "struct.get") \
V(StructGetS, 0xfb04, _, "struct.get_s") \
V(StructGetU, 0xfb05, _, "struct.get_u") \
V(StructSet, 0xfb06, _, "struct.set") \
V(StructNew, 0xfb07, _, "struct.new") \
V(StructNewDefault, 0xfb08, _, "struct.new_default") \
V(ArrayNewWithRtt, 0xfb11, _, "array.new_with_rtt") \
V(ArrayNewDefaultWithRtt, 0xfb12, _, "array.new_default_with_rtt") \
V(ArrayGet, 0xfb13, _, "array.get") \
V(ArrayGetS, 0xfb14, _, "array.get_s") \
V(ArrayGetU, 0xfb15, _, "array.get_u") \
V(ArraySet, 0xfb16, _, "array.set") \
V(ArrayLen, 0xfb17, _, "array.len") \
V(ArrayCopy, 0xfb18, _, \
"array.copy") /* not standardized - V8 experimental */ \
V(ArrayInit, 0xfb19, _, "array.init") \
V(ArrayInitStatic, 0xfb1a, _, "array.init_static") \
V(ArrayNew, 0xfb1b, _, "array.new") \
V(ArrayNewDefault, 0xfb1c, _, "array.new_default") \
V(ArrayInitFromData, 0xfb1e, _, "array.init_from_data") \
V(ArrayInitFromDataStatic, 0xfb1d, _, "array.init_from_data_static") \
V(ArrayInitFromElemStatic, 0xfb1f, _, "array.init_from_elem_static") \
V(I31New, 0xfb20, _, "i31.new") \
V(I31GetS, 0xfb21, _, "i31.get_s") \
V(I31GetU, 0xfb22, _, "i31.get_u") \
V(RttCanon, 0xfb30, _, "rtt.canon") \
V(RefTest, 0xfb40, _, "ref.test") \
V(RefCast, 0xfb41, _, "ref.cast") \
V(BrOnCast, 0xfb42, _, "br_on_cast") \
V(BrOnCastFail, 0xfb43, _, "br_on_cast_fail") \
V(RefTestStatic, 0xfb44, _, "ref.test_static") \
V(RefCastStatic, 0xfb45, _, "ref.cast_static") \
V(BrOnCastStatic, 0xfb46, _, "br_on_cast_static") \
V(BrOnCastStaticFail, 0xfb47, _, "br_on_cast_static_fail") \
V(RefCastNopStatic, 0xfb48, _, "ref.cast_nop_static") \
V(RefIsFunc, 0xfb50, _, "ref.is_func") \
V(RefIsData, 0xfb51, _, "ref.is_data") \
V(RefIsI31, 0xfb52, _, "ref.is_i31") \
V(RefIsArray, 0xfb53, _, "ref.is_array") \
V(RefAsFunc, 0xfb58, _, "ref.as_func") \
V(RefAsData, 0xfb59, _, "ref.as_data") \
V(RefAsI31, 0xfb5a, _, "ref.as_i31") \
V(RefAsArray, 0xfb5b, _, "ref.as_array") \
V(BrOnFunc, 0xfb60, _, "br_on_func") \
V(BrOnData, 0xfb61, _, "br_on_data") \
V(BrOnI31, 0xfb62, _, "br_on_i31") \
V(BrOnArray, 0xfb66, _, "br_on_array") \
V(BrOnNonFunc, 0xfb63, _, "br_on_non_func") \
V(BrOnNonData, 0xfb64, _, "br_on_non_data") \
V(BrOnNonI31, 0xfb65, _, "br_on_non_i31") \
V(BrOnNonArray, 0xfb67, _, "br_on_non_array") \
V(StringNewWtf8, 0xfb80, _, "string.new_wtf8") \
V(StringNewWtf16, 0xfb81, _, "string.new_wtf16") \
V(StringConst, 0xfb82, _, "string.const") \
V(StringMeasureUtf8, 0xfb83, _, "string.measure_utf8") \
V(StringMeasureWtf8, 0xfb84, _, "string.measure_wtf8") \
V(StringMeasureWtf16, 0xfb85, _, "string.measure_wtf16") \
V(StringEncodeWtf8, 0xfb86, _, "string.encode_wtf8") \
V(StringEncodeWtf16, 0xfb87, _, "string.encode_wtf16") \
V(StringConcat, 0xfb88, _, "string.concat") \
V(StringEq, 0xfb89, _, "string.eq") \
V(StringIsUSVSequence, 0xfb8a, _, "string.is_usv_sequence") \
V(StringAsWtf8, 0xfb90, _, "string.as_wtf8") \
V(StringViewWtf8Advance, 0xfb91, _, "stringview_wtf8.advance") \
V(StringViewWtf8Encode, 0xfb92, _, "stringview_wtf8.encode") \
V(StringViewWtf8Slice, 0xfb93, _, "stringview_wtf8.slice") \
V(StringAsWtf16, 0xfb98, _, "string.as_wtf16") \
V(StringViewWtf16Length, 0xfb99, _, "stringview_wtf16.length") \
V(StringViewWtf16GetCodeUnit, 0xfb9a, _, "stringview_wtf16.get_codeunit") \
V(StringViewWtf16Encode, 0xfb9b, _, "stringview_wtf16.encode") \
V(StringViewWtf16Slice, 0xfb9c, _, "stringview_wtf16.slice") \
V(StringAsIter, 0xfba0, _, "string.as_iter") \
V(StringViewIterCur, 0xfba1, _, "stringview_iter.cur") \
V(StringViewIterAdvance, 0xfba2, _, "stringview_iter.advance") \
V(StringViewIterRewind, 0xfba3, _, "stringview_iter.rewind") \
V(StringViewIterSlice, 0xfba4, _, "stringview_iter.slice")
// All opcodes.
#define FOREACH_OPCODE(V) \
......@@ -767,7 +778,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
FOREACH_SIMD_OPCODE(V) \
FOREACH_ATOMIC_OPCODE(V) \
FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
FOREACH_NUMERIC_OPCODE(V, V) \
FOREACH_NUMERIC_OPCODE(V) \
FOREACH_GC_OPCODE(V)
// All signatures.
......
......@@ -2116,7 +2116,7 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
TEST(Build_Wasm_SimpleExprs) {
// Test that the decoder can build a graph for all supported simple expressions.
#define GRAPH_BUILD_TEST(name, opcode, sig) \
#define GRAPH_BUILD_TEST(name, ...) \
TestBuildGraphForSimpleExpression(kExpr##name);
FOREACH_SIMPLE_OPCODE(GRAPH_BUILD_TEST);
......
......@@ -506,7 +506,6 @@ v8_source_set("unittests_sources") {
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-module-sourcemap-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
]
}
......
......@@ -1517,7 +1517,7 @@ TEST_F(FunctionBodyDecoderTest, MacrosInt64) {
TEST_F(FunctionBodyDecoderTest, AllSimpleExpressions) {
// Test all simple expressions which are described by a signature.
#define DECODE_TEST(name, opcode, sig) \
#define DECODE_TEST(name, opcode, sig, ...) \
{ \
const FunctionSig* sig = WasmOpcodes::Signature(kExpr##name); \
if (sig->parameter_count() == 1) { \
......@@ -4805,7 +4805,7 @@ TEST_F(WasmOpcodeLengthTest, MiscMemExpressions) {
}
TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
#define SIMPLE_OPCODE(name, byte, sig) byte,
#define SIMPLE_OPCODE(name, byte, ...) byte,
static constexpr uint8_t kSimpleOpcodes[] = {
FOREACH_SIMPLE_OPCODE(SIMPLE_OPCODE)};
#undef SIMPLE_OPCODE
......@@ -4815,10 +4815,10 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
}
TEST_F(WasmOpcodeLengthTest, SimdExpressions) {
#define TEST_SIMD(name, opcode, sig) ExpectLengthPrefixed(0, kExpr##name);
#define TEST_SIMD(name, ...) ExpectLengthPrefixed(0, kExpr##name);
FOREACH_SIMD_0_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
#define TEST_SIMD(name, opcode, sig) ExpectLengthPrefixed(1, kExpr##name);
#define TEST_SIMD(name, ...) ExpectLengthPrefixed(1, kExpr##name);
FOREACH_SIMD_1_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
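// i8x16.shuffle is followed by a 16-byte lane-index immediate.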
ExpectLengthPrefixed(16, kExprI8x16Shuffle);
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/wasm/wasm-opcodes-inl.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace wasm {
class WasmOpcodeTest : public TestWithZone {
public:
void CheckName(WasmOpcode opcode, const char* expected) {
EXPECT_STREQ(expected, WasmOpcodes::OpcodeName(opcode));
}
void CheckSimdName(uint32_t opcode_after_prefix, const char* expected) {
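// Build the fused opcode constant: the 0xfd prefix sits above the low
// 8 bits for one-byte sub-opcodes (e.g. 0xfd53) and above the low 12
// bits for two-byte ones (e.g. 0xfd100), matching the constants in
// wasm-opcodes.h.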
uint32_t raw = kSimdPrefix << (opcode_after_prefix <= 0xFF ? 8 : 12);
raw |= opcode_after_prefix;
WasmOpcode opcode = static_cast<WasmOpcode>(raw);
CheckName(opcode, expected);
}
};
TEST_F(WasmOpcodeTest, SimdNames) {
// Reference:
// https://webassembly.github.io/spec/core/binary/instructions.html#vector-instructions
CheckSimdName(0, "v128.load");
CheckSimdName(1, "v128.load8x8_s");
CheckSimdName(2, "v128.load8x8_u");
CheckSimdName(3, "v128.load16x4_s");
CheckSimdName(4, "v128.load16x4_u");
CheckSimdName(5, "v128.load32x2_s");
CheckSimdName(6, "v128.load32x2_u");
CheckSimdName(7, "v128.load8_splat");
CheckSimdName(8, "v128.load16_splat");
CheckSimdName(9, "v128.load32_splat");
CheckSimdName(10, "v128.load64_splat");
CheckSimdName(92, "v128.load32_zero");
CheckSimdName(93, "v128.load64_zero");
CheckSimdName(11, "v128.store");
CheckSimdName(84, "v128.load8_lane");
CheckSimdName(85, "v128.load16_lane");
CheckSimdName(86, "v128.load32_lane");
CheckSimdName(87, "v128.load64_lane");
CheckSimdName(88, "v128.store8_lane");
CheckSimdName(89, "v128.store16_lane");
CheckSimdName(90, "v128.store32_lane");
CheckSimdName(91, "v128.store64_lane");
CheckSimdName(12, "v128.const");
CheckSimdName(13, "i8x16.shuffle");
CheckSimdName(21, "i8x16.extract_lane_s");
CheckSimdName(22, "i8x16.extract_lane_u");
CheckSimdName(23, "i8x16.replace_lane");
CheckSimdName(24, "i16x8.extract_lane_s");
CheckSimdName(25, "i16x8.extract_lane_u");
CheckSimdName(26, "i16x8.replace_lane");
CheckSimdName(27, "i32x4.extract_lane");
CheckSimdName(28, "i32x4.replace_lane");
CheckSimdName(29, "i64x2.extract_lane");
CheckSimdName(30, "i64x2.replace_lane");
CheckSimdName(31, "f32x4.extract_lane");
CheckSimdName(32, "f32x4.replace_lane");
CheckSimdName(33, "f64x2.extract_lane");
CheckSimdName(34, "f64x2.replace_lane");
CheckSimdName(14, "i8x16.swizzle");
CheckSimdName(15, "i8x16.splat");
CheckSimdName(16, "i16x8.splat");
CheckSimdName(17, "i32x4.splat");
CheckSimdName(18, "i64x2.splat");
CheckSimdName(19, "f32x4.splat");
CheckSimdName(20, "f64x2.splat");
CheckSimdName(35, "i8x16.eq");
CheckSimdName(36, "i8x16.ne");
CheckSimdName(37, "i8x16.lt_s");
CheckSimdName(38, "i8x16.lt_u");
CheckSimdName(39, "i8x16.gt_s");
CheckSimdName(40, "i8x16.gt_u");
CheckSimdName(41, "i8x16.le_s");
CheckSimdName(42, "i8x16.le_u");
CheckSimdName(43, "i8x16.ge_s");
CheckSimdName(44, "i8x16.ge_u");
CheckSimdName(45, "i16x8.eq");
CheckSimdName(46, "i16x8.ne");
CheckSimdName(47, "i16x8.lt_s");
CheckSimdName(48, "i16x8.lt_u");
CheckSimdName(49, "i16x8.gt_s");
CheckSimdName(50, "i16x8.gt_u");
CheckSimdName(51, "i16x8.le_s");
CheckSimdName(52, "i16x8.le_u");
CheckSimdName(53, "i16x8.ge_s");
CheckSimdName(54, "i16x8.ge_u");
CheckSimdName(55, "i32x4.eq");
CheckSimdName(56, "i32x4.ne");
CheckSimdName(57, "i32x4.lt_s");
CheckSimdName(58, "i32x4.lt_u");
CheckSimdName(59, "i32x4.gt_s");
CheckSimdName(60, "i32x4.gt_u");
CheckSimdName(61, "i32x4.le_s");
CheckSimdName(62, "i32x4.le_u");
CheckSimdName(63, "i32x4.ge_s");
CheckSimdName(64, "i32x4.ge_u");
CheckSimdName(214, "i64x2.eq");
CheckSimdName(215, "i64x2.ne");
CheckSimdName(216, "i64x2.lt_s");
CheckSimdName(217, "i64x2.gt_s");
CheckSimdName(218, "i64x2.le_s");
CheckSimdName(219, "i64x2.ge_s");
CheckSimdName(65, "f32x4.eq");
CheckSimdName(66, "f32x4.ne");
CheckSimdName(67, "f32x4.lt");
CheckSimdName(68, "f32x4.gt");
CheckSimdName(69, "f32x4.le");
CheckSimdName(70, "f32x4.ge");
CheckSimdName(71, "f64x2.eq");
CheckSimdName(72, "f64x2.ne");
CheckSimdName(73, "f64x2.lt");
CheckSimdName(74, "f64x2.gt");
CheckSimdName(75, "f64x2.le");
CheckSimdName(76, "f64x2.ge");
CheckSimdName(77, "v128.not");
CheckSimdName(78, "v128.and");
CheckSimdName(79, "v128.andnot");
CheckSimdName(80, "v128.or");
CheckSimdName(81, "v128.xor");
CheckSimdName(82, "v128.bitselect");
CheckSimdName(83, "v128.any_true");
CheckSimdName(96, "i8x16.abs");
CheckSimdName(97, "i8x16.neg");
CheckSimdName(98, "i8x16.popcnt");
CheckSimdName(99, "i8x16.all_true");
CheckSimdName(100, "i8x16.bitmask");
CheckSimdName(101, "i8x16.narrow_i16x8_s");
CheckSimdName(102, "i8x16.narrow_i16x8_u");
CheckSimdName(107, "i8x16.shl");
CheckSimdName(108, "i8x16.shr_s");
CheckSimdName(109, "i8x16.shr_u");
CheckSimdName(110, "i8x16.add");
CheckSimdName(111, "i8x16.add_sat_s");
CheckSimdName(112, "i8x16.add_sat_u");
CheckSimdName(113, "i8x16.sub");
CheckSimdName(114, "i8x16.sub_sat_s");
CheckSimdName(115, "i8x16.sub_sat_u");
CheckSimdName(118, "i8x16.min_s");
CheckSimdName(119, "i8x16.min_u");
CheckSimdName(120, "i8x16.max_s");
CheckSimdName(121, "i8x16.max_u");
CheckSimdName(123, "i8x16.avgr_u");
CheckSimdName(124, "i16x8.extadd_pairwise_i8x16_s");
CheckSimdName(125, "i16x8.extadd_pairwise_i8x16_u");
CheckSimdName(128, "i16x8.abs");
CheckSimdName(129, "i16x8.neg");
CheckSimdName(130, "i16x8.q15mulr_sat_s");
CheckSimdName(131, "i16x8.all_true");
CheckSimdName(132, "i16x8.bitmask");
CheckSimdName(133, "i16x8.narrow_i32x4_s");
CheckSimdName(134, "i16x8.narrow_i32x4_u");
CheckSimdName(135, "i16x8.extend_low_i8x16_s");
CheckSimdName(136, "i16x8.extend_high_i8x16_s");
CheckSimdName(137, "i16x8.extend_low_i8x16_u");
CheckSimdName(138, "i16x8.extend_high_i8x16_u");
CheckSimdName(139, "i16x8.shl");
CheckSimdName(140, "i16x8.shr_s");
CheckSimdName(141, "i16x8.shr_u");
CheckSimdName(142, "i16x8.add");
CheckSimdName(143, "i16x8.add_sat_s");
CheckSimdName(144, "i16x8.add_sat_u");
CheckSimdName(145, "i16x8.sub");
CheckSimdName(146, "i16x8.sub_sat_s");
CheckSimdName(147, "i16x8.sub_sat_u");
CheckSimdName(149, "i16x8.mul");
CheckSimdName(150, "i16x8.min_s");
CheckSimdName(151, "i16x8.min_u");
CheckSimdName(152, "i16x8.max_s");
CheckSimdName(153, "i16x8.max_u");
CheckSimdName(155, "i16x8.avgr_u");
CheckSimdName(156, "i16x8.extmul_low_i8x16_s");
CheckSimdName(157, "i16x8.extmul_high_i8x16_s");
CheckSimdName(158, "i16x8.extmul_low_i8x16_u");
CheckSimdName(159, "i16x8.extmul_high_i8x16_u");
CheckSimdName(126, "i32x4.extadd_pairwise_i16x8_s");
CheckSimdName(127, "i32x4.extadd_pairwise_i16x8_u");
CheckSimdName(160, "i32x4.abs");
CheckSimdName(161, "i32x4.neg");
CheckSimdName(163, "i32x4.all_true");
CheckSimdName(164, "i32x4.bitmask");
CheckSimdName(167, "i32x4.extend_low_i16x8_s");
CheckSimdName(168, "i32x4.extend_high_i16x8_s");
CheckSimdName(169, "i32x4.extend_low_i16x8_u");
CheckSimdName(170, "i32x4.extend_high_i16x8_u");
CheckSimdName(171, "i32x4.shl");
CheckSimdName(172, "i32x4.shr_s");
CheckSimdName(173, "i32x4.shr_u");
CheckSimdName(174, "i32x4.add");
CheckSimdName(177, "i32x4.sub");
CheckSimdName(181, "i32x4.mul");
CheckSimdName(182, "i32x4.min_s");
CheckSimdName(183, "i32x4.min_u");
CheckSimdName(184, "i32x4.max_s");
CheckSimdName(185, "i32x4.max_u");
CheckSimdName(186, "i32x4.dot_i16x8_s");
CheckSimdName(188, "i32x4.extmul_low_i16x8_s");
CheckSimdName(189, "i32x4.extmul_high_i16x8_s");
CheckSimdName(190, "i32x4.extmul_low_i16x8_u");
CheckSimdName(191, "i32x4.extmul_high_i16x8_u");
CheckSimdName(192, "i64x2.abs");
CheckSimdName(193, "i64x2.neg");
CheckSimdName(195, "i64x2.all_true");
CheckSimdName(196, "i64x2.bitmask");
CheckSimdName(199, "i64x2.extend_low_i32x4_s");
CheckSimdName(200, "i64x2.extend_high_i32x4_s");
CheckSimdName(201, "i64x2.extend_low_i32x4_u");
CheckSimdName(202, "i64x2.extend_high_i32x4_u");
CheckSimdName(203, "i64x2.shl");
CheckSimdName(204, "i64x2.shr_s");
CheckSimdName(205, "i64x2.shr_u");
CheckSimdName(206, "i64x2.add");
CheckSimdName(209, "i64x2.sub");
CheckSimdName(213, "i64x2.mul");
CheckSimdName(220, "i64x2.extmul_low_i32x4_s");
CheckSimdName(221, "i64x2.extmul_high_i32x4_s");
CheckSimdName(222, "i64x2.extmul_low_i32x4_u");
CheckSimdName(223, "i64x2.extmul_high_i32x4_u");
CheckSimdName(103, "f32x4.ceil");
CheckSimdName(104, "f32x4.floor");
CheckSimdName(105, "f32x4.trunc");
CheckSimdName(106, "f32x4.nearest");
CheckSimdName(224, "f32x4.abs");
CheckSimdName(225, "f32x4.neg");
CheckSimdName(227, "f32x4.sqrt");
CheckSimdName(228, "f32x4.add");
CheckSimdName(229, "f32x4.sub");
CheckSimdName(230, "f32x4.mul");
CheckSimdName(231, "f32x4.div");
CheckSimdName(232, "f32x4.min");
CheckSimdName(233, "f32x4.max");
CheckSimdName(234, "f32x4.pmin");
CheckSimdName(235, "f32x4.pmax");
CheckSimdName(116, "f64x2.ceil");
CheckSimdName(117, "f64x2.floor");
CheckSimdName(122, "f64x2.trunc");
CheckSimdName(148, "f64x2.nearest");
CheckSimdName(236, "f64x2.abs");
CheckSimdName(237, "f64x2.neg");
CheckSimdName(239, "f64x2.sqrt");
CheckSimdName(240, "f64x2.add");
CheckSimdName(241, "f64x2.sub");
CheckSimdName(242, "f64x2.mul");
CheckSimdName(243, "f64x2.div");
CheckSimdName(244, "f64x2.min");
CheckSimdName(245, "f64x2.max");
CheckSimdName(246, "f64x2.pmin");
CheckSimdName(247, "f64x2.pmax");
CheckSimdName(248, "i32x4.trunc_sat_f32x4_s");
CheckSimdName(249, "i32x4.trunc_sat_f32x4_u");
CheckSimdName(250, "f32x4.convert_i32x4_s");
CheckSimdName(251, "f32x4.convert_i32x4_u");
CheckSimdName(252, "i32x4.trunc_sat_f64x2_s_zero");
CheckSimdName(253, "i32x4.trunc_sat_f64x2_u_zero");
CheckSimdName(254, "f64x2.convert_low_i32x4_s");
CheckSimdName(255, "f64x2.convert_low_i32x4_u");
CheckSimdName(94, "f32x4.demote_f64x2_zero");
CheckSimdName(95, "f64x2.promote_low_f32x4");
}
TEST_F(WasmOpcodeTest, AtomicNames) {
// Reference:
// https://webassembly.github.io/threads/core/text/instructions.html#atomic-memory-instructions
CheckName(kExprAtomicNotify, "memory.atomic.notify");
CheckName(kExprI32AtomicWait, "memory.atomic.wait32");
CheckName(kExprI64AtomicWait, "memory.atomic.wait64");
CheckName(kExprI32AtomicLoad, "i32.atomic.load");
CheckName(kExprI64AtomicLoad, "i64.atomic.load");
CheckName(kExprI32AtomicLoad8U, "i32.atomic.load8_u");
CheckName(kExprI32AtomicLoad16U, "i32.atomic.load16_u");
CheckName(kExprI64AtomicLoad8U, "i64.atomic.load8_u");
CheckName(kExprI64AtomicLoad16U, "i64.atomic.load16_u");
CheckName(kExprI64AtomicLoad32U, "i64.atomic.load32_u");
CheckName(kExprI32AtomicStore, "i32.atomic.store");
CheckName(kExprI64AtomicStore, "i64.atomic.store");
CheckName(kExprI32AtomicStore8U, "i32.atomic.store8");
CheckName(kExprI32AtomicStore16U, "i32.atomic.store16");
CheckName(kExprI64AtomicStore8U, "i64.atomic.store8");
CheckName(kExprI64AtomicStore16U, "i64.atomic.store16");
CheckName(kExprI64AtomicStore32U, "i64.atomic.store32");
CheckName(kExprI32AtomicAdd, "i32.atomic.rmw.add");
CheckName(kExprI64AtomicAdd, "i64.atomic.rmw.add");
CheckName(kExprI32AtomicAdd8U, "i32.atomic.rmw8.add_u");
CheckName(kExprI32AtomicAdd16U, "i32.atomic.rmw16.add_u");
CheckName(kExprI64AtomicAdd8U, "i64.atomic.rmw8.add_u");
CheckName(kExprI64AtomicAdd16U, "i64.atomic.rmw16.add_u");
CheckName(kExprI64AtomicAdd32U, "i64.atomic.rmw32.add_u");
CheckName(kExprI32AtomicSub, "i32.atomic.rmw.sub");
CheckName(kExprI64AtomicSub, "i64.atomic.rmw.sub");
CheckName(kExprI32AtomicSub8U, "i32.atomic.rmw8.sub_u");
CheckName(kExprI32AtomicSub16U, "i32.atomic.rmw16.sub_u");
CheckName(kExprI64AtomicSub8U, "i64.atomic.rmw8.sub_u");
CheckName(kExprI64AtomicSub16U, "i64.atomic.rmw16.sub_u");
CheckName(kExprI64AtomicSub32U, "i64.atomic.rmw32.sub_u");
CheckName(kExprI32AtomicAnd, "i32.atomic.rmw.and");
CheckName(kExprI64AtomicAnd, "i64.atomic.rmw.and");
CheckName(kExprI32AtomicAnd8U, "i32.atomic.rmw8.and_u");
CheckName(kExprI32AtomicAnd16U, "i32.atomic.rmw16.and_u");
CheckName(kExprI64AtomicAnd8U, "i64.atomic.rmw8.and_u");
CheckName(kExprI64AtomicAnd16U, "i64.atomic.rmw16.and_u");
CheckName(kExprI64AtomicAnd32U, "i64.atomic.rmw32.and_u");
CheckName(kExprI32AtomicOr, "i32.atomic.rmw.or");
CheckName(kExprI64AtomicOr, "i64.atomic.rmw.or");
CheckName(kExprI32AtomicOr8U, "i32.atomic.rmw8.or_u");
CheckName(kExprI32AtomicOr16U, "i32.atomic.rmw16.or_u");
CheckName(kExprI64AtomicOr8U, "i64.atomic.rmw8.or_u");
CheckName(kExprI64AtomicOr16U, "i64.atomic.rmw16.or_u");
CheckName(kExprI64AtomicOr32U, "i64.atomic.rmw32.or_u");
CheckName(kExprI32AtomicXor, "i32.atomic.rmw.xor");
CheckName(kExprI64AtomicXor, "i64.atomic.rmw.xor");
CheckName(kExprI32AtomicXor8U, "i32.atomic.rmw8.xor_u");
CheckName(kExprI32AtomicXor16U, "i32.atomic.rmw16.xor_u");
CheckName(kExprI64AtomicXor8U, "i64.atomic.rmw8.xor_u");
CheckName(kExprI64AtomicXor16U, "i64.atomic.rmw16.xor_u");
CheckName(kExprI64AtomicXor32U, "i64.atomic.rmw32.xor_u");
CheckName(kExprI32AtomicExchange, "i32.atomic.rmw.xchg");
CheckName(kExprI64AtomicExchange, "i64.atomic.rmw.xchg");
CheckName(kExprI32AtomicExchange8U, "i32.atomic.rmw8.xchg_u");
CheckName(kExprI32AtomicExchange16U, "i32.atomic.rmw16.xchg_u");
CheckName(kExprI64AtomicExchange8U, "i64.atomic.rmw8.xchg_u");
CheckName(kExprI64AtomicExchange16U, "i64.atomic.rmw16.xchg_u");
CheckName(kExprI64AtomicExchange32U, "i64.atomic.rmw32.xchg_u");
CheckName(kExprI32AtomicCompareExchange, "i32.atomic.rmw.cmpxchg");
CheckName(kExprI64AtomicCompareExchange, "i64.atomic.rmw.cmpxchg");
CheckName(kExprI32AtomicCompareExchange8U, "i32.atomic.rmw8.cmpxchg_u");
CheckName(kExprI32AtomicCompareExchange16U, "i32.atomic.rmw16.cmpxchg_u");
CheckName(kExprI64AtomicCompareExchange8U, "i64.atomic.rmw8.cmpxchg_u");
CheckName(kExprI64AtomicCompareExchange16U, "i64.atomic.rmw16.cmpxchg_u");
CheckName(kExprI64AtomicCompareExchange32U, "i64.atomic.rmw32.cmpxchg_u");
// https://github.com/WebAssembly/threads/blob/main/proposals/threads/Overview.md#fence-operator
CheckName(kExprAtomicFence, "atomic.fence");
}
} // namespace wasm
} // namespace internal
} // namespace v8