Commit 848137c4 authored by Ng Zhi An, committed by Commit Bot

[wasm-simd] Move load/store lane out of post-mvp

Define a new macro list, since these opcodes take one extra immediate
operand (the lane index) compared to other SIMD loads/stores.

Also remove all the ifdef guards.

Bug: v8:10975
Change-Id: Ib0a1f7bb6c4bdf83d81a65b4e02199b792d13837
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2645568
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72377}
parent 97a935ee
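
For background, the opcode lists touched below follow the X-macro pattern: a list macro takes a per-entry macro V and applies it to every (name, opcode, signature) tuple, so each consumer defines V once and gets every entry for free. A minimal standalone sketch of the idea, with hypothetical names (V8's real lists also carry a signature tag):

// Minimal sketch of the X-macro pattern behind these opcode lists.
// All names here are hypothetical, not V8's actual definitions.
#define FOREACH_DEMO_MEM_1_OPERAND_OPCODE(V) \
  V(DemoLoad8Lane, 0x58)                     \
  V(DemoStore8Lane, 0x5c)

// One consumer: declare an enum value per entry.
enum DemoOpcode {
#define DECLARE(name, opc) kExpr##name = (opc),
  FOREACH_DEMO_MEM_1_OPERAND_OPCODE(DECLARE)
#undef DECLARE
};

// Another consumer: a shared predicate for every opcode in the list.
constexpr bool HasLaneImmediate(DemoOpcode opcode) {
#define CASE(name, opc) opcode == kExpr##name ||
  return FOREACH_DEMO_MEM_1_OPERAND_OPCODE(CASE) false;
#undef CASE
}

Keeping the lane-index opcodes in their own list means every consumer that must handle the extra immediate can match exactly these opcodes without enumerating them by hand.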
@@ -2782,14 +2782,6 @@ void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
#endif  // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
        // && !V8_TARGET_ARCH_IA32
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
-    !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
-void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
-#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
-        // && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
    !V8_TARGET_ARCH_ARM
// TODO(v8:10983) Prototyping sign select.
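
The stubs deleted here follow a common V8 pattern: architectures that have no lowering yet get a guarded UNIMPLEMENTED() body, so unsupported ports crash loudly at instruction-selection time rather than miscompiling. A rough standalone sketch of that pattern (the macro and guard names below are stand-ins, not V8's):

#include <cstdio>
#include <cstdlib>

// Stand-in for V8's UNIMPLEMENTED() macro, which aborts at runtime.
#define UNIMPLEMENTED() \
  (std::fprintf(stderr, "unimplemented: %s\n", __func__), std::abort())

// Ports without a lowering fail loudly; implemented ports define the
// real visitor in their arch-specific instruction selector instead.
#if !defined(DEMO_ARCH_HAS_LOAD_LANE)
void VisitLoadLane() { UNIMPLEMENTED(); }
void VisitStoreLane() { UNIMPLEMENTED(); }
#endif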
......
@@ -1839,7 +1839,6 @@ class WasmDecoder : public Decoder {
        return length;
      FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
        return length + 1;
-      // clang-format on
      FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
      case kExprPrefetchT:
      case kExprPrefetchNT: {
@@ -1848,20 +1847,14 @@ class WasmDecoder : public Decoder {
                                            kConservativelyAssumeMemory64);
        return length + imm.length;
      }
-      case kExprS128Load8Lane:
-      case kExprS128Load16Lane:
-      case kExprS128Load32Lane:
-      case kExprS128Load64Lane:
-      case kExprS128Store8Lane:
-      case kExprS128Store16Lane:
-      case kExprS128Store32Lane:
-      case kExprS128Store64Lane: {
-        MemoryAccessImmediate<validate> imm(decoder, pc + length,
-                                            UINT32_MAX,
+      FOREACH_SIMD_MEM_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE) {
+        MemoryAccessImmediate<validate> imm(
+            decoder, pc + length, UINT32_MAX,
            kConservativelyAssumeMemory64);
        // 1 more byte for lane index immediate.
        return length + imm.length + 1;
      }
+      // clang-format on
      // Shuffles require a byte per lane, or 16 immediate bytes.
      case kExprS128Const:
      case kExprI8x16Shuffle:
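
The decoder above computes instruction length as: opcode bytes, plus the memarg immediate (alignment hint and offset, both LEB128-encoded), plus one trailing byte for the lane index. A simplified standalone model of that layout, assuming hypothetical helper names rather than V8's decoder API:

#include <cstddef>
#include <cstdint>

// Simplified stand-in for the decoder's LEB128 u32 reader: returns the
// decoded value and reports how many bytes were consumed.
uint32_t ReadU32Leb(const uint8_t* pc, size_t* length) {
  uint32_t result = 0;
  int shift = 0;
  size_t i = 0;
  uint8_t b;
  do {
    b = pc[i++];
    result |= static_cast<uint32_t>(b & 0x7F) << shift;
    shift += 7;
  } while (b & 0x80);
  *length = i;
  return result;
}

// A load/store-lane immediate is a memarg (alignment hint + offset)
// followed by exactly one lane-index byte -- the "+ 1" in the decoder.
size_t LoadLaneImmediateLength(const uint8_t* pc) {
  size_t align_len = 0;
  size_t offset_len = 0;
  ReadU32Leb(pc, &align_len);
  ReadU32Leb(pc + align_len, &offset_len);
  return align_len + offset_len + 1;
}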
......
@@ -590,6 +590,7 @@ constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
  return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
+      FOREACH_SIMD_MEM_1_OPERAND_OPCODE(CASE)
      FOREACH_SIMD_POST_MVP_MEM_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
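
The CASE macro here expands each list entry into one arm of a chained conditional expression ending in kSigEnum_None, which keeps the whole signature lookup a single constexpr return. A minimal sketch of the same trick with hypothetical names:

// Each CASE expands to "opcode == <byte> ? <sig> :", so the lists
// concatenate into one nested ternary ending in a default value.
enum DemoSig { kSigNone, kSig_s_is, kSig_v_is };

#define FOREACH_DEMO_OPCODE(V)  \
  V(Load8Lane, 0x58, kSig_s_is) \
  V(Store8Lane, 0x5c, kSig_v_is)

constexpr DemoSig GetDemoSig(unsigned char opcode) {
#define CASE(name, opc, sig) opcode == (opc) ? (sig):
  return FOREACH_DEMO_OPCODE(CASE) kSigNone;
#undef CASE
}

static_assert(GetDemoSig(0x58) == kSig_s_is, "lookup works at compile time");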
......
@@ -295,6 +295,16 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
  V(S128Load32Zero, 0xfdfc, s_i) \
  V(S128Load64Zero, 0xfdfd, s_i)
+
+#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
+  V(S128Load8Lane, 0xfd58, s_is)             \
+  V(S128Load16Lane, 0xfd59, s_is)            \
+  V(S128Load32Lane, 0xfd5a, s_is)            \
+  V(S128Load64Lane, 0xfd5b, s_is)            \
+  V(S128Store8Lane, 0xfd5c, v_is)            \
+  V(S128Store16Lane, 0xfd5d, v_is)           \
+  V(S128Store32Lane, 0xfd5e, v_is)           \
+  V(S128Store64Lane, 0xfd5f, v_is)
+
#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(I8x16Shuffle, 0xfd0d, s_ss)
@@ -484,14 +494,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
  V(F64x2NearestInt, 0xfddf, s_s)
#define FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
-  V(S128Load8Lane, 0xfd58, s_is)            \
-  V(S128Load16Lane, 0xfd59, s_is)           \
-  V(S128Load32Lane, 0xfd5a, s_is)           \
-  V(S128Load64Lane, 0xfd5b, s_is)           \
-  V(S128Store8Lane, 0xfd5c, v_is)           \
-  V(S128Store16Lane, 0xfd5d, v_is)          \
-  V(S128Store32Lane, 0xfd5e, v_is)          \
-  V(S128Store64Lane, 0xfd5f, v_is)          \
  V(PrefetchT, 0xfdc5, v_i)                  \
  V(PrefetchNT, 0xfdc6, v_i)
@@ -558,6 +560,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
  FOREACH_SIMD_1_OPERAND_OPCODE(V)      \
  FOREACH_SIMD_MASK_OPERAND_OPCODE(V)   \
  FOREACH_SIMD_MEM_OPCODE(V)            \
+  FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
  FOREACH_SIMD_POST_MVP_MEM_OPCODE(V)   \
  FOREACH_SIMD_CONST_OPCODE(V)
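
The third macro argument is a signature tag: letters before the underscore are result types, letters after are parameters (s = s128, i = i32, a leading v = no result). So s_is is "s128(i32, s128)" for load lane, and v_is is "void(i32, s128)" for store lane. A minimal sketch of what such tags denote, using hypothetical types rather than V8's FunctionSig:

#include <vector>

// Hypothetical value-type tags mirroring the shorthand letters.
enum class ValType { kI32, kS128 };

struct DemoSignature {
  std::vector<ValType> results;
  std::vector<ValType> params;
};

// s_is: one s128 result, (i32 address, s128 input) -- the load-lane shape.
const DemoSignature kSig_s_is{{ValType::kS128},
                              {ValType::kI32, ValType::kS128}};

// v_is: no result, (i32 address, s128 source) -- the store-lane shape.
const DemoSignature kSig_v_is{{}, {ValType::kI32, ValType::kS128}};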
......
@@ -4175,13 +4175,9 @@ WASM_SIMD_TEST(S128Load64Zero) {
  RunLoadZeroTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Zero);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
template <typename T>
void RunLoadLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                     WasmOpcode load_op, WasmOpcode splat_op) {
-  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmOpcode const_op =
      splat_op == kExprI64x2Splat ? kExprI64Const : kExprI32Const;
@@ -4280,8 +4276,6 @@ WASM_SIMD_TEST_NO_LOWERING(S128Load64Lane) {
template <typename T>
void RunStoreLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode store_op, WasmOpcode splat_op) {
-  FLAG_SCOPE(wasm_simd_post_mvp);
  constexpr int lanes = kSimd128Size / sizeof(T);
  constexpr int mem_index = 16;  // Store to mem index 16 (bytes).
  constexpr int splat_value = 33;
@@ -4372,9 +4366,6 @@ WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
                kExprI64x2Splat);
}
-#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
-        // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_MIPS64
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
  WASM_SIMD_TEST(S##format##AnyTrue) {                         \
    FLAG_SCOPE(wasm_simd_post_mvp);                            \
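
For reference, these tests exercise the v128.loadN_lane / v128.storeN_lane semantics: load_lane replaces a single lane of an input vector with a value read from memory, and store_lane writes a single lane out, leaving memory around it untouched. A scalar model of that behavior in plain C++ (not the test harness itself):

#include <cstdint>
#include <cstring>

// Scalar model of v128.loadN_lane: read one T from memory and replace
// lane `idx` of the input vector; all other lanes are preserved.
template <typename T, int kLanes>
void LoadLane(const uint8_t* mem, T (&vec)[kLanes], int idx) {
  T value;
  std::memcpy(&value, mem, sizeof(T));  // unaligned-safe read
  vec[idx] = value;
}

// Scalar model of v128.storeN_lane: write only lane `idx` of the
// vector to memory; the rest of the vector is not stored.
template <typename T, int kLanes>
void StoreLane(uint8_t* mem, const T (&vec)[kLanes], int idx) {
  std::memcpy(mem, &vec[idx], sizeof(T));
}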
......