Commit 2338a2fd authored by Aseem Garg, committed by Commit Bot

[wasm] Add simd mem and shift ops to interpreter

R=clemensh@chromium.org
BUG=v8:6020

Change-Id: I5d95e4c23240ae4e174d896d31e1b9b1d89d9418
Reviewed-on: https://chromium-review.googlesource.com/1096493
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Aseem Garg <aseemgarg@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53717}
parent 9b0b3ab0
@@ -354,8 +354,22 @@ void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
   }
 }
 
-void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
-                                     const Operator* load_op, SimdType type) {
+void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
+  MachineRepresentation rep = LoadRepresentationOf(node->op()).representation();
+  const Operator* load_op;
+  switch (node->opcode()) {
+    case IrOpcode::kLoad:
+      load_op = machine()->Load(MachineTypeFrom(type));
+      break;
+    case IrOpcode::kUnalignedLoad:
+      load_op = machine()->UnalignedLoad(MachineTypeFrom(type));
+      break;
+    case IrOpcode::kProtectedLoad:
+      load_op = machine()->ProtectedLoad(MachineTypeFrom(type));
+      break;
+    default:
+      UNREACHABLE();
+  }
   if (rep == MachineRepresentation::kSimd128) {
     Node* base = node->InputAt(0);
     Node* index = node->InputAt(1);
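The kSimd128 path entered above replaces the single 128-bit load with four 32-bit loads at consecutive word offsets (GetIndexNodes produces the four index nodes). A minimal standalone sketch of that scalarization idea, assuming a flat little-endian byte buffer (illustrative C++, not the V8 node API):

// Sketch only: one 128-bit load becomes four 32-bit lane loads.
#include <cstdint>
#include <cstring>

void LoadSimd128AsWords(const uint8_t* base, std::size_t index,
                        uint32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    // Lane i lives at byte offset index + 4 * i.
    std::memcpy(&out[i], base + index + 4 * i, sizeof(uint32_t));
  }
}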
@@ -387,9 +401,38 @@ void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
   }
 }
 
-void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
-                                      const Operator* store_op,
-                                      SimdType rep_type) {
+void SimdScalarLowering::LowerStoreOp(Node* node) {
+  // For store operation, use replacement type of its input instead of the
+  // one of its effected node.
+  DCHECK_LT(2, node->InputCount());
+  SimdType rep_type = ReplacementType(node->InputAt(2));
+  replacements_[node->id()].type = rep_type;
+  const Operator* store_op;
+  MachineRepresentation rep;
+  switch (node->opcode()) {
+    case IrOpcode::kStore: {
+      rep = StoreRepresentationOf(node->op()).representation();
+      WriteBarrierKind write_barrier_kind =
+          StoreRepresentationOf(node->op()).write_barrier_kind();
+      store_op = machine()->Store(StoreRepresentation(
+          MachineTypeFrom(rep_type).representation(), write_barrier_kind));
+      break;
+    }
+    case IrOpcode::kUnalignedStore: {
+      rep = UnalignedStoreRepresentationOf(node->op());
+      store_op =
+          machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
+      break;
+    }
+    case IrOpcode::kProtectedStore: {
+      rep = StoreRepresentationOf(node->op()).representation();
+      store_op =
+          machine()->ProtectedStore(MachineTypeFrom(rep_type).representation());
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
   if (rep == MachineRepresentation::kSimd128) {
     Node* base = node->InputAt(0);
     Node* index = node->InputAt(1);
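The kSimd128 branch of LowerStoreOp is the mirror image: the four replacement nodes of the stored value are written back with word-sized stores. A standalone sketch under the same assumptions as the load sketch above:

// Sketch only: one 128-bit store becomes four 32-bit lane stores.
void StoreSimd128AsWords(uint8_t* base, std::size_t index,
                         const uint32_t in[4]) {
  for (int i = 0; i < 4; ++i) {
    std::memcpy(base + index + 4 * i, &in[i], sizeof(uint32_t));
  }
}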
@@ -899,52 +942,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
         }
         break;
       }
-      case IrOpcode::kLoad: {
-        MachineRepresentation rep =
-            LoadRepresentationOf(node->op()).representation();
-        const Operator* load_op;
-        load_op = machine()->Load(MachineTypeFrom(rep_type));
-        LowerLoadOp(rep, node, load_op, rep_type);
-        break;
-      }
-      case IrOpcode::kUnalignedLoad: {
-        MachineRepresentation rep =
-            LoadRepresentationOf(node->op()).representation();
-        const Operator* load_op;
-        load_op = machine()->UnalignedLoad(MachineTypeFrom(rep_type));
-        LowerLoadOp(rep, node, load_op, rep_type);
-        break;
-      }
-      case IrOpcode::kStore: {
-        // For store operation, use replacement type of its input instead of the
-        // one of its effected node.
-        DCHECK_LT(2, node->InputCount());
-        SimdType input_rep_type = ReplacementType(node->InputAt(2));
-        if (input_rep_type != rep_type)
-          replacements_[node->id()].type = input_rep_type;
-        MachineRepresentation rep =
-            StoreRepresentationOf(node->op()).representation();
-        WriteBarrierKind write_barrier_kind =
-            StoreRepresentationOf(node->op()).write_barrier_kind();
-        const Operator* store_op;
-        store_op = machine()->Store(
-            StoreRepresentation(MachineTypeFrom(input_rep_type).representation(),
-                                write_barrier_kind));
-        LowerStoreOp(rep, node, store_op, input_rep_type);
-        break;
-      }
-      case IrOpcode::kUnalignedStore: {
-        // For store operation, use replacement type of its input instead of the
-        // one of its effected node.
-        DCHECK_LT(2, node->InputCount());
-        SimdType input_rep_type = ReplacementType(node->InputAt(2));
-        if (input_rep_type != rep_type)
-          replacements_[node->id()].type = input_rep_type;
-        MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
-        const Operator* store_op;
-        store_op = machine()->UnalignedStore(
-            MachineTypeFrom(input_rep_type).representation());
-        LowerStoreOp(rep, node, store_op, input_rep_type);
-        break;
-      }
+      case IrOpcode::kLoad:
+      case IrOpcode::kUnalignedLoad:
+      case IrOpcode::kProtectedLoad: {
+        LowerLoadOp(node, rep_type);
+        break;
+      }
+      case IrOpcode::kStore:
+      case IrOpcode::kUnalignedStore:
+      case IrOpcode::kProtectedStore: {
+        LowerStoreOp(node);
+        break;
+      }
       case IrOpcode::kReturn: {
...
@@ -73,10 +73,8 @@ class SimdScalarLowering {
   void PreparePhiReplacement(Node* phi);
   void SetLoweredType(Node* node, Node* output);
   void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
-  void LowerLoadOp(MachineRepresentation rep, Node* node,
-                   const Operator* load_op, SimdType type);
-  void LowerStoreOp(MachineRepresentation rep, Node* node,
-                    const Operator* store_op, SimdType rep_type);
+  void LowerLoadOp(Node* node, SimdType type);
+  void LowerStoreOp(Node* node);
   void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
                      bool not_horizontal = true);
   void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
...
@@ -1138,11 +1138,6 @@ class WasmDecoder : public Decoder {
       case kSimdPrefix: {
         opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
         switch (opcode) {
-          case kExprI32AtomicStore:
-          case kExprI32AtomicStore8U:
-          case kExprI32AtomicStore16U:
-          case kExprS128StoreMem:
-            return {2, 0};
           FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(DECLARE_OPCODE_CASE)
             return {1, 1};
           FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(DECLARE_OPCODE_CASE)
...
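With these hard-coded cases gone, the stack arity of kExprS128StoreMem is derived from its signature: the v_is entry added in the signature list below means two values popped (an i32 address and an s128 value) and none pushed. A hypothetical sketch of that derivation (helper name and types are assumptions, not the real decoder API):

// Hypothetical sketch: arity from a signature instead of a hard-coded case.
// For v_is = (i32, s128) -> void this yields {2, 0}, matching the deleted
// "return {2, 0};" above.
#include <cstddef>
#include <cstdint>

struct StackArity { uint32_t pops; uint32_t pushes; };

StackArity ArityFromSig(std::size_t param_count, std::size_t return_count) {
  return {static_cast<uint32_t>(param_count),
          static_cast<uint32_t>(return_count)};
}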
@@ -1815,6 +1815,39 @@ class ThreadImpl {
       REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
       REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
 #undef REPLACE_LANE_CASE
+      case kExprS128LoadMem:
+        return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
+                                             MachineRepresentation::kSimd128);
+      case kExprS128StoreMem:
+        return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
+                                              MachineRepresentation::kSimd128);
+#define SHIFT_CASE(op, name, stype, count, expr)                         \
+  case kExpr##op: {                                                      \
+    SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
+    ++len;                                                               \
+    WasmValue v = Pop();                                                 \
+    stype s = v.to_s128().to_##name();                                   \
+    stype res;                                                           \
+    for (size_t i = 0; i < count; ++i) {                                 \
+      auto a = s.val[i];                                                 \
+      res.val[i] = expr;                                                 \
+    }                                                                    \
+    Push(WasmValue(Simd128(res)));                                       \
+    return true;                                                         \
+  }
+      SHIFT_CASE(I32x4Shl, i32x4, int4, 4, a << imm.shift)
+      SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
+      SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
+                 static_cast<uint32_t>(a) >> imm.shift)
+      SHIFT_CASE(I16x8Shl, i16x8, int8, 8, a << imm.shift)
+      SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
+      SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
+                 static_cast<uint16_t>(a) >> imm.shift)
+      SHIFT_CASE(I8x16Shl, i8x16, int16, 16, a << imm.shift)
+      SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
+      SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
+                 static_cast<uint8_t>(a) >> imm.shift)
+#undef SHIFT_CASE
       default:
         return false;
     }
...
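Expanding SHIFT_CASE by hand for one opcode makes the control flow concrete. This is the mechanical preprocessor expansion for kExprI32x4ShrU, using only names from the macro above; the cast in the per-lane expression is the whole difference between the logical and arithmetic right shifts:

case kExprI32x4ShrU: {
  SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
  ++len;
  WasmValue v = Pop();
  int4 s = v.to_s128().to_i32x4();
  int4 res;
  for (size_t i = 0; i < 4; ++i) {
    auto a = s.val[i];
    // static_cast<uint32_t> zero-fills the vacated high bits; without it,
    // '>>' on a negative int32_t lane would sign-extend (arithmetic shift).
    res.val[i] = static_cast<uint32_t>(a) >> imm.shift;
  }
  Push(WasmValue(Simd128(res)));
  return true;
}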
@@ -428,7 +428,8 @@ struct GetAsmJsOpcodeSigIndex {
 struct GetSimdOpcodeSigIndex {
   constexpr WasmOpcodeSig operator()(byte opcode) const {
 #define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
-    return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) kSigEnum_None;
+    return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
+        kSigEnum_None;
 #undef CASE
   }
 };
...
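Each CASE contributes one conditional-expression link, so the whole lookup stays a single constexpr return that ends in kSigEnum_None. Expanded by hand for two illustrative entries (the opcode bytes here are placeholders, not copied from the opcode tables):

// Illustrative expansion only; the real entries come from the FOREACH_* lists.
return opcode == (0xfd80 & 0xFF) ? kSigEnum_s_i    // e.g. an s_i load opcode
     : opcode == (0xfd81 & 0xFF) ? kSigEnum_v_is   // e.g. a v_is store opcode
     : kSigEnum_None;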
@@ -545,6 +545,7 @@ using WasmName = Vector<const char>;
   V(s_i, kWasmS128, kWasmI32)             \
   V(s_si, kWasmS128, kWasmS128, kWasmI32) \
   V(i_s, kWasmI32, kWasmS128)             \
+  V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
   V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
 
 #define FOREACH_PREFIX(V) \
...
@@ -1090,17 +1090,17 @@ void RunI32x4ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
   FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
 }
 
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I32x4Shl) {
+WASM_SIMD_TEST(I32x4Shl) {
   RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4Shl,
                       LogicalShiftLeft, 1);
 }
 
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I32x4ShrS) {
+WASM_SIMD_TEST(I32x4ShrS) {
   RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4ShrS,
                       ArithmeticShiftRight, 1);
 }
 
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I32x4ShrU) {
+WASM_SIMD_TEST(I32x4ShrU) {
   RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4ShrU,
                       LogicalShiftRight, 1);
 }
@@ -1350,17 +1350,17 @@ void RunI16x8ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
   FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
 }
 
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I16x8Shl) {
+WASM_SIMD_TEST(I16x8Shl) {
   RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8Shl,
                       LogicalShiftLeft, 1);
 }
 
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I16x8ShrS) {
+WASM_SIMD_TEST(I16x8ShrS) {
   RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8ShrS,
                       ArithmeticShiftRight, 1);
 }
 
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I16x8ShrU) {
+WASM_SIMD_TEST(I16x8ShrU) {
   RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8ShrU,
                       LogicalShiftRight, 1);
 }
@@ -1569,17 +1569,17 @@ void RunI8x16ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
 #if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
     V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I8x16Shl) {
+WASM_SIMD_TEST(I8x16Shl) {
   RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16Shl,
                       LogicalShiftLeft, 1);
 }
 
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I8x16ShrS) {
+WASM_SIMD_TEST(I8x16ShrS) {
   RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16ShrS,
                       ArithmeticShiftRight, 1);
 }
 
-WASM_SIMD_COMPILED_AND_LOWERED_TEST(I8x16ShrU) {
+WASM_SIMD_TEST(I8x16ShrU) {
   RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16ShrU,
                       LogicalShiftRight, 1);
 }
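Each of these tests checks the wasm result against a scalar expected_op helper. A hedged sketch of what such helpers plausibly compute (the template signatures are an assumption, not copied from the test file); the three behaviors mirror the interpreter's SHIFT_CASE expressions above:

#include <type_traits>

template <typename T>
T LogicalShiftLeft(T a, int shift) {
  return static_cast<T>(a << shift);
}

template <typename T>
T ArithmeticShiftRight(T a, int shift) {
  return static_cast<T>(a >> shift);  // sign-extends for signed T
}

template <typename T>
T LogicalShiftRight(T a, int shift) {
  using U = typename std::make_unsigned<T>::type;
  return static_cast<T>(static_cast<U>(a) >> shift);  // zero-fills
}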
@@ -2280,7 +2280,7 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
   CHECK_EQ(GetScalar(global, 3), 65.0f);
 }
 
-WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
+WASM_SIMD_TEST(SimdLoadStoreLoad) {
   WasmRunner<int32_t> r(execution_mode, lower_simd);
   int32_t* memory =
       r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
...