Commit 76cc2ad0 authored by Sathya Gunasekaran, committed by Commit Bot

Revert "[wasm] Add simd mem and shift ops to interpreter"

This reverts commit 2338a2fd.

Reason for revert: broke mips

see https://ci.chromium.org/buildbot/client.v8.ports/V8%20Mips%20-%20builder/17573

Original change's description:
> [wasm] Add simd mem and shift ops to interpreter
> 
> R=clemensh@chromium.org
> BUG=v8:6020
> 
> Change-Id: I5d95e4c23240ae4e174d896d31e1b9b1d89d9418
> Reviewed-on: https://chromium-review.googlesource.com/1096493
> Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
> Commit-Queue: Aseem Garg <aseemgarg@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#53717}

TBR=aseemgarg@chromium.org,clemensh@chromium.org

Change-Id: I8e5b2e9a9c1c09d035335ba6681bbe3e1ce5abfb
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:6020
Reviewed-on: https://chromium-review.googlesource.com/1099776
Reviewed-by: Sathya Gunasekaran <gsathya@chromium.org>
Commit-Queue: Sathya Gunasekaran <gsathya@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53718}
parent 2338a2fd
@@ -354,22 +354,8 @@ void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
}
}
void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
MachineRepresentation rep = LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
switch (node->opcode()) {
case IrOpcode::kLoad:
load_op = machine()->Load(MachineTypeFrom(type));
break;
case IrOpcode::kUnalignedLoad:
load_op = machine()->UnalignedLoad(MachineTypeFrom(type));
break;
case IrOpcode::kProtectedLoad:
load_op = machine()->ProtectedLoad(MachineTypeFrom(type));
break;
default:
UNREACHABLE();
}
void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
const Operator* load_op, SimdType type) {
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -401,38 +387,9 @@ void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
}
}
void SimdScalarLowering::LowerStoreOp(Node* node) {
// For store operation, use replacement type of its input instead of the
// one of its effected node.
DCHECK_LT(2, node->InputCount());
SimdType rep_type = ReplacementType(node->InputAt(2));
replacements_[node->id()].type = rep_type;
const Operator* store_op;
MachineRepresentation rep;
switch (node->opcode()) {
case IrOpcode::kStore: {
rep = StoreRepresentationOf(node->op()).representation();
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
store_op = machine()->Store(StoreRepresentation(
MachineTypeFrom(rep_type).representation(), write_barrier_kind));
break;
}
case IrOpcode::kUnalignedStore: {
rep = UnalignedStoreRepresentationOf(node->op());
store_op =
machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
break;
}
case IrOpcode::kProtectedStore: {
rep = StoreRepresentationOf(node->op()).representation();
store_op =
machine()->ProtectedStore(MachineTypeFrom(rep_type).representation());
break;
}
default:
UNREACHABLE();
}
void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op,
SimdType rep_type) {
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -942,16 +899,52 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
case IrOpcode::kLoad:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kProtectedLoad: {
LowerLoadOp(node, rep_type);
case IrOpcode::kLoad: {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
load_op = machine()->Load(MachineTypeFrom(rep_type));
LowerLoadOp(rep, node, load_op, rep_type);
break;
}
case IrOpcode::kUnalignedLoad: {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
load_op = machine()->UnalignedLoad(MachineTypeFrom(rep_type));
LowerLoadOp(rep, node, load_op, rep_type);
break;
}
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore:
case IrOpcode::kProtectedStore: {
LowerStoreOp(node);
case IrOpcode::kStore: {
// For store operation, use replacement type of its input instead of the
// one of its effected node.
DCHECK_LT(2, node->InputCount());
SimdType input_rep_type = ReplacementType(node->InputAt(2));
if (input_rep_type != rep_type)
replacements_[node->id()].type = input_rep_type;
MachineRepresentation rep =
StoreRepresentationOf(node->op()).representation();
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
store_op = machine()->Store(
StoreRepresentation(MachineTypeFrom(input_rep_type).representation(),
write_barrier_kind));
LowerStoreOp(rep, node, store_op, input_rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
// For store operation, use replacement type of its input instead of the
// one of its effected node.
DCHECK_LT(2, node->InputCount());
SimdType input_rep_type = ReplacementType(node->InputAt(2));
if (input_rep_type != rep_type)
replacements_[node->id()].type = input_rep_type;
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
store_op = machine()->UnalignedStore(
MachineTypeFrom(input_rep_type).representation());
LowerStoreOp(rep, node, store_op, input_rep_type);
break;
}
case IrOpcode::kReturn: {
@@ -73,8 +73,10 @@ class SimdScalarLowering {
void PreparePhiReplacement(Node* phi);
void SetLoweredType(Node* node, Node* output);
void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
void LowerLoadOp(Node* node, SimdType type);
void LowerStoreOp(Node* node);
void LowerLoadOp(MachineRepresentation rep, Node* node,
const Operator* load_op, SimdType type);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
bool not_horizontal = true);
void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
@@ -1138,6 +1138,11 @@ class WasmDecoder : public Decoder {
case kSimdPrefix: {
opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
switch (opcode) {
case kExprI32AtomicStore:
case kExprI32AtomicStore8U:
case kExprI32AtomicStore16U:
case kExprS128StoreMem:
return {2, 0};
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(DECLARE_OPCODE_CASE)
return {1, 1};
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(DECLARE_OPCODE_CASE)
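// For reference: these pairs are read here as {operands popped, results
// pushed} for the decoded opcode (an assumption based on the cases shown,
// not on the full decoder). Under that reading, a store such as
// kExprS128StoreMem pops an address and a value and pushes nothing, hence
// {2, 0}, while the SIMD_1_OPERAND_1_PARAM group pops one value and pushes
// one result, hence {1, 1}.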
@@ -1815,39 +1815,6 @@ class ThreadImpl {
REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
#undef REPLACE_LANE_CASE
case kExprS128LoadMem:
return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
MachineRepresentation::kSimd128);
case kExprS128StoreMem:
return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
MachineRepresentation::kSimd128);
#define SHIFT_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
++len; \
WasmValue v = Pop(); \
stype s = v.to_s128().to_##name(); \
stype res; \
for (size_t i = 0; i < count; ++i) { \
auto a = s.val[i]; \
res.val[i] = expr; \
} \
Push(WasmValue(Simd128(res))); \
return true; \
}
SHIFT_CASE(I32x4Shl, i32x4, int4, 4, a << imm.shift)
SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
static_cast<uint32_t>(a) >> imm.shift)
SHIFT_CASE(I16x8Shl, i16x8, int8, 8, a << imm.shift)
SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
static_cast<uint16_t>(a) >> imm.shift)
SHIFT_CASE(I8x16Shl, i8x16, int16, 16, a << imm.shift)
SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
static_cast<uint8_t>(a) >> imm.shift)
#undef SHIFT_CASE
default:
return false;
}
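// For reference, a minimal standalone sketch of what a single SHIFT_CASE
// expansion computes (here the I32x4Shl flavour, `a << imm.shift`), using a
// plain 4-lane array in place of the interpreter's Simd128/int4 types. All
// names below are illustrative stand-ins, not V8 identifiers.
#include <cstdint>
#include <cstdio>

int main() {
  int32_t lanes[4] = {1, 2, 3, 4};  // stands in for s.val[i]
  const uint32_t shift = 1;         // stands in for imm.shift
  int32_t res[4];
  for (int i = 0; i < 4; ++i) {
    res[i] = lanes[i] << shift;     // the per-lane `expr` for I32x4Shl
  }
  for (int i = 0; i < 4; ++i) {
    std::printf("%d ", res[i]);     // prints: 2 4 6 8
  }
  std::printf("\n");
  return 0;
}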
@@ -428,8 +428,7 @@ struct GetAsmJsOpcodeSigIndex {
struct GetSimdOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
kSigEnum_None;
return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) kSigEnum_None;
#undef CASE
}
};
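// For reference, each CASE above expands to a fragment of the form
// "opcode == (opc & 0xFF) ? kSigEnum_##sig :", so the FOREACH_* invocation
// builds one chained conditional expression that falls through to
// kSigEnum_None. A hand-expanded sketch with illustrative (not real) opcode
// bytes and signature names:
//
//   return opcode == 0x00 ? kSigEnum_s_s
//        : opcode == 0x01 ? kSigEnum_s_ss
//        : kSigEnum_None;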
@@ -545,7 +545,6 @@ using WasmName = Vector<const char>;
V(s_i, kWasmS128, kWasmI32) \
V(s_si, kWasmS128, kWasmS128, kWasmI32) \
V(i_s, kWasmI32, kWasmS128) \
V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
#define FOREACH_PREFIX(V) \
@@ -1090,17 +1090,17 @@ void RunI32x4ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}
WASM_SIMD_TEST(I32x4Shl) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I32x4Shl) {
RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4Shl,
LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I32x4ShrS) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I32x4ShrS) {
RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4ShrS,
ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I32x4ShrU) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I32x4ShrU) {
RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4ShrU,
LogicalShiftRight, 1);
}
@@ -1350,17 +1350,17 @@ void RunI16x8ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}
WASM_SIMD_TEST(I16x8Shl) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I16x8Shl) {
RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8Shl,
LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I16x8ShrS) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I16x8ShrS) {
RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8ShrS,
ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I16x8ShrU) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I16x8ShrU) {
RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8ShrU,
LogicalShiftRight, 1);
}
@@ -1569,17 +1569,17 @@ void RunI8x16ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Shl) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I8x16Shl) {
RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16Shl,
LogicalShiftLeft, 1);
}
WASM_SIMD_TEST(I8x16ShrS) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I8x16ShrS) {
RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16ShrS,
ArithmeticShiftRight, 1);
}
WASM_SIMD_TEST(I8x16ShrU) {
WASM_SIMD_COMPILED_AND_LOWERED_TEST(I8x16ShrU) {
RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16ShrU,
LogicalShiftRight, 1);
}
@@ -2280,7 +2280,7 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
WASM_SIMD_TEST(SimdLoadStoreLoad) {
WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
WasmRunner<int32_t> r(execution_mode, lower_simd);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));