Commit 04568c52 authored by bbudge's avatar bbudge Committed by Commit bot

[Turbofan] Add more integer SIMD operations for ARM.

- Adds logical and arithmetic shifts for all integer types.
- Adds min and max for all integer types.
- Adds saturating add and subtract for small integer types.
- Removes lane operations from the MachineOperatorCache.

LOG=N
BUG=v8:4124

Review-Url: https://codereview.chromium.org/2668013003
Cr-Commit-Position: refs/heads/master@{#43005}
parent de700757
......@@ -1582,6 +1582,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kArmInt32x4ShiftLeftByScalar: {
__ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kArmInt32x4ShiftRightByScalar: {
__ vshr(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kArmInt32x4Add: {
__ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
......@@ -1630,6 +1640,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kArmUint32x4ShiftRightByScalar: {
__ vshr(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kArmUint32x4Min: {
__ vmin(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint32x4Max: {
__ vmax(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint32x4GreaterThan: {
__ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
......@@ -1668,16 +1693,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kArmInt16x8ShiftLeftByScalar: {
__ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kArmInt16x8ShiftRightByScalar: {
__ vshr(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kArmInt16x8Add: {
__ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt16x8AddSaturate: {
__ vqadd(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt16x8Sub: {
__ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt16x8SubSaturate: {
__ vqsub(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt16x8Mul: {
__ vmul(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
......@@ -1716,6 +1761,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kArmUint16x8ShiftRightByScalar: {
__ vshr(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kArmUint16x8AddSaturate: {
__ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint16x8SubSaturate: {
__ vqsub(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint16x8Min: {
__ vmin(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint16x8Max: {
__ vmax(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint16x8GreaterThan: {
__ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
......@@ -1745,16 +1815,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kArmInt8x16ShiftLeftByScalar: {
__ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
case kArmInt8x16ShiftRightByScalar: {
__ vshr(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
case kArmInt8x16Add: {
__ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt8x16AddSaturate: {
__ vqadd(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt8x16Sub: {
__ vsub(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt8x16SubSaturate: {
__ vqsub(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt8x16Mul: {
__ vmul(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
......@@ -1792,6 +1882,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
case kArmUint8x16ShiftRightByScalar: {
__ vshr(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
case kArmUint8x16AddSaturate: {
__ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint8x16SubSaturate: {
__ vqsub(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint8x16Min: {
__ vmin(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint8x16Max: {
__ vmax(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmUint8x16GreaterThan: {
__ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
......
......@@ -137,6 +137,8 @@ namespace compiler {
V(ArmInt32x4FromFloat32x4) \
V(ArmUint32x4FromFloat32x4) \
V(ArmInt32x4Neg) \
V(ArmInt32x4ShiftLeftByScalar) \
V(ArmInt32x4ShiftRightByScalar) \
V(ArmInt32x4Add) \
V(ArmInt32x4Sub) \
V(ArmInt32x4Mul) \
......@@ -146,6 +148,9 @@ namespace compiler {
V(ArmInt32x4NotEqual) \
V(ArmInt32x4GreaterThan) \
V(ArmInt32x4GreaterThanOrEqual) \
V(ArmUint32x4ShiftRightByScalar) \
V(ArmUint32x4Min) \
V(ArmUint32x4Max) \
V(ArmUint32x4GreaterThan) \
V(ArmUint32x4GreaterThanOrEqual) \
V(ArmSimd32x4Select) \
......@@ -153,8 +158,12 @@ namespace compiler {
V(ArmInt16x8ExtractLane) \
V(ArmInt16x8ReplaceLane) \
V(ArmInt16x8Neg) \
V(ArmInt16x8ShiftLeftByScalar) \
V(ArmInt16x8ShiftRightByScalar) \
V(ArmInt16x8Add) \
V(ArmInt16x8AddSaturate) \
V(ArmInt16x8Sub) \
V(ArmInt16x8SubSaturate) \
V(ArmInt16x8Mul) \
V(ArmInt16x8Min) \
V(ArmInt16x8Max) \
......@@ -162,14 +171,23 @@ namespace compiler {
V(ArmInt16x8NotEqual) \
V(ArmInt16x8GreaterThan) \
V(ArmInt16x8GreaterThanOrEqual) \
V(ArmUint16x8ShiftRightByScalar) \
V(ArmUint16x8AddSaturate) \
V(ArmUint16x8SubSaturate) \
V(ArmUint16x8Min) \
V(ArmUint16x8Max) \
V(ArmUint16x8GreaterThan) \
V(ArmUint16x8GreaterThanOrEqual) \
V(ArmInt8x16Splat) \
V(ArmInt8x16ExtractLane) \
V(ArmInt8x16ReplaceLane) \
V(ArmInt8x16Neg) \
V(ArmInt8x16ShiftLeftByScalar) \
V(ArmInt8x16ShiftRightByScalar) \
V(ArmInt8x16Add) \
V(ArmInt8x16AddSaturate) \
V(ArmInt8x16Sub) \
V(ArmInt8x16SubSaturate) \
V(ArmInt8x16Mul) \
V(ArmInt8x16Min) \
V(ArmInt8x16Max) \
......@@ -177,6 +195,11 @@ namespace compiler {
V(ArmInt8x16NotEqual) \
V(ArmInt8x16GreaterThan) \
V(ArmInt8x16GreaterThanOrEqual) \
V(ArmUint8x16ShiftRightByScalar) \
V(ArmUint8x16AddSaturate) \
V(ArmUint8x16SubSaturate) \
V(ArmUint8x16Min) \
V(ArmUint8x16Max) \
V(ArmUint8x16GreaterThan) \
V(ArmUint8x16GreaterThanOrEqual)
......
......@@ -125,6 +125,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmInt32x4FromFloat32x4:
case kArmUint32x4FromFloat32x4:
case kArmInt32x4Neg:
case kArmInt32x4ShiftLeftByScalar:
case kArmInt32x4ShiftRightByScalar:
case kArmInt32x4Add:
case kArmInt32x4Sub:
case kArmInt32x4Mul:
......@@ -134,6 +136,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmInt32x4NotEqual:
case kArmInt32x4GreaterThan:
case kArmInt32x4GreaterThanOrEqual:
case kArmUint32x4ShiftRightByScalar:
case kArmUint32x4Min:
case kArmUint32x4Max:
case kArmUint32x4GreaterThan:
case kArmUint32x4GreaterThanOrEqual:
case kArmSimd32x4Select:
......@@ -141,8 +146,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmInt16x8ExtractLane:
case kArmInt16x8ReplaceLane:
case kArmInt16x8Neg:
case kArmInt16x8ShiftLeftByScalar:
case kArmInt16x8ShiftRightByScalar:
case kArmInt16x8Add:
case kArmInt16x8AddSaturate:
case kArmInt16x8Sub:
case kArmInt16x8SubSaturate:
case kArmInt16x8Mul:
case kArmInt16x8Min:
case kArmInt16x8Max:
......@@ -150,14 +159,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmInt16x8NotEqual:
case kArmInt16x8GreaterThan:
case kArmInt16x8GreaterThanOrEqual:
case kArmUint16x8ShiftRightByScalar:
case kArmUint16x8AddSaturate:
case kArmUint16x8SubSaturate:
case kArmUint16x8Min:
case kArmUint16x8Max:
case kArmUint16x8GreaterThan:
case kArmUint16x8GreaterThanOrEqual:
case kArmInt8x16Splat:
case kArmInt8x16ExtractLane:
case kArmInt8x16ReplaceLane:
case kArmInt8x16Neg:
case kArmInt8x16ShiftLeftByScalar:
case kArmInt8x16ShiftRightByScalar:
case kArmInt8x16Add:
case kArmInt8x16AddSaturate:
case kArmInt8x16Sub:
case kArmInt8x16SubSaturate:
case kArmInt8x16Mul:
case kArmInt8x16Min:
case kArmInt8x16Max:
......@@ -165,6 +183,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmInt8x16NotEqual:
case kArmInt8x16GreaterThan:
case kArmInt8x16GreaterThanOrEqual:
case kArmUint8x16ShiftRightByScalar:
case kArmUint8x16AddSaturate:
case kArmUint8x16SubSaturate:
case kArmUint8x16Min:
case kArmUint8x16Max:
case kArmUint8x16GreaterThan:
case kArmUint8x16GreaterThanOrEqual:
return kNoOpcodeFlags;
......
......@@ -2190,10 +2190,14 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
V(Int32x4NotEqual) \
V(Int32x4GreaterThan) \
V(Int32x4GreaterThanOrEqual) \
V(Uint32x4Min) \
V(Uint32x4Max) \
V(Uint32x4GreaterThan) \
V(Uint32x4GreaterThanOrEqual) \
V(Int16x8Add) \
V(Int16x8AddSaturate) \
V(Int16x8Sub) \
V(Int16x8SubSaturate) \
V(Int16x8Mul) \
V(Int16x8Min) \
V(Int16x8Max) \
......@@ -2201,10 +2205,16 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
V(Int16x8NotEqual) \
V(Int16x8GreaterThan) \
V(Int16x8GreaterThanOrEqual) \
V(Uint16x8AddSaturate) \
V(Uint16x8SubSaturate) \
V(Uint16x8Min) \
V(Uint16x8Max) \
V(Uint16x8GreaterThan) \
V(Uint16x8GreaterThanOrEqual) \
V(Int8x16Add) \
V(Int8x16AddSaturate) \
V(Int8x16Sub) \
V(Int8x16SubSaturate) \
V(Int8x16Mul) \
V(Int8x16Min) \
V(Int8x16Max) \
......@@ -2212,9 +2222,24 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
V(Int8x16NotEqual) \
V(Int8x16GreaterThan) \
V(Int8x16GreaterThanOrEqual) \
V(Uint8x16AddSaturate) \
V(Uint8x16SubSaturate) \
V(Uint8x16Min) \
V(Uint8x16Max) \
V(Uint8x16GreaterThan) \
V(Uint8x16GreaterThanOrEqual)
// SIMD shift-by-scalar operations handled uniformly below. Only right shifts
// have separate unsigned (logical) variants; a left shift is the same
// operation for signed and unsigned lanes, so the Int form covers both.
#define SIMD_SHIFT_OP_LIST(V) \
V(Int32x4ShiftLeftByScalar) \
V(Int32x4ShiftRightByScalar) \
V(Uint32x4ShiftRightByScalar) \
V(Int16x8ShiftLeftByScalar) \
V(Int16x8ShiftRightByScalar) \
V(Uint16x8ShiftRightByScalar) \
V(Int8x16ShiftLeftByScalar) \
V(Int8x16ShiftRightByScalar) \
V(Uint8x16ShiftRightByScalar)
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::VisitCreate##Type(Node* node) { \
VisitRR(this, kArm##Type##Splat, node); \
......@@ -2250,6 +2275,13 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
// Stamps out one InstructionSelector::Visit* method per shift op in
// SIMD_SHIFT_OP_LIST, routing each through the VisitRRI helper (the shift
// amount travels as the node's third, immediate-like input — see the
// InputInt3/4/5 accessors used by the code generator).
#define SIMD_VISIT_SHIFT_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
VisitRRI(this, kArm##Name, node); \
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
void InstructionSelector::VisitSimd32x4Select(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmSimd32x4Select, g.DefineAsRegister(node),
......
......@@ -67,6 +67,14 @@ class InstructionOperandConverter {
return static_cast<int16_t>(InputInt32(index));
}
// Reads input operand |index| as a 3-bit value (0-7); used by the ARM code
// generator for per-lane shift amounts on 8-bit SIMD lanes.
uint8_t InputInt3(size_t index) {
return static_cast<uint8_t>(InputInt32(index) & 0x7);
}
// Reads input operand |index| as a 4-bit value (0-15); used by the ARM code
// generator for per-lane shift amounts on 16-bit SIMD lanes.
uint8_t InputInt4(size_t index) {
return static_cast<uint8_t>(InputInt32(index) & 0xF);
}
// Reads input operand |index| as a 5-bit value (0-31); used by the ARM code
// generator for per-lane shift amounts on 32-bit SIMD lanes.
uint8_t InputInt5(size_t index) {
return static_cast<uint8_t>(InputInt32(index) & 0x1F);
}
......
......@@ -1489,6 +1489,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitUint32x4FromFloat32x4(node);
case IrOpcode::kInt32x4Neg:
return MarkAsSimd128(node), VisitInt32x4Neg(node);
case IrOpcode::kInt32x4ShiftLeftByScalar:
return MarkAsSimd128(node), VisitInt32x4ShiftLeftByScalar(node);
case IrOpcode::kInt32x4ShiftRightByScalar:
return MarkAsSimd128(node), VisitInt32x4ShiftRightByScalar(node);
case IrOpcode::kInt32x4Add:
return MarkAsSimd128(node), VisitInt32x4Add(node);
case IrOpcode::kInt32x4Sub:
......@@ -1507,6 +1511,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitInt32x4GreaterThan(node);
case IrOpcode::kInt32x4GreaterThanOrEqual:
return MarkAsSimd128(node), VisitInt32x4GreaterThanOrEqual(node);
case IrOpcode::kUint32x4ShiftRightByScalar:
return MarkAsSimd128(node), VisitUint32x4ShiftRightByScalar(node);
case IrOpcode::kUint32x4Min:
return MarkAsSimd128(node), VisitUint32x4Min(node);
case IrOpcode::kUint32x4Max:
return MarkAsSimd128(node), VisitUint32x4Max(node);
case IrOpcode::kUint32x4GreaterThan:
return MarkAsSimd128(node), VisitUint32x4GreaterThan(node);
case IrOpcode::kUint32x4GreaterThanOrEqual:
......@@ -1521,10 +1531,18 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitInt16x8ReplaceLane(node);
case IrOpcode::kInt16x8Neg:
return MarkAsSimd128(node), VisitInt16x8Neg(node);
case IrOpcode::kInt16x8ShiftLeftByScalar:
return MarkAsSimd128(node), VisitInt16x8ShiftLeftByScalar(node);
case IrOpcode::kInt16x8ShiftRightByScalar:
return MarkAsSimd128(node), VisitInt16x8ShiftRightByScalar(node);
case IrOpcode::kInt16x8Add:
return MarkAsSimd128(node), VisitInt16x8Add(node);
case IrOpcode::kInt16x8AddSaturate:
return MarkAsSimd128(node), VisitInt16x8AddSaturate(node);
case IrOpcode::kInt16x8Sub:
return MarkAsSimd128(node), VisitInt16x8Sub(node);
case IrOpcode::kInt16x8SubSaturate:
return MarkAsSimd128(node), VisitInt16x8SubSaturate(node);
case IrOpcode::kInt16x8Mul:
return MarkAsSimd128(node), VisitInt16x8Mul(node);
case IrOpcode::kInt16x8Min:
......@@ -1539,6 +1557,16 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitInt16x8GreaterThan(node);
case IrOpcode::kInt16x8GreaterThanOrEqual:
return MarkAsSimd128(node), VisitInt16x8GreaterThanOrEqual(node);
case IrOpcode::kUint16x8ShiftRightByScalar:
return MarkAsSimd128(node), VisitUint16x8ShiftRightByScalar(node);
case IrOpcode::kUint16x8AddSaturate:
return MarkAsSimd128(node), VisitUint16x8AddSaturate(node);
case IrOpcode::kUint16x8SubSaturate:
return MarkAsSimd128(node), VisitUint16x8SubSaturate(node);
case IrOpcode::kUint16x8Min:
return MarkAsSimd128(node), VisitUint16x8Min(node);
case IrOpcode::kUint16x8Max:
return MarkAsSimd128(node), VisitUint16x8Max(node);
case IrOpcode::kUint16x8GreaterThan:
return MarkAsSimd128(node), VisitUint16x8GreaterThan(node);
case IrOpcode::kUint16x8GreaterThanOrEqual:
......@@ -1551,10 +1579,18 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitInt8x16ReplaceLane(node);
case IrOpcode::kInt8x16Neg:
return MarkAsSimd128(node), VisitInt8x16Neg(node);
case IrOpcode::kInt8x16ShiftLeftByScalar:
return MarkAsSimd128(node), VisitInt8x16ShiftLeftByScalar(node);
case IrOpcode::kInt8x16ShiftRightByScalar:
return MarkAsSimd128(node), VisitInt8x16ShiftRightByScalar(node);
case IrOpcode::kInt8x16Add:
return MarkAsSimd128(node), VisitInt8x16Add(node);
case IrOpcode::kInt8x16AddSaturate:
return MarkAsSimd128(node), VisitInt8x16AddSaturate(node);
case IrOpcode::kInt8x16Sub:
return MarkAsSimd128(node), VisitInt8x16Sub(node);
case IrOpcode::kInt8x16SubSaturate:
return MarkAsSimd128(node), VisitInt8x16SubSaturate(node);
case IrOpcode::kInt8x16Mul:
return MarkAsSimd128(node), VisitInt8x16Mul(node);
case IrOpcode::kInt8x16Min:
......@@ -1569,6 +1605,16 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitInt8x16GreaterThan(node);
case IrOpcode::kInt8x16GreaterThanOrEqual:
return MarkAsSimd128(node), VisitInt8x16GreaterThanOrEqual(node);
case IrOpcode::kUint8x16ShiftRightByScalar:
return MarkAsSimd128(node), VisitUint8x16ShiftRightByScalar(node);
case IrOpcode::kUint8x16AddSaturate:
return MarkAsSimd128(node), VisitUint8x16AddSaturate(node);
case IrOpcode::kUint8x16SubSaturate:
return MarkAsSimd128(node), VisitUint8x16SubSaturate(node);
case IrOpcode::kUint8x16Min:
return MarkAsSimd128(node), VisitUint8x16Min(node);
case IrOpcode::kUint8x16Max:
return MarkAsSimd128(node), VisitUint8x16Max(node);
case IrOpcode::kUint8x16GreaterThan:
return MarkAsSimd128(node), VisitUint8x16GreaterThan(node);
case IrOpcode::kUint8x16GreaterThanOrEqual:
......@@ -1959,6 +2005,14 @@ void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
// Generic fallbacks: Int32x4 SIMD visitors abort on targets that do not
// override them with an architecture-specific implementation.
void InstructionSelector::VisitInt32x4Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }
......@@ -1983,6 +2037,14 @@ void InstructionSelector::VisitInt32x4GreaterThanOrEqual(Node* node) {
UNIMPLEMENTED();
}
// Generic fallbacks: Uint32x4 SIMD visitors abort on targets without an
// architecture-specific implementation.
void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint32x4GreaterThan(Node* node) {
UNIMPLEMENTED();
}
......@@ -2005,10 +2067,26 @@ void InstructionSelector::VisitInt16x8ReplaceLane(Node* node) {
// Generic fallbacks: Int16x8 SIMD visitors abort on targets without an
// architecture-specific implementation.
void InstructionSelector::VisitInt16x8Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt16x8ShiftLeftByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt16x8ShiftRightByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt16x8Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt16x8AddSaturate(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt16x8Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt16x8SubSaturate(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt16x8Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt16x8Max(Node* node) { UNIMPLEMENTED(); }
......@@ -2033,6 +2111,22 @@ void InstructionSelector::VisitInt16x8GreaterThanOrEqual(Node* node) {
UNIMPLEMENTED();
}
// Generic fallbacks: Uint16x8 SIMD visitors abort on targets without an
// architecture-specific implementation.
void InstructionSelector::VisitUint16x8ShiftRightByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitUint16x8AddSaturate(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitUint16x8SubSaturate(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitUint16x8Max(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint16x8Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint16x8GreaterThan(Node* node) {
UNIMPLEMENTED();
}
......@@ -2053,10 +2147,26 @@ void InstructionSelector::VisitInt8x16ReplaceLane(Node* node) {
// Generic fallbacks: Int8x16 SIMD visitors abort on targets without an
// architecture-specific implementation.
void InstructionSelector::VisitInt8x16Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt8x16ShiftLeftByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt8x16ShiftRightByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt8x16Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt8x16AddSaturate(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt8x16Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt8x16SubSaturate(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt8x16Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt8x16Max(Node* node) { UNIMPLEMENTED(); }
......@@ -2081,6 +2191,22 @@ void InstructionSelector::VisitInt8x16GreaterThanOrEqual(Node* node) {
UNIMPLEMENTED();
}
// Generic fallbacks: Uint8x16 SIMD visitors abort on targets without an
// architecture-specific implementation.
void InstructionSelector::VisitUint8x16ShiftRightByScalar(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitUint8x16AddSaturate(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitUint8x16SubSaturate(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitUint8x16Max(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint8x16Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint8x16GreaterThan(Node* node) {
UNIMPLEMENTED();
}
......
......@@ -249,8 +249,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int32x4Mul, Operator::kCommutative, 2, 0, 1) \
V(Int32x4Min, Operator::kCommutative, 2, 0, 1) \
V(Int32x4Max, Operator::kCommutative, 2, 0, 1) \
V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int32x4Equal, Operator::kCommutative, 2, 0, 1) \
V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
......@@ -260,16 +258,12 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1) \
V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
......@@ -289,8 +283,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int16x8Mul, Operator::kCommutative, 2, 0, 1) \
V(Int16x8Min, Operator::kCommutative, 2, 0, 1) \
V(Int16x8Max, Operator::kCommutative, 2, 0, 1) \
V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int16x8Equal, Operator::kCommutative, 2, 0, 1) \
V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
......@@ -304,15 +296,11 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8Min, Operator::kCommutative, 2, 0, 1) \
V(Uint16x8Max, Operator::kCommutative, 2, 0, 1) \
V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1) \
V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
......@@ -332,8 +320,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int8x16Mul, Operator::kCommutative, 2, 0, 1) \
V(Int8x16Min, Operator::kCommutative, 2, 0, 1) \
V(Int8x16Max, Operator::kCommutative, 2, 0, 1) \
V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Int8x16Equal, Operator::kCommutative, 2, 0, 1) \
V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
......@@ -347,15 +333,11 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16Min, Operator::kCommutative, 2, 0, 1) \
V(Uint8x16Max, Operator::kCommutative, 2, 0, 1) \
V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1) \
V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
......@@ -453,8 +435,16 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
#define SIMD_LANE_OP_LIST(V) \
V(Float32x4, 4) \
V(Int32x4, 4) \
V(Bool32x4, 4) \
V(Int16x8, 8) \
V(Int8x16, 16)
V(Bool16x8, 8) \
V(Int8x16, 16) \
V(Bool8x16, 16)
// (format suffix, lane width in bits) pairs used to stamp out the
// shift-by-scalar operator factories below; the bit width bounds the
// DCHECKed shift range.
#define SIMD_SHIFT_OP_LIST(V) \
V(32x4, 32) \
V(16x8, 16) \
V(8x16, 8)
// Stack slot sizes (in bytes) that get cached operators.
#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
......@@ -632,24 +622,6 @@ struct MachineOperatorGlobalCache {
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef STORE
#define SIMD_LANE_OPS(Name, lane_count) \
struct Name##ExtractLaneOperator final : public Operator1<int> { \
static int lane_number; \
Name##ExtractLaneOperator() \
: Operator1<int>(IrOpcode::k##Name##ExtractLane, Operator::kPure, \
"ExtractLane", 1, 0, 0, 1, 0, 0, lane_number++) {} \
}; \
struct Name##ReplaceLaneOperator final : public Operator1<int> { \
static int lane_number; \
Name##ReplaceLaneOperator() \
: Operator1<int>(IrOpcode::k##Name##ReplaceLane, Operator::kPure, \
"ReplaceLane", 2, 0, 0, 1, 0, 0, lane_number++) {} \
}; \
Name##ExtractLaneOperator k##Name##ExtractLane[lane_count]; \
Name##ReplaceLaneOperator k##Name##ReplaceLane[lane_count];
SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
#undef SIMD_LANE_OPS
struct DebugBreakOperator : public Operator {
DebugBreakOperator()
: Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
......@@ -877,22 +849,49 @@ const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
return nullptr;
}
#define SIMD_LANE_OPS(Name, lane_count) \
const Operator* MachineOperatorBuilder::Name##ExtractLane( \
int32_t lane_number) { \
DCHECK(0 <= lane_number && lane_number < lane_count); \
return &cache_.k##Name##ExtractLane[lane_number]; \
} \
const Operator* MachineOperatorBuilder::Name##ReplaceLane( \
int32_t lane_number) { \
DCHECK(0 <= lane_number && lane_number < lane_count); \
return &cache_.k##Name##ReplaceLane[lane_number]; \
} \
int MachineOperatorGlobalCache::Name##ExtractLaneOperator::lane_number = 0; \
int MachineOperatorGlobalCache::Name##ReplaceLaneOperator::lane_number = 0;
// Lane-access operator factories. The lane index is carried as the
// Operator1<int32_t> parameter, and each call zone-allocates a fresh operator
// instead of using the global MachineOperatorCache (which holds only
// parameterless singletons).
// NOTE(review): callers get distinct pointers for the same (opcode, lane)
// pair — confirm no downstream code relies on pointer identity of these
// operators rather than opcode/parameter equality.
#define SIMD_LANE_OPS(Type, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane( \
int32_t lane_index) { \
DCHECK(0 <= lane_index && lane_index < lane_count); \
return new (zone_) \
Operator1<int32_t>(IrOpcode::k##Type##ExtractLane, Operator::kPure, \
"Extract lane", 1, 0, 0, 1, 0, 0, lane_index); \
} \
const Operator* MachineOperatorBuilder::Type##ReplaceLane( \
int32_t lane_index) { \
DCHECK(0 <= lane_index && lane_index < lane_count); \
return new (zone_) \
Operator1<int32_t>(IrOpcode::k##Type##ReplaceLane, Operator::kPure, \
"Replace lane", 2, 0, 0, 1, 0, 0, lane_index); \
}
SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
#undef SIMD_LANE_OPS
// Shift-by-scalar operator factories, one triple (signed left, arithmetic
// right, logical right) per lane format. The constant shift amount is baked
// into a zone-allocated Operator1<int32_t>.
// NOTE(review): the range checks are inconsistent — arithmetic shift right
// DCHECKs (0 < shift && shift <= bits) while left shift and logical shift
// right DCHECK (0 <= shift && shift < bits). The ARM code generator masks
// all three the same way (InputInt3/4/5, i.e. modulo `bits`), so a shift of
// exactly `bits` would wrap to 0. Confirm the intended immediate range
// (NEON VSHR encodes 1..bits, VSHL encodes 0..bits-1) and align the three
// checks accordingly.
#define SIMD_SHIFT_OPS(format, bits) \
const Operator* MachineOperatorBuilder::Int##format##ShiftLeftByScalar( \
int32_t shift) { \
DCHECK(0 <= shift && shift < bits); \
return new (zone_) Operator1<int32_t>( \
IrOpcode::kInt##format##ShiftLeftByScalar, Operator::kPure, \
"Shift left", 1, 0, 0, 1, 0, 0, shift); \
} \
const Operator* MachineOperatorBuilder::Int##format##ShiftRightByScalar( \
int32_t shift) { \
DCHECK(0 < shift && shift <= bits); \
return new (zone_) Operator1<int32_t>( \
IrOpcode::kInt##format##ShiftRightByScalar, Operator::kPure, \
"Arithmetic shift right", 1, 0, 0, 1, 0, 0, shift); \
} \
const Operator* MachineOperatorBuilder::Uint##format##ShiftRightByScalar( \
int32_t shift) { \
DCHECK(0 <= shift && shift < bits); \
return new (zone_) Operator1<int32_t>( \
IrOpcode::kUint##format##ShiftRightByScalar, Operator::kPure, \
"Shift right", 1, 0, 0, 1, 0, 0, shift); \
}
SIMD_SHIFT_OP_LIST(SIMD_SHIFT_OPS)
#undef SIMD_SHIFT_OPS
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -459,8 +459,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Int32x4Mul();
const Operator* Int32x4Min();
const Operator* Int32x4Max();
const Operator* Int32x4ShiftLeftByScalar();
const Operator* Int32x4ShiftRightByScalar();
const Operator* Int32x4ShiftLeftByScalar(int32_t);
const Operator* Int32x4ShiftRightByScalar(int32_t);
const Operator* Int32x4Equal();
const Operator* Int32x4NotEqual();
const Operator* Int32x4LessThan();
......@@ -471,8 +471,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Uint32x4Min();
const Operator* Uint32x4Max();
const Operator* Uint32x4ShiftLeftByScalar();
const Operator* Uint32x4ShiftRightByScalar();
const Operator* Uint32x4ShiftRightByScalar(int32_t);
const Operator* Uint32x4LessThan();
const Operator* Uint32x4LessThanOrEqual();
const Operator* Uint32x4GreaterThan();
......@@ -480,8 +479,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Uint32x4FromFloat32x4();
const Operator* CreateBool32x4();
const Operator* Bool32x4ExtractLane();
const Operator* Bool32x4ReplaceLane();
const Operator* Bool32x4ExtractLane(int32_t);
const Operator* Bool32x4ReplaceLane(int32_t);
const Operator* Bool32x4And();
const Operator* Bool32x4Or();
const Operator* Bool32x4Xor();
......@@ -504,8 +503,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Int16x8Mul();
const Operator* Int16x8Min();
const Operator* Int16x8Max();
const Operator* Int16x8ShiftLeftByScalar();
const Operator* Int16x8ShiftRightByScalar();
const Operator* Int16x8ShiftLeftByScalar(int32_t);
const Operator* Int16x8ShiftRightByScalar(int32_t);
const Operator* Int16x8Equal();
const Operator* Int16x8NotEqual();
const Operator* Int16x8LessThan();
......@@ -520,16 +519,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Uint16x8SubSaturate();
const Operator* Uint16x8Min();
const Operator* Uint16x8Max();
const Operator* Uint16x8ShiftLeftByScalar();
const Operator* Uint16x8ShiftRightByScalar();
const Operator* Uint16x8ShiftRightByScalar(int32_t);
const Operator* Uint16x8LessThan();
const Operator* Uint16x8LessThanOrEqual();
const Operator* Uint16x8GreaterThan();
const Operator* Uint16x8GreaterThanOrEqual();
const Operator* CreateBool16x8();
const Operator* Bool16x8ExtractLane();
const Operator* Bool16x8ReplaceLane();
const Operator* Bool16x8ExtractLane(int32_t);
const Operator* Bool16x8ReplaceLane(int32_t);
const Operator* Bool16x8And();
const Operator* Bool16x8Or();
const Operator* Bool16x8Xor();
......@@ -552,8 +550,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Int8x16Mul();
const Operator* Int8x16Min();
const Operator* Int8x16Max();
const Operator* Int8x16ShiftLeftByScalar();
const Operator* Int8x16ShiftRightByScalar();
const Operator* Int8x16ShiftLeftByScalar(int32_t);
const Operator* Int8x16ShiftRightByScalar(int32_t);
const Operator* Int8x16Equal();
const Operator* Int8x16NotEqual();
const Operator* Int8x16LessThan();
......@@ -568,16 +566,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Uint8x16SubSaturate();
const Operator* Uint8x16Min();
const Operator* Uint8x16Max();
const Operator* Uint8x16ShiftLeftByScalar();
const Operator* Uint8x16ShiftRightByScalar();
const Operator* Uint8x16ShiftRightByScalar(int32_t);
const Operator* Uint8x16LessThan();
const Operator* Uint8x16LessThanOrEqual();
const Operator* Uint8x16GreaterThan();
const Operator* Uint8x16GreaterThanOrEqual();
const Operator* CreateBool8x16();
const Operator* Bool8x16ExtractLane();
const Operator* Bool8x16ReplaceLane();
const Operator* Bool8x16ExtractLane(int32_t);
const Operator* Bool8x16ReplaceLane(int32_t);
const Operator* Bool8x16And();
const Operator* Bool8x16Or();
const Operator* Bool8x16Xor();
......
......@@ -3423,6 +3423,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI32x4GeS:
return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
inputs[0], inputs[1]);
case wasm::kExprI32x4MinU:
return graph()->NewNode(jsgraph()->machine()->Uint32x4Min(), inputs[0],
inputs[1]);
case wasm::kExprI32x4MaxU:
return graph()->NewNode(jsgraph()->machine()->Uint32x4Max(), inputs[0],
inputs[1]);
case wasm::kExprI32x4LtU:
return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
inputs[1], inputs[0]);
......@@ -3449,9 +3455,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI16x8Add:
return graph()->NewNode(jsgraph()->machine()->Int16x8Add(), inputs[0],
inputs[1]);
case wasm::kExprI16x8AddSaturateS:
return graph()->NewNode(jsgraph()->machine()->Int16x8AddSaturate(),
inputs[0], inputs[1]);
case wasm::kExprI16x8Sub:
return graph()->NewNode(jsgraph()->machine()->Int16x8Sub(), inputs[0],
inputs[1]);
case wasm::kExprI16x8SubSaturateS:
return graph()->NewNode(jsgraph()->machine()->Int16x8SubSaturate(),
inputs[0], inputs[1]);
case wasm::kExprI16x8Mul:
return graph()->NewNode(jsgraph()->machine()->Int16x8Mul(), inputs[0],
inputs[1]);
......@@ -3479,6 +3491,18 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI16x8GeS:
return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
inputs[0], inputs[1]);
case wasm::kExprI16x8AddSaturateU:
return graph()->NewNode(jsgraph()->machine()->Uint16x8AddSaturate(),
inputs[0], inputs[1]);
case wasm::kExprI16x8SubSaturateU:
return graph()->NewNode(jsgraph()->machine()->Uint16x8SubSaturate(),
inputs[0], inputs[1]);
case wasm::kExprI16x8MinU:
return graph()->NewNode(jsgraph()->machine()->Uint16x8Min(), inputs[0],
inputs[1]);
case wasm::kExprI16x8MaxU:
return graph()->NewNode(jsgraph()->machine()->Uint16x8Max(), inputs[0],
inputs[1]);
case wasm::kExprI16x8LtU:
return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
inputs[1], inputs[0]);
......@@ -3504,9 +3528,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI8x16Add:
return graph()->NewNode(jsgraph()->machine()->Int8x16Add(), inputs[0],
inputs[1]);
case wasm::kExprI8x16AddSaturateS:
return graph()->NewNode(jsgraph()->machine()->Int8x16AddSaturate(),
inputs[0], inputs[1]);
case wasm::kExprI8x16Sub:
return graph()->NewNode(jsgraph()->machine()->Int8x16Sub(), inputs[0],
inputs[1]);
case wasm::kExprI8x16SubSaturateS:
return graph()->NewNode(jsgraph()->machine()->Int8x16SubSaturate(),
inputs[0], inputs[1]);
case wasm::kExprI8x16Mul:
return graph()->NewNode(jsgraph()->machine()->Int8x16Mul(), inputs[0],
inputs[1]);
......@@ -3534,6 +3564,18 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI8x16GeS:
return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
inputs[0], inputs[1]);
case wasm::kExprI8x16AddSaturateU:
return graph()->NewNode(jsgraph()->machine()->Uint8x16AddSaturate(),
inputs[0], inputs[1]);
case wasm::kExprI8x16SubSaturateU:
return graph()->NewNode(jsgraph()->machine()->Uint8x16SubSaturate(),
inputs[0], inputs[1]);
case wasm::kExprI8x16MinU:
return graph()->NewNode(jsgraph()->machine()->Uint8x16Min(), inputs[0],
inputs[1]);
case wasm::kExprI8x16MaxU:
return graph()->NewNode(jsgraph()->machine()->Uint8x16Max(), inputs[0],
inputs[1]);
case wasm::kExprI8x16LtU:
return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
inputs[1], inputs[0]);
......@@ -3586,6 +3628,42 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
}
}
Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
const NodeVector& inputs) {
has_simd_ = true;
switch (opcode) {
case wasm::kExprI32x4Shl:
return graph()->NewNode(
jsgraph()->machine()->Int32x4ShiftLeftByScalar(shift), inputs[0]);
case wasm::kExprI32x4ShrS:
return graph()->NewNode(
jsgraph()->machine()->Int32x4ShiftRightByScalar(shift), inputs[0]);
case wasm::kExprI32x4ShrU:
return graph()->NewNode(
jsgraph()->machine()->Uint32x4ShiftRightByScalar(shift), inputs[0]);
case wasm::kExprI16x8Shl:
return graph()->NewNode(
jsgraph()->machine()->Int16x8ShiftLeftByScalar(shift), inputs[0]);
case wasm::kExprI16x8ShrS:
return graph()->NewNode(
jsgraph()->machine()->Int16x8ShiftRightByScalar(shift), inputs[0]);
case wasm::kExprI16x8ShrU:
return graph()->NewNode(
jsgraph()->machine()->Uint16x8ShiftRightByScalar(shift), inputs[0]);
case wasm::kExprI8x16Shl:
return graph()->NewNode(
jsgraph()->machine()->Int8x16ShiftLeftByScalar(shift), inputs[0]);
case wasm::kExprI8x16ShrS:
return graph()->NewNode(
jsgraph()->machine()->Int8x16ShiftRightByScalar(shift), inputs[0]);
case wasm::kExprI8x16ShrU:
return graph()->NewNode(
jsgraph()->machine()->Uint8x16ShiftRightByScalar(shift), inputs[0]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
}
static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Isolate* isolate, Handle<Code> code,
const char* message, uint32_t index,
......
......@@ -232,6 +232,9 @@ class WasmGraphBuilder {
Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
const NodeVector& inputs);
Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
const NodeVector& inputs);
bool has_simd() const { return has_simd_; }
wasm::ModuleEnv* module_env() const { return module_; }
......
......@@ -149,16 +149,28 @@ struct Control {
(build() ? CheckForException(builder_->func(__VA_ARGS__)) : nullptr)
#define BUILD0(func) (build() ? CheckForException(builder_->func()) : nullptr)
struct LaneOperand {
// Operand for SIMD lane operations.
struct SimdLaneOperand {
uint8_t lane;
unsigned length;
inline LaneOperand(Decoder* decoder, const byte* pc) {
inline SimdLaneOperand(Decoder* decoder, const byte* pc) {
lane = decoder->checked_read_u8(pc, 2, "lane");
length = 1;
}
};
// Operand for SIMD shift operations.
struct SimdShiftOperand {
uint8_t shift;
unsigned length;
inline SimdShiftOperand(Decoder* decoder, const byte* pc) {
shift = decoder->checked_read_u8(pc, 2, "shift");
length = 1;
}
};
// Generic Wasm bytecode decoder with utilities for decoding operands,
// lengths, etc.
class WasmDecoder : public Decoder {
......@@ -350,7 +362,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, WasmOpcode opcode,
LaneOperand& operand) {
SimdLaneOperand& operand) {
uint8_t num_lanes = 0;
switch (opcode) {
case kExprF32x4ExtractLane:
......@@ -372,7 +384,38 @@ class WasmDecoder : public Decoder {
break;
}
if (operand.lane < 0 || operand.lane >= num_lanes) {
error(pc_, pc_ + 2, "invalid lane value");
error(pc_, pc_ + 2, "invalid lane index");
return false;
} else {
return true;
}
}
inline bool Validate(const byte* pc, WasmOpcode opcode,
SimdShiftOperand& operand) {
uint8_t max_shift = 0;
switch (opcode) {
case kExprI32x4Shl:
case kExprI32x4ShrS:
case kExprI32x4ShrU:
max_shift = 32;
break;
case kExprI16x8Shl:
case kExprI16x8ShrS:
case kExprI16x8ShrU:
max_shift = 16;
break;
case kExprI8x16Shl:
case kExprI8x16ShrS:
case kExprI8x16ShrU:
max_shift = 8;
break;
default:
UNREACHABLE();
break;
}
if (operand.shift < 0 || operand.shift >= max_shift) {
error(pc_, pc_ + 2, "invalid shift amount");
return false;
} else {
return true;
......@@ -1369,8 +1412,8 @@ class WasmFullDecoder : public WasmDecoder {
return 1 + operand.length;
}
unsigned ExtractLane(WasmOpcode opcode, ValueType type) {
LaneOperand operand(this, pc_);
unsigned SimdExtractLane(WasmOpcode opcode, ValueType type) {
SimdLaneOperand operand(this, pc_);
if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(1, zone_);
inputs[0] = Pop(0, ValueType::kSimd128).node;
......@@ -1380,8 +1423,8 @@ class WasmFullDecoder : public WasmDecoder {
return operand.length;
}
unsigned ReplaceLane(WasmOpcode opcode, ValueType type) {
LaneOperand operand(this, pc_);
unsigned SimdReplaceLane(WasmOpcode opcode, ValueType type) {
SimdLaneOperand operand(this, pc_);
if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(2, zone_);
inputs[1] = Pop(1, type).node;
......@@ -1392,27 +1435,50 @@ class WasmFullDecoder : public WasmDecoder {
return operand.length;
}
unsigned SimdShiftOp(WasmOpcode opcode) {
SimdShiftOperand operand(this, pc_);
if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(1, zone_);
inputs[0] = Pop(0, ValueType::kSimd128).node;
TFNode* node = BUILD(SimdShiftOp, opcode, operand.shift, inputs);
Push(ValueType::kSimd128, node);
}
return operand.length;
}
unsigned DecodeSimdOpcode(WasmOpcode opcode) {
unsigned len = 0;
switch (opcode) {
case kExprF32x4ExtractLane: {
len = ExtractLane(opcode, ValueType::kFloat32);
len = SimdExtractLane(opcode, ValueType::kFloat32);
break;
}
case kExprI32x4ExtractLane:
case kExprI16x8ExtractLane:
case kExprI8x16ExtractLane: {
len = ExtractLane(opcode, ValueType::kWord32);
len = SimdExtractLane(opcode, ValueType::kWord32);
break;
}
case kExprF32x4ReplaceLane: {
len = ReplaceLane(opcode, ValueType::kFloat32);
len = SimdReplaceLane(opcode, ValueType::kFloat32);
break;
}
case kExprI32x4ReplaceLane:
case kExprI16x8ReplaceLane:
case kExprI8x16ReplaceLane: {
len = ReplaceLane(opcode, ValueType::kWord32);
len = SimdReplaceLane(opcode, ValueType::kWord32);
break;
}
case kExprI32x4Shl:
case kExprI32x4ShrS:
case kExprI32x4ShrU:
case kExprI16x8Shl:
case kExprI16x8ShrS:
case kExprI16x8ShrU:
case kExprI8x16Shl:
case kExprI8x16ShrS:
case kExprI8x16ShrU: {
len = SimdShiftOp(opcode);
break;
}
default: {
......
......@@ -460,9 +460,6 @@ class LocalDeclEncoder {
static_cast<byte>(index)
#define WASM_UNOP(opcode, x) x, static_cast<byte>(opcode)
#define WASM_BINOP(opcode, x, y) x, y, static_cast<byte>(opcode)
#define WASM_SIMD_UNOP(opcode, x) x, kSimdPrefix, static_cast<byte>(opcode)
#define WASM_SIMD_BINOP(opcode, x, y) \
x, y, kSimdPrefix, static_cast<byte>(opcode)
//------------------------------------------------------------------------------
// Int32 operations
......
......@@ -303,8 +303,6 @@ const WasmCodePosition kNoCodePosition = -1;
V(I32x4Mul, 0xe521, s_ss) \
V(I32x4MinS, 0xe522, s_ss) \
V(I32x4MaxS, 0xe523, s_ss) \
V(I32x4Shl, 0xe524, s_si) \
V(I32x4ShrS, 0xe525, s_si) \
V(I32x4Eq, 0xe526, s_ss) \
V(I32x4Ne, 0xe527, s_ss) \
V(I32x4LtS, 0xe528, s_ss) \
......@@ -317,7 +315,6 @@ const WasmCodePosition kNoCodePosition = -1;
V(I32x4SConvertF32x4, 0xe52f, s_s) \
V(I32x4MinU, 0xe530, s_ss) \
V(I32x4MaxU, 0xe531, s_ss) \
V(I32x4ShrU, 0xe532, s_ss) \
V(I32x4LtU, 0xe533, s_ss) \
V(I32x4LeU, 0xe534, s_ss) \
V(I32x4GtU, 0xe535, s_ss) \
......@@ -332,8 +329,6 @@ const WasmCodePosition kNoCodePosition = -1;
V(I16x8Mul, 0xe540, s_ss) \
V(I16x8MinS, 0xe541, s_ss) \
V(I16x8MaxS, 0xe542, s_ss) \
V(I16x8Shl, 0xe543, s_si) \
V(I16x8ShrS, 0xe544, s_si) \
V(I16x8Eq, 0xe545, s_ss) \
V(I16x8Ne, 0xe546, s_ss) \
V(I16x8LtS, 0xe547, s_ss) \
......@@ -347,7 +342,6 @@ const WasmCodePosition kNoCodePosition = -1;
V(I16x8SubSaturateU, 0xe54f, s_ss) \
V(I16x8MinU, 0xe550, s_ss) \
V(I16x8MaxU, 0xe551, s_ss) \
V(I16x8ShrU, 0xe552, s_si) \
V(I16x8LtU, 0xe553, s_ss) \
V(I16x8LeU, 0xe554, s_ss) \
V(I16x8GtU, 0xe555, s_ss) \
......@@ -361,8 +355,6 @@ const WasmCodePosition kNoCodePosition = -1;
V(I8x16Mul, 0xe55f, s_ss) \
V(I8x16MinS, 0xe560, s_ss) \
V(I8x16MaxS, 0xe561, s_ss) \
V(I8x16Shl, 0xe562, s_si) \
V(I8x16ShrS, 0xe563, s_si) \
V(I8x16Eq, 0xe564, s_ss) \
V(I8x16Ne, 0xe565, s_ss) \
V(I8x16LtS, 0xe566, s_ss) \
......@@ -376,7 +368,6 @@ const WasmCodePosition kNoCodePosition = -1;
V(I8x16SubSaturateU, 0xe56e, s_ss) \
V(I8x16MinU, 0xe56f, s_ss) \
V(I8x16MaxU, 0xe570, s_ss) \
V(I8x16ShrU, 0xe571, s_ss) \
V(I8x16LtU, 0xe572, s_ss) \
V(I8x16LeU, 0xe573, s_ss) \
V(I8x16GtU, 0xe574, s_ss) \
......@@ -394,10 +385,19 @@ const WasmCodePosition kNoCodePosition = -1;
V(F32x4ReplaceLane, 0xe502, _) \
V(I32x4ExtractLane, 0xe51c, _) \
V(I32x4ReplaceLane, 0xe51d, _) \
V(I32x4Shl, 0xe524, _) \
V(I32x4ShrS, 0xe525, _) \
V(I32x4ShrU, 0xe532, _) \
V(I16x8ExtractLane, 0xe539, _) \
V(I16x8ReplaceLane, 0xe53a, _) \
V(I16x8Shl, 0xe543, _) \
V(I16x8ShrS, 0xe544, _) \
V(I16x8ShrU, 0xe552, _) \
V(I8x16ExtractLane, 0xe558, _) \
V(I8x16ReplaceLane, 0xe559, _)
V(I8x16ReplaceLane, 0xe559, _) \
V(I8x16Shl, 0xe562, _) \
V(I8x16ShrS, 0xe563, _) \
V(I8x16ShrU, 0xe571, _)
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicAdd8S, 0xe601, i_ii) \
......
......@@ -20,10 +20,13 @@ typedef float (*FloatBinOp)(float, float);
typedef int32_t (*FloatCompareOp)(float, float);
typedef int32_t (*Int32UnOp)(int32_t);
typedef int32_t (*Int32BinOp)(int32_t, int32_t);
typedef int32_t (*Int32ShiftOp)(int32_t, int);
typedef int16_t (*Int16UnOp)(int16_t);
typedef int16_t (*Int16BinOp)(int16_t, int16_t);
typedef int16_t (*Int16ShiftOp)(int16_t, int);
typedef int8_t (*Int8UnOp)(int8_t);
typedef int8_t (*Int8BinOp)(int8_t, int8_t);
typedef int8_t (*Int8ShiftOp)(int8_t, int);
#if V8_TARGET_ARCH_ARM
// Floating point specific value functions.
......@@ -53,6 +56,28 @@ T Mul(T a, T b) {
return a * b;
}
template <typename T>
T Minimum(T a, T b) {
return a <= b ? a : b;
}
template <typename T>
T Maximum(T a, T b) {
return a >= b ? a : b;
}
template <typename T>
T UnsignedMinimum(T a, T b) {
using UnsignedT = typename std::make_unsigned<T>::type;
return static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b) ? a : b;
}
template <typename T>
T UnsignedMaximum(T a, T b) {
using UnsignedT = typename std::make_unsigned<T>::type;
return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? a : b;
}
template <typename T>
T Equal(T a, T b) {
return a == b ? -1 : 0;
......@@ -107,6 +132,61 @@ T UnsignedLessEqual(T a, T b) {
return static_cast<UnsignedT>(a) <= static_cast<UnsignedT>(b) ? -1 : 0;
}
template <typename T>
T LogicalShiftLeft(T a, int shift) {
return a << shift;
}
template <typename T>
T LogicalShiftRight(T a, int shift) {
using UnsignedT = typename std::make_unsigned<T>::type;
return static_cast<UnsignedT>(a) >> shift;
}
template <typename T>
int64_t Widen(T value) {
static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
return static_cast<int64_t>(value);
}
template <typename T>
int64_t UnsignedWiden(T value) {
static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
using UnsignedT = typename std::make_unsigned<T>::type;
return static_cast<int64_t>(static_cast<UnsignedT>(value));
}
template <typename T>
T Clamp(int64_t value) {
static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
int64_t clamped = std::max(min, std::min(max, value));
return static_cast<T>(clamped);
}
template <typename T>
T AddSaturate(T a, T b) {
return Clamp<T>(Widen(a) + Widen(b));
}
template <typename T>
T SubSaturate(T a, T b) {
return Clamp<T>(Widen(a) - Widen(b));
}
template <typename T>
T UnsignedAddSaturate(T a, T b) {
using UnsignedT = typename std::make_unsigned<T>::type;
return Clamp<UnsignedT>(UnsignedWiden(a) + UnsignedWiden(b));
}
template <typename T>
T UnsignedSubSaturate(T a, T b) {
using UnsignedT = typename std::make_unsigned<T>::type;
return Clamp<UnsignedT>(UnsignedWiden(a) - UnsignedWiden(b));
}
} // namespace
// TODO(gdeepti): These are tests using sample values to verify functional
......@@ -184,6 +264,12 @@ T UnsignedLessEqual(T a, T b) {
#define WASM_SIMD_CHECK_SPLAT4_F32(TYPE, value, lv) \
WASM_SIMD_CHECK4_F32(TYPE, value, lv, lv, lv, lv)
#define WASM_SIMD_UNOP(opcode, x) x, kSimdPrefix, static_cast<byte>(opcode)
#define WASM_SIMD_BINOP(opcode, x, y) \
x, y, kSimdPrefix, static_cast<byte>(opcode)
#define WASM_SIMD_SHIFT_OP(opcode, x, shift) \
x, kSimdPrefix, static_cast<byte>(opcode), static_cast<byte>(shift)
#if V8_TARGET_ARCH_ARM
WASM_EXEC_TEST(F32x4Splat) {
FLAG_wasm_simd_prototype = true;
......@@ -283,8 +369,7 @@ void RunF32x4UnOpTest(WasmOpcode simd_op, FloatUnOp expected_op) {
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd,
WASM_SIMD_UNOP(simd_op & 0xffu, WASM_GET_LOCAL(simd))),
WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT4_F32(F32x4, simd, expected), WASM_ONE);
FOR_FLOAT32_INPUTS(i) {
......@@ -306,9 +391,8 @@ void RunF32x4BinOpTest(WasmOpcode simd_op, FloatBinOp expected_op) {
byte simd1 = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(b))),
WASM_SET_LOCAL(simd1,
WASM_SIMD_BINOP(simd_op & 0xffu, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SIMD_CHECK_SPLAT4_F32(F32x4, simd1, expected), WASM_ONE);
FOR_FLOAT32_INPUTS(i) {
......@@ -338,9 +422,8 @@ void RunF32x4CompareOpTest(WasmOpcode simd_op, FloatCompareOp expected_op) {
byte simd1 = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(b))),
WASM_SET_LOCAL(simd1,
WASM_SIMD_BINOP(simd_op & 0xffu, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected), WASM_ONE);
FOR_FLOAT32_INPUTS(i) {
......@@ -663,8 +746,7 @@ void RunI32x4UnOpTest(WasmOpcode simd_op, Int32UnOp expected_op) {
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd,
WASM_SIMD_UNOP(simd_op & 0xffu, WASM_GET_LOCAL(simd))),
WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, expected), WASM_ONE);
FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
......@@ -683,9 +765,8 @@ void RunI32x4BinOpTest(WasmOpcode simd_op, Int32BinOp expected_op) {
byte simd1 = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(b))),
WASM_SET_LOCAL(simd1,
WASM_SIMD_BINOP(simd_op & 0xffu, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SIMD_CHECK_SPLAT4(I32x4, simd1, I32, expected), WASM_ONE);
FOR_INT32_INPUTS(i) {
......@@ -700,6 +781,10 @@ WASM_EXEC_TEST(I32x4Sub) { RunI32x4BinOpTest(kExprI32x4Sub, Sub); }
#if V8_TARGET_ARCH_ARM
WASM_EXEC_TEST(I32x4Mul) { RunI32x4BinOpTest(kExprI32x4Mul, Mul); }
WASM_EXEC_TEST(I32x4Min) { RunI32x4BinOpTest(kExprI32x4MinS, Minimum); }
WASM_EXEC_TEST(I32x4Max) { RunI32x4BinOpTest(kExprI32x4MaxS, Maximum); }
WASM_EXEC_TEST(I32x4Equal) { RunI32x4BinOpTest(kExprI32x4Eq, Equal); }
WASM_EXEC_TEST(I32x4NotEqual) { RunI32x4BinOpTest(kExprI32x4Ne, NotEqual); }
......@@ -714,6 +799,14 @@ WASM_EXEC_TEST(I32x4Less) { RunI32x4BinOpTest(kExprI32x4LtS, Less); }
WASM_EXEC_TEST(I32x4LessEqual) { RunI32x4BinOpTest(kExprI32x4LeS, LessEqual); }
WASM_EXEC_TEST(Ui32x4Min) {
RunI32x4BinOpTest(kExprI32x4MinU, UnsignedMinimum);
}
WASM_EXEC_TEST(Ui32x4Max) {
RunI32x4BinOpTest(kExprI32x4MaxU, UnsignedMaximum);
}
WASM_EXEC_TEST(Ui32x4Greater) {
RunI32x4BinOpTest(kExprI32x4GtU, UnsignedGreater);
}
......@@ -728,6 +821,33 @@ WASM_EXEC_TEST(Ui32x4LessEqual) {
RunI32x4BinOpTest(kExprI32x4LeU, UnsignedLessEqual);
}
void RunI32x4ShiftOpTest(WasmOpcode simd_op, Int32ShiftOp expected_op,
int shift) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(
simd, WASM_SIMD_SHIFT_OP(simd_op, WASM_GET_LOCAL(simd), shift)),
WASM_SIMD_CHECK_SPLAT4(I32x4, simd, I32, expected), WASM_ONE);
FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}
WASM_EXEC_TEST(I32x4Shl) {
RunI32x4ShiftOpTest(kExprI32x4Shl, LogicalShiftLeft, 1);
}
WASM_EXEC_TEST(I32x4ShrS) {
RunI32x4ShiftOpTest(kExprI32x4ShrS, ArithmeticShiftRight, 1);
}
WASM_EXEC_TEST(I32x4ShrU) {
RunI32x4ShiftOpTest(kExprI32x4ShrU, LogicalShiftRight, 1);
}
void RunI16x8UnOpTest(WasmOpcode simd_op, Int16UnOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -735,8 +855,7 @@ void RunI16x8UnOpTest(WasmOpcode simd_op, Int16UnOp expected_op) {
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd,
WASM_SIMD_UNOP(simd_op & 0xffu, WASM_GET_LOCAL(simd))),
WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, expected), WASM_ONE);
FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
......@@ -754,9 +873,8 @@ void RunI16x8BinOpTest(WasmOpcode simd_op, Int16BinOp expected_op) {
byte simd1 = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(b))),
WASM_SET_LOCAL(simd1,
WASM_SIMD_BINOP(simd_op & 0xffu, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SIMD_CHECK_SPLAT8(I16x8, simd1, I32, expected), WASM_ONE);
FOR_INT16_INPUTS(i) {
......@@ -766,10 +884,22 @@ void RunI16x8BinOpTest(WasmOpcode simd_op, Int16BinOp expected_op) {
WASM_EXEC_TEST(I16x8Add) { RunI16x8BinOpTest(kExprI16x8Add, Add); }
WASM_EXEC_TEST(I16x8AddSaturate) {
RunI16x8BinOpTest(kExprI16x8AddSaturateS, AddSaturate);
}
WASM_EXEC_TEST(I16x8Sub) { RunI16x8BinOpTest(kExprI16x8Sub, Sub); }
WASM_EXEC_TEST(I16x8SubSaturate) {
RunI16x8BinOpTest(kExprI16x8SubSaturateS, SubSaturate);
}
WASM_EXEC_TEST(I16x8Mul) { RunI16x8BinOpTest(kExprI16x8Mul, Mul); }
WASM_EXEC_TEST(I16x8Min) { RunI16x8BinOpTest(kExprI16x8MinS, Minimum); }
WASM_EXEC_TEST(I16x8Max) { RunI16x8BinOpTest(kExprI16x8MaxS, Maximum); }
WASM_EXEC_TEST(I16x8Equal) { RunI16x8BinOpTest(kExprI16x8Eq, Equal); }
WASM_EXEC_TEST(I16x8NotEqual) { RunI16x8BinOpTest(kExprI16x8Ne, NotEqual); }
......@@ -784,6 +914,22 @@ WASM_EXEC_TEST(I16x8Less) { RunI16x8BinOpTest(kExprI16x8LtS, Less); }
WASM_EXEC_TEST(I16x8LessEqual) { RunI16x8BinOpTest(kExprI16x8LeS, LessEqual); }
WASM_EXEC_TEST(Ui16x8AddSaturate) {
RunI16x8BinOpTest(kExprI16x8AddSaturateU, UnsignedAddSaturate);
}
WASM_EXEC_TEST(Ui16x8SubSaturate) {
RunI16x8BinOpTest(kExprI16x8SubSaturateU, UnsignedSubSaturate);
}
WASM_EXEC_TEST(Ui16x8Min) {
RunI16x8BinOpTest(kExprI16x8MinU, UnsignedMinimum);
}
WASM_EXEC_TEST(Ui16x8Max) {
RunI16x8BinOpTest(kExprI16x8MaxU, UnsignedMaximum);
}
WASM_EXEC_TEST(Ui16x8Greater) {
RunI16x8BinOpTest(kExprI16x8GtU, UnsignedGreater);
}
......@@ -798,6 +944,33 @@ WASM_EXEC_TEST(Ui16x8LessEqual) {
RunI16x8BinOpTest(kExprI16x8LeU, UnsignedLessEqual);
}
void RunI16x8ShiftOpTest(WasmOpcode simd_op, Int16ShiftOp expected_op,
int shift) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(
simd, WASM_SIMD_SHIFT_OP(simd_op, WASM_GET_LOCAL(simd), shift)),
WASM_SIMD_CHECK_SPLAT8(I16x8, simd, I32, expected), WASM_ONE);
FOR_INT16_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}
WASM_EXEC_TEST(I16x8Shl) {
RunI16x8ShiftOpTest(kExprI16x8Shl, LogicalShiftLeft, 1);
}
WASM_EXEC_TEST(I16x8ShrS) {
RunI16x8ShiftOpTest(kExprI16x8ShrS, ArithmeticShiftRight, 1);
}
WASM_EXEC_TEST(I16x8ShrU) {
RunI16x8ShiftOpTest(kExprI16x8ShrU, LogicalShiftRight, 1);
}
void RunI8x16UnOpTest(WasmOpcode simd_op, Int8UnOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -805,8 +978,7 @@ void RunI8x16UnOpTest(WasmOpcode simd_op, Int8UnOp expected_op) {
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd,
WASM_SIMD_UNOP(simd_op & 0xffu, WASM_GET_LOCAL(simd))),
WASM_SET_LOCAL(simd, WASM_SIMD_UNOP(simd_op, WASM_GET_LOCAL(simd))),
WASM_SIMD_CHECK_SPLAT16(I8x16, simd, I32, expected), WASM_ONE);
FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i))); }
......@@ -824,9 +996,8 @@ void RunI8x16BinOpTest(WasmOpcode simd_op, Int8BinOp expected_op) {
byte simd1 = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd0, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(simd1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(b))),
WASM_SET_LOCAL(simd1,
WASM_SIMD_BINOP(simd_op & 0xffu, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SET_LOCAL(simd1, WASM_SIMD_BINOP(simd_op, WASM_GET_LOCAL(simd0),
WASM_GET_LOCAL(simd1))),
WASM_SIMD_CHECK_SPLAT16(I8x16, simd1, I32, expected), WASM_ONE);
FOR_INT8_INPUTS(i) {
......@@ -836,10 +1007,22 @@ void RunI8x16BinOpTest(WasmOpcode simd_op, Int8BinOp expected_op) {
WASM_EXEC_TEST(I8x16Add) { RunI8x16BinOpTest(kExprI8x16Add, Add); }
WASM_EXEC_TEST(I8x16AddSaturate) {
RunI8x16BinOpTest(kExprI8x16AddSaturateS, AddSaturate);
}
WASM_EXEC_TEST(I8x16Sub) { RunI8x16BinOpTest(kExprI8x16Sub, Sub); }
WASM_EXEC_TEST(I8x16SubSaturate) {
RunI8x16BinOpTest(kExprI8x16SubSaturateS, SubSaturate);
}
WASM_EXEC_TEST(I8x16Mul) { RunI8x16BinOpTest(kExprI8x16Mul, Mul); }
WASM_EXEC_TEST(I8x16Min) { RunI8x16BinOpTest(kExprI8x16MinS, Minimum); }
WASM_EXEC_TEST(I8x16Max) { RunI8x16BinOpTest(kExprI8x16MaxS, Maximum); }
WASM_EXEC_TEST(I8x16Equal) { RunI8x16BinOpTest(kExprI8x16Eq, Equal); }
WASM_EXEC_TEST(I8x16NotEqual) { RunI8x16BinOpTest(kExprI8x16Ne, NotEqual); }
......@@ -854,6 +1037,22 @@ WASM_EXEC_TEST(I8x16Less) { RunI8x16BinOpTest(kExprI8x16LtS, Less); }
WASM_EXEC_TEST(I8x16LessEqual) { RunI8x16BinOpTest(kExprI8x16LeS, LessEqual); }
WASM_EXEC_TEST(Ui8x16AddSaturate) {
RunI8x16BinOpTest(kExprI8x16AddSaturateU, UnsignedAddSaturate);
}
WASM_EXEC_TEST(Ui8x16SubSaturate) {
RunI8x16BinOpTest(kExprI8x16SubSaturateU, UnsignedSubSaturate);
}
WASM_EXEC_TEST(Ui8x16Min) {
RunI8x16BinOpTest(kExprI8x16MinU, UnsignedMinimum);
}
WASM_EXEC_TEST(Ui8x16Max) {
RunI8x16BinOpTest(kExprI8x16MaxU, UnsignedMaximum);
}
WASM_EXEC_TEST(Ui8x16Greater) {
RunI8x16BinOpTest(kExprI8x16GtU, UnsignedGreater);
}
......@@ -867,4 +1066,31 @@ WASM_EXEC_TEST(Ui8x16Less) { RunI8x16BinOpTest(kExprI8x16LtU, UnsignedLess); }
WASM_EXEC_TEST(Ui8x16LessEqual) {
RunI8x16BinOpTest(kExprI8x16LeU, UnsignedLessEqual);
}
void RunI8x16ShiftOpTest(WasmOpcode simd_op, Int8ShiftOp expected_op,
int shift) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(a))),
WASM_SET_LOCAL(
simd, WASM_SIMD_SHIFT_OP(simd_op, WASM_GET_LOCAL(simd), shift)),
WASM_SIMD_CHECK_SPLAT16(I8x16, simd, I32, expected), WASM_ONE);
FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}
WASM_EXEC_TEST(I8x16Shl) {
RunI8x16ShiftOpTest(kExprI8x16Shl, LogicalShiftLeft, 1);
}
WASM_EXEC_TEST(I8x16ShrS) {
RunI8x16ShiftOpTest(kExprI8x16ShrS, ArithmeticShiftRight, 1);
}
WASM_EXEC_TEST(I8x16ShrU) {
RunI8x16ShiftOpTest(kExprI8x16ShrU, LogicalShiftRight, 1);
}
#endif // V8_TARGET_ARCH_ARM
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment