Commit 6234fda3 authored by gdeepti's avatar gdeepti Committed by Commit bot

[wasm] Make Opcode names consistent across architectures, implementations

- Fix opcode names to be consistent with opcodes as in wasm-opcodes.h
- Fix ordering of ops and other inconsistencies

BUG=v8:6020

Review-Url: https://codereview.chromium.org/2776753004
Cr-Commit-Position: refs/heads/master@{#44239}
parent 88f71126
This diff is collapsed.
......@@ -124,108 +124,108 @@ namespace compiler {
V(ArmStr) \
V(ArmPush) \
V(ArmPoke) \
V(ArmFloat32x4Splat) \
V(ArmFloat32x4ExtractLane) \
V(ArmFloat32x4ReplaceLane) \
V(ArmFloat32x4FromInt32x4) \
V(ArmFloat32x4FromUint32x4) \
V(ArmFloat32x4Abs) \
V(ArmFloat32x4Neg) \
V(ArmFloat32x4RecipApprox) \
V(ArmFloat32x4RecipSqrtApprox) \
V(ArmFloat32x4Add) \
V(ArmFloat32x4Sub) \
V(ArmFloat32x4Mul) \
V(ArmFloat32x4Min) \
V(ArmFloat32x4Max) \
V(ArmFloat32x4RecipRefine) \
V(ArmFloat32x4RecipSqrtRefine) \
V(ArmFloat32x4Equal) \
V(ArmFloat32x4NotEqual) \
V(ArmFloat32x4LessThan) \
V(ArmFloat32x4LessThanOrEqual) \
V(ArmInt32x4Splat) \
V(ArmInt32x4ExtractLane) \
V(ArmInt32x4ReplaceLane) \
V(ArmInt32x4FromFloat32x4) \
V(ArmUint32x4FromFloat32x4) \
V(ArmInt32x4Neg) \
V(ArmInt32x4ShiftLeftByScalar) \
V(ArmInt32x4ShiftRightByScalar) \
V(ArmInt32x4Add) \
V(ArmInt32x4Sub) \
V(ArmInt32x4Mul) \
V(ArmInt32x4Min) \
V(ArmInt32x4Max) \
V(ArmInt32x4Equal) \
V(ArmInt32x4NotEqual) \
V(ArmInt32x4LessThan) \
V(ArmInt32x4LessThanOrEqual) \
V(ArmUint32x4ShiftRightByScalar) \
V(ArmUint32x4Min) \
V(ArmUint32x4Max) \
V(ArmUint32x4LessThan) \
V(ArmUint32x4LessThanOrEqual) \
V(ArmInt16x8Splat) \
V(ArmInt16x8ExtractLane) \
V(ArmInt16x8ReplaceLane) \
V(ArmInt16x8Neg) \
V(ArmInt16x8ShiftLeftByScalar) \
V(ArmInt16x8ShiftRightByScalar) \
V(ArmInt16x8Add) \
V(ArmInt16x8AddSaturate) \
V(ArmInt16x8Sub) \
V(ArmInt16x8SubSaturate) \
V(ArmInt16x8Mul) \
V(ArmInt16x8Min) \
V(ArmInt16x8Max) \
V(ArmInt16x8Equal) \
V(ArmInt16x8NotEqual) \
V(ArmInt16x8LessThan) \
V(ArmInt16x8LessThanOrEqual) \
V(ArmUint16x8ShiftRightByScalar) \
V(ArmUint16x8AddSaturate) \
V(ArmUint16x8SubSaturate) \
V(ArmUint16x8Min) \
V(ArmUint16x8Max) \
V(ArmUint16x8LessThan) \
V(ArmUint16x8LessThanOrEqual) \
V(ArmInt8x16Splat) \
V(ArmInt8x16ExtractLane) \
V(ArmInt8x16ReplaceLane) \
V(ArmInt8x16Neg) \
V(ArmInt8x16ShiftLeftByScalar) \
V(ArmInt8x16ShiftRightByScalar) \
V(ArmInt8x16Add) \
V(ArmInt8x16AddSaturate) \
V(ArmInt8x16Sub) \
V(ArmInt8x16SubSaturate) \
V(ArmInt8x16Mul) \
V(ArmInt8x16Min) \
V(ArmInt8x16Max) \
V(ArmInt8x16Equal) \
V(ArmInt8x16NotEqual) \
V(ArmInt8x16LessThan) \
V(ArmInt8x16LessThanOrEqual) \
V(ArmUint8x16ShiftRightByScalar) \
V(ArmUint8x16AddSaturate) \
V(ArmUint8x16SubSaturate) \
V(ArmUint8x16Min) \
V(ArmUint8x16Max) \
V(ArmUint8x16LessThan) \
V(ArmUint8x16LessThanOrEqual) \
V(ArmSimd128Zero) \
V(ArmSimd128And) \
V(ArmSimd128Or) \
V(ArmSimd128Xor) \
V(ArmSimd128Not) \
V(ArmSimd128Select) \
V(ArmSimd1x4AnyTrue) \
V(ArmSimd1x4AllTrue) \
V(ArmSimd1x8AnyTrue) \
V(ArmSimd1x8AllTrue) \
V(ArmSimd1x16AnyTrue) \
V(ArmSimd1x16AllTrue)
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
V(ArmF32x4SConvertI32x4) \
V(ArmF32x4UConvertI32x4) \
V(ArmF32x4Abs) \
V(ArmF32x4Neg) \
V(ArmF32x4RecipApprox) \
V(ArmF32x4RecipSqrtApprox) \
V(ArmF32x4Add) \
V(ArmF32x4Sub) \
V(ArmF32x4Mul) \
V(ArmF32x4Min) \
V(ArmF32x4Max) \
V(ArmF32x4RecipRefine) \
V(ArmF32x4RecipSqrtRefine) \
V(ArmF32x4Eq) \
V(ArmF32x4Ne) \
V(ArmF32x4Lt) \
V(ArmF32x4Le) \
V(ArmI32x4Splat) \
V(ArmI32x4ExtractLane) \
V(ArmI32x4ReplaceLane) \
V(ArmI32x4SConvertF32x4) \
V(ArmI32x4Neg) \
V(ArmI32x4Shl) \
V(ArmI32x4ShrS) \
V(ArmI32x4Add) \
V(ArmI32x4Sub) \
V(ArmI32x4Mul) \
V(ArmI32x4MinS) \
V(ArmI32x4MaxS) \
V(ArmI32x4Eq) \
V(ArmI32x4Ne) \
V(ArmI32x4LtS) \
V(ArmI32x4LeS) \
V(ArmI32x4UConvertF32x4) \
V(ArmI32x4ShrU) \
V(ArmI32x4MinU) \
V(ArmI32x4MaxU) \
V(ArmI32x4LtU) \
V(ArmI32x4LeU) \
V(ArmI16x8Splat) \
V(ArmI16x8ExtractLane) \
V(ArmI16x8ReplaceLane) \
V(ArmI16x8Neg) \
V(ArmI16x8Shl) \
V(ArmI16x8ShrS) \
V(ArmI16x8Add) \
V(ArmI16x8AddSaturateS) \
V(ArmI16x8Sub) \
V(ArmI16x8SubSaturateS) \
V(ArmI16x8Mul) \
V(ArmI16x8MinS) \
V(ArmI16x8MaxS) \
V(ArmI16x8Eq) \
V(ArmI16x8Ne) \
V(ArmI16x8LtS) \
V(ArmI16x8LeS) \
V(ArmI16x8ShrU) \
V(ArmI16x8AddSaturateU) \
V(ArmI16x8SubSaturateU) \
V(ArmI16x8MinU) \
V(ArmI16x8MaxU) \
V(ArmI16x8LtU) \
V(ArmI16x8LeU) \
V(ArmI8x16Splat) \
V(ArmI8x16ExtractLane) \
V(ArmI8x16ReplaceLane) \
V(ArmI8x16Neg) \
V(ArmI8x16Shl) \
V(ArmI8x16ShrS) \
V(ArmI8x16Add) \
V(ArmI8x16AddSaturateS) \
V(ArmI8x16Sub) \
V(ArmI8x16SubSaturateS) \
V(ArmI8x16Mul) \
V(ArmI8x16MinS) \
V(ArmI8x16MaxS) \
V(ArmI8x16Eq) \
V(ArmI8x16Ne) \
V(ArmI8x16LtS) \
V(ArmI8x16LeS) \
V(ArmI8x16ShrU) \
V(ArmI8x16AddSaturateU) \
V(ArmI8x16SubSaturateU) \
V(ArmI8x16MinU) \
V(ArmI8x16MaxU) \
V(ArmI8x16LtU) \
V(ArmI8x16LeU) \
V(ArmS128Zero) \
V(ArmS128And) \
V(ArmS128Or) \
V(ArmS128Xor) \
V(ArmS128Not) \
V(ArmS128Select) \
V(ArmS1x4AnyTrue) \
V(ArmS1x4AllTrue) \
V(ArmS1x8AnyTrue) \
V(ArmS1x8AllTrue) \
V(ArmS1x16AnyTrue) \
V(ArmS1x16AllTrue)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -108,108 +108,108 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmFloat32Min:
case kArmFloat64Min:
case kArmFloat64SilenceNaN:
case kArmFloat32x4Splat:
case kArmFloat32x4ExtractLane:
case kArmFloat32x4ReplaceLane:
case kArmFloat32x4FromInt32x4:
case kArmFloat32x4FromUint32x4:
case kArmFloat32x4Abs:
case kArmFloat32x4Neg:
case kArmFloat32x4RecipApprox:
case kArmFloat32x4RecipSqrtApprox:
case kArmFloat32x4Add:
case kArmFloat32x4Sub:
case kArmFloat32x4Mul:
case kArmFloat32x4Min:
case kArmFloat32x4Max:
case kArmFloat32x4RecipRefine:
case kArmFloat32x4RecipSqrtRefine:
case kArmFloat32x4Equal:
case kArmFloat32x4NotEqual:
case kArmFloat32x4LessThan:
case kArmFloat32x4LessThanOrEqual:
case kArmInt32x4Splat:
case kArmInt32x4ExtractLane:
case kArmInt32x4ReplaceLane:
case kArmInt32x4FromFloat32x4:
case kArmUint32x4FromFloat32x4:
case kArmInt32x4Neg:
case kArmInt32x4ShiftLeftByScalar:
case kArmInt32x4ShiftRightByScalar:
case kArmInt32x4Add:
case kArmInt32x4Sub:
case kArmInt32x4Mul:
case kArmInt32x4Min:
case kArmInt32x4Max:
case kArmInt32x4Equal:
case kArmInt32x4NotEqual:
case kArmInt32x4LessThan:
case kArmInt32x4LessThanOrEqual:
case kArmUint32x4ShiftRightByScalar:
case kArmUint32x4Min:
case kArmUint32x4Max:
case kArmUint32x4LessThan:
case kArmUint32x4LessThanOrEqual:
case kArmInt16x8Splat:
case kArmInt16x8ExtractLane:
case kArmInt16x8ReplaceLane:
case kArmInt16x8Neg:
case kArmInt16x8ShiftLeftByScalar:
case kArmInt16x8ShiftRightByScalar:
case kArmInt16x8Add:
case kArmInt16x8AddSaturate:
case kArmInt16x8Sub:
case kArmInt16x8SubSaturate:
case kArmInt16x8Mul:
case kArmInt16x8Min:
case kArmInt16x8Max:
case kArmInt16x8Equal:
case kArmInt16x8NotEqual:
case kArmInt16x8LessThan:
case kArmInt16x8LessThanOrEqual:
case kArmUint16x8ShiftRightByScalar:
case kArmUint16x8AddSaturate:
case kArmUint16x8SubSaturate:
case kArmUint16x8Min:
case kArmUint16x8Max:
case kArmUint16x8LessThan:
case kArmUint16x8LessThanOrEqual:
case kArmInt8x16Splat:
case kArmInt8x16ExtractLane:
case kArmInt8x16ReplaceLane:
case kArmInt8x16Neg:
case kArmInt8x16ShiftLeftByScalar:
case kArmInt8x16ShiftRightByScalar:
case kArmInt8x16Add:
case kArmInt8x16AddSaturate:
case kArmInt8x16Sub:
case kArmInt8x16SubSaturate:
case kArmInt8x16Mul:
case kArmInt8x16Min:
case kArmInt8x16Max:
case kArmInt8x16Equal:
case kArmInt8x16NotEqual:
case kArmInt8x16LessThan:
case kArmInt8x16LessThanOrEqual:
case kArmUint8x16ShiftRightByScalar:
case kArmUint8x16AddSaturate:
case kArmUint8x16SubSaturate:
case kArmUint8x16Min:
case kArmUint8x16Max:
case kArmUint8x16LessThan:
case kArmUint8x16LessThanOrEqual:
case kArmSimd128Zero:
case kArmSimd128And:
case kArmSimd128Or:
case kArmSimd128Xor:
case kArmSimd128Not:
case kArmSimd128Select:
case kArmSimd1x4AnyTrue:
case kArmSimd1x4AllTrue:
case kArmSimd1x8AnyTrue:
case kArmSimd1x8AllTrue:
case kArmSimd1x16AnyTrue:
case kArmSimd1x16AllTrue:
case kArmF32x4Splat:
case kArmF32x4ExtractLane:
case kArmF32x4ReplaceLane:
case kArmF32x4SConvertI32x4:
case kArmF32x4UConvertI32x4:
case kArmF32x4Abs:
case kArmF32x4Neg:
case kArmF32x4RecipApprox:
case kArmF32x4RecipSqrtApprox:
case kArmF32x4Add:
case kArmF32x4Sub:
case kArmF32x4Mul:
case kArmF32x4Min:
case kArmF32x4Max:
case kArmF32x4RecipRefine:
case kArmF32x4RecipSqrtRefine:
case kArmF32x4Eq:
case kArmF32x4Ne:
case kArmF32x4Lt:
case kArmF32x4Le:
case kArmI32x4Splat:
case kArmI32x4ExtractLane:
case kArmI32x4ReplaceLane:
case kArmI32x4SConvertF32x4:
case kArmI32x4Neg:
case kArmI32x4Shl:
case kArmI32x4ShrS:
case kArmI32x4Add:
case kArmI32x4Sub:
case kArmI32x4Mul:
case kArmI32x4MinS:
case kArmI32x4MaxS:
case kArmI32x4Eq:
case kArmI32x4Ne:
case kArmI32x4LtS:
case kArmI32x4LeS:
case kArmI32x4UConvertF32x4:
case kArmI32x4ShrU:
case kArmI32x4MinU:
case kArmI32x4MaxU:
case kArmI32x4LtU:
case kArmI32x4LeU:
case kArmI16x8Splat:
case kArmI16x8ExtractLane:
case kArmI16x8ReplaceLane:
case kArmI16x8Neg:
case kArmI16x8Shl:
case kArmI16x8ShrS:
case kArmI16x8Add:
case kArmI16x8AddSaturateS:
case kArmI16x8Sub:
case kArmI16x8SubSaturateS:
case kArmI16x8Mul:
case kArmI16x8MinS:
case kArmI16x8MaxS:
case kArmI16x8Eq:
case kArmI16x8Ne:
case kArmI16x8LtS:
case kArmI16x8LeS:
case kArmI16x8AddSaturateU:
case kArmI16x8SubSaturateU:
case kArmI16x8ShrU:
case kArmI16x8MinU:
case kArmI16x8MaxU:
case kArmI16x8LtU:
case kArmI16x8LeU:
case kArmI8x16Splat:
case kArmI8x16ExtractLane:
case kArmI8x16ReplaceLane:
case kArmI8x16Neg:
case kArmI8x16Shl:
case kArmI8x16ShrS:
case kArmI8x16Add:
case kArmI8x16AddSaturateS:
case kArmI8x16Sub:
case kArmI8x16SubSaturateS:
case kArmI8x16Mul:
case kArmI8x16MinS:
case kArmI8x16MaxS:
case kArmI8x16Eq:
case kArmI8x16Ne:
case kArmI8x16LtS:
case kArmI8x16LeS:
case kArmI8x16AddSaturateU:
case kArmI8x16SubSaturateU:
case kArmI8x16ShrU:
case kArmI8x16MinU:
case kArmI8x16MaxU:
case kArmI8x16LtU:
case kArmI8x16LeU:
case kArmS128Zero:
case kArmS128And:
case kArmS128Or:
case kArmS128Xor:
case kArmS128Not:
case kArmS128Select:
case kArmS1x4AnyTrue:
case kArmS1x4AllTrue:
case kArmS1x8AnyTrue:
case kArmS1x8AllTrue:
case kArmS1x16AnyTrue:
case kArmS1x16AllTrue:
return kNoOpcodeFlags;
case kArmVldrF32:
......
......@@ -1883,35 +1883,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
case kIA32Int32x4Splat: {
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputOperand(0));
__ pshufd(dst, dst, 0x0);
break;
}
case kIA32Int32x4ExtractLane: {
case kIA32I32x4ExtractLane: {
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kIA32Int32x4ReplaceLane: {
case kIA32I32x4ReplaceLane: {
__ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
}
case kSSEInt32x4Add: {
case kSSEI32x4Add: {
__ paddd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kSSEInt32x4Sub: {
case kSSEI32x4Sub: {
__ psubd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXInt32x4Add: {
case kAVXI32x4Add: {
CpuFeatureScope avx_scope(masm(), AVX);
__ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kAVXInt32x4Sub: {
case kAVXI32x4Sub: {
CpuFeatureScope avx_scope(masm(), AVX);
__ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
......
......@@ -111,13 +111,13 @@ namespace compiler {
V(IA32PushFloat64) \
V(IA32Poke) \
V(IA32StackCheck) \
V(IA32Int32x4Splat) \
V(IA32Int32x4ExtractLane) \
V(IA32Int32x4ReplaceLane) \
V(SSEInt32x4Add) \
V(SSEInt32x4Sub) \
V(AVXInt32x4Add) \
V(AVXInt32x4Sub)
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(IA32I32x4ReplaceLane) \
V(SSEI32x4Add) \
V(SSEI32x4Sub) \
V(AVXI32x4Add) \
V(AVXI32x4Sub)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -97,13 +97,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
case kIA32Int32x4Splat:
case kIA32Int32x4ExtractLane:
case kIA32Int32x4ReplaceLane:
case kSSEInt32x4Add:
case kSSEInt32x4Sub:
case kAVXInt32x4Add:
case kAVXInt32x4Sub:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kIA32I32x4ReplaceLane:
case kSSEI32x4Add:
case kSSEI32x4Sub:
case kAVXI32x4Add:
case kAVXI32x4Sub:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
......
......@@ -893,8 +893,8 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
V(Int32x4Add, kAVXInt32x4Add, kSSEInt32x4Add) \
V(Int32x4Sub, kAVXInt32x4Sub, kSSEInt32x4Sub)
V(I32x4Add, kAVXI32x4Add, kSSEI32x4Add) \
V(I32x4Sub, kAVXI32x4Sub, kSSEI32x4Sub)
#define FLOAT_UNOP_LIST(V) \
V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
......@@ -1819,21 +1819,21 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitInt32x4Splat(Node* node) {
VisitRO(this, node, kIA32Int32x4Splat);
void InstructionSelector::VisitI32x4Splat(Node* node) {
VisitRO(this, node, kIA32I32x4Splat);
}
void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
IA32OperandGenerator g(this);
int32_t lane = OpParameter<int32_t>(node);
Emit(kIA32Int32x4ExtractLane, g.DefineAsRegister(node),
Emit(kIA32I32x4ExtractLane, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
}
void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
IA32OperandGenerator g(this);
int32_t lane = OpParameter<int32_t>(node);
Emit(kIA32Int32x4ReplaceLane, g.DefineSameAsFirst(node),
Emit(kIA32I32x4ReplaceLane, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
g.Use(node->InputAt(1)));
}
......
This diff is collapsed.
......@@ -215,9 +215,9 @@ class MachineRepresentationInferrer {
case IrOpcode::kTruncateFloat32ToInt32:
case IrOpcode::kTruncateFloat32ToUint32:
case IrOpcode::kBitcastFloat32ToInt32:
case IrOpcode::kInt32x4ExtractLane:
case IrOpcode::kInt16x8ExtractLane:
case IrOpcode::kInt8x16ExtractLane:
case IrOpcode::kI32x4ExtractLane:
case IrOpcode::kI16x8ExtractLane:
case IrOpcode::kI8x16ExtractLane:
case IrOpcode::kInt32Constant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kTruncateFloat64ToWord32:
......@@ -363,9 +363,9 @@ class MachineRepresentationChecker {
CheckValueInputForInt64Op(node, 0);
CheckValueInputForInt64Op(node, 1);
break;
case IrOpcode::kInt32x4ExtractLane:
case IrOpcode::kInt16x8ExtractLane:
case IrOpcode::kInt8x16ExtractLane:
case IrOpcode::kI32x4ExtractLane:
case IrOpcode::kI16x8ExtractLane:
case IrOpcode::kI8x16ExtractLane:
CheckValueInputRepresentationIs(node, 0,
MachineRepresentation::kSimd128);
break;
......
This diff is collapsed.
......@@ -430,155 +430,149 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Float64SilenceNaN();
// SIMD operators.
const Operator* Float32x4Splat();
const Operator* Float32x4ExtractLane(int32_t);
const Operator* Float32x4ReplaceLane(int32_t);
const Operator* Float32x4Abs();
const Operator* Float32x4Neg();
const Operator* Float32x4Sqrt();
const Operator* Float32x4RecipApprox();
const Operator* Float32x4RecipSqrtApprox();
const Operator* Float32x4Add();
const Operator* Float32x4Sub();
const Operator* Float32x4Mul();
const Operator* Float32x4Div();
const Operator* Float32x4Min();
const Operator* Float32x4Max();
const Operator* Float32x4MinNum();
const Operator* Float32x4MaxNum();
const Operator* Float32x4RecipRefine();
const Operator* Float32x4RecipSqrtRefine();
const Operator* Float32x4Equal();
const Operator* Float32x4NotEqual();
const Operator* Float32x4LessThan();
const Operator* Float32x4LessThanOrEqual();
const Operator* Float32x4FromInt32x4();
const Operator* Float32x4FromUint32x4();
const Operator* Int32x4Splat();
const Operator* Int32x4ExtractLane(int32_t);
const Operator* Int32x4ReplaceLane(int32_t);
const Operator* Int32x4Neg();
const Operator* Int32x4Add();
const Operator* Int32x4Sub();
const Operator* Int32x4Mul();
const Operator* Int32x4Min();
const Operator* Int32x4Max();
const Operator* Int32x4ShiftLeftByScalar(int32_t);
const Operator* Int32x4ShiftRightByScalar(int32_t);
const Operator* Int32x4Equal();
const Operator* Int32x4NotEqual();
const Operator* Int32x4LessThan();
const Operator* Int32x4LessThanOrEqual();
const Operator* Int32x4FromFloat32x4();
const Operator* Uint32x4Min();
const Operator* Uint32x4Max();
const Operator* Uint32x4ShiftRightByScalar(int32_t);
const Operator* Uint32x4LessThan();
const Operator* Uint32x4LessThanOrEqual();
const Operator* Uint32x4FromFloat32x4();
const Operator* Int16x8Splat();
const Operator* Int16x8ExtractLane(int32_t);
const Operator* Int16x8ReplaceLane(int32_t);
const Operator* Int16x8Neg();
const Operator* Int16x8Add();
const Operator* Int16x8AddSaturate();
const Operator* Int16x8Sub();
const Operator* Int16x8SubSaturate();
const Operator* Int16x8Mul();
const Operator* Int16x8Min();
const Operator* Int16x8Max();
const Operator* Int16x8ShiftLeftByScalar(int32_t);
const Operator* Int16x8ShiftRightByScalar(int32_t);
const Operator* Int16x8Equal();
const Operator* Int16x8NotEqual();
const Operator* Int16x8LessThan();
const Operator* Int16x8LessThanOrEqual();
const Operator* Uint16x8AddSaturate();
const Operator* Uint16x8SubSaturate();
const Operator* Uint16x8Min();
const Operator* Uint16x8Max();
const Operator* Uint16x8ShiftRightByScalar(int32_t);
const Operator* Uint16x8LessThan();
const Operator* Uint16x8LessThanOrEqual();
const Operator* Int8x16Splat();
const Operator* Int8x16ExtractLane(int32_t);
const Operator* Int8x16ReplaceLane(int32_t);
const Operator* Int8x16Neg();
const Operator* Int8x16Add();
const Operator* Int8x16AddSaturate();
const Operator* Int8x16Sub();
const Operator* Int8x16SubSaturate();
const Operator* Int8x16Mul();
const Operator* Int8x16Min();
const Operator* Int8x16Max();
const Operator* Int8x16ShiftLeftByScalar(int32_t);
const Operator* Int8x16ShiftRightByScalar(int32_t);
const Operator* Int8x16Equal();
const Operator* Int8x16NotEqual();
const Operator* Int8x16LessThan();
const Operator* Int8x16LessThanOrEqual();
const Operator* Uint8x16AddSaturate();
const Operator* Uint8x16SubSaturate();
const Operator* Uint8x16Min();
const Operator* Uint8x16Max();
const Operator* Uint8x16ShiftRightByScalar(int32_t);
const Operator* Uint8x16LessThan();
const Operator* Uint8x16LessThanOrEqual();
const Operator* Simd128Load();
const Operator* Simd128Load1();
const Operator* Simd128Load2();
const Operator* Simd128Load3();
const Operator* Simd128Store();
const Operator* Simd128Store1();
const Operator* Simd128Store2();
const Operator* Simd128Store3();
const Operator* Simd128Zero();
const Operator* Simd128And();
const Operator* Simd128Or();
const Operator* Simd128Xor();
const Operator* Simd128Not();
const Operator* Simd32x4Select();
const Operator* Simd32x4Swizzle(uint32_t);
const Operator* Simd32x4Shuffle();
const Operator* Simd16x8Select();
const Operator* Simd16x8Swizzle(uint32_t);
const Operator* Simd16x8Shuffle();
const Operator* Simd8x16Select();
const Operator* Simd8x16Swizzle(uint32_t);
const Operator* Simd8x16Shuffle();
const Operator* Simd1x4Zero();
const Operator* Simd1x4And();
const Operator* Simd1x4Or();
const Operator* Simd1x4Xor();
const Operator* Simd1x4Not();
const Operator* Simd1x4AnyTrue();
const Operator* Simd1x4AllTrue();
const Operator* Simd1x8Zero();
const Operator* Simd1x8And();
const Operator* Simd1x8Or();
const Operator* Simd1x8Xor();
const Operator* Simd1x8Not();
const Operator* Simd1x8AnyTrue();
const Operator* Simd1x8AllTrue();
const Operator* Simd1x16Zero();
const Operator* Simd1x16And();
const Operator* Simd1x16Or();
const Operator* Simd1x16Xor();
const Operator* Simd1x16Not();
const Operator* Simd1x16AnyTrue();
const Operator* Simd1x16AllTrue();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
const Operator* F32x4ReplaceLane(int32_t);
const Operator* F32x4SConvertI32x4();
const Operator* F32x4UConvertI32x4();
const Operator* F32x4Abs();
const Operator* F32x4Neg();
const Operator* F32x4Sqrt();
const Operator* F32x4RecipApprox();
const Operator* F32x4RecipSqrtApprox();
const Operator* F32x4Add();
const Operator* F32x4Sub();
const Operator* F32x4Mul();
const Operator* F32x4Div();
const Operator* F32x4Min();
const Operator* F32x4Max();
const Operator* F32x4MinNum();
const Operator* F32x4MaxNum();
const Operator* F32x4RecipRefine();
const Operator* F32x4RecipSqrtRefine();
const Operator* F32x4Eq();
const Operator* F32x4Ne();
const Operator* F32x4Lt();
const Operator* F32x4Le();
const Operator* I32x4Splat();
const Operator* I32x4ExtractLane(int32_t);
const Operator* I32x4ReplaceLane(int32_t);
const Operator* I32x4SConvertF32x4();
const Operator* I32x4Neg();
const Operator* I32x4Shl(int32_t);
const Operator* I32x4ShrS(int32_t);
const Operator* I32x4Add();
const Operator* I32x4Sub();
const Operator* I32x4Mul();
const Operator* I32x4MinS();
const Operator* I32x4MaxS();
const Operator* I32x4Eq();
const Operator* I32x4Ne();
const Operator* I32x4LtS();
const Operator* I32x4LeS();
const Operator* I32x4UConvertF32x4();
const Operator* I32x4ShrU(int32_t);
const Operator* I32x4MinU();
const Operator* I32x4MaxU();
const Operator* I32x4LtU();
const Operator* I32x4LeU();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLane(int32_t);
const Operator* I16x8ReplaceLane(int32_t);
const Operator* I16x8Neg();
const Operator* I16x8Shl(int32_t);
const Operator* I16x8ShrS(int32_t);
const Operator* I16x8Add();
const Operator* I16x8AddSaturateS();
const Operator* I16x8Sub();
const Operator* I16x8SubSaturateS();
const Operator* I16x8Mul();
const Operator* I16x8MinS();
const Operator* I16x8MaxS();
const Operator* I16x8Eq();
const Operator* I16x8Ne();
const Operator* I16x8LtS();
const Operator* I16x8LeS();
const Operator* I16x8ShrU(int32_t);
const Operator* I16x8AddSaturateU();
const Operator* I16x8SubSaturateU();
const Operator* I16x8MinU();
const Operator* I16x8MaxU();
const Operator* I16x8LtU();
const Operator* I16x8LeU();
const Operator* I8x16Splat();
const Operator* I8x16ExtractLane(int32_t);
const Operator* I8x16ReplaceLane(int32_t);
const Operator* I8x16Neg();
const Operator* I8x16Shl(int32_t);
const Operator* I8x16ShrS(int32_t);
const Operator* I8x16Add();
const Operator* I8x16AddSaturateS();
const Operator* I8x16Sub();
const Operator* I8x16SubSaturateS();
const Operator* I8x16Mul();
const Operator* I8x16MinS();
const Operator* I8x16MaxS();
const Operator* I8x16Eq();
const Operator* I8x16Ne();
const Operator* I8x16LtS();
const Operator* I8x16LeS();
const Operator* I8x16ShrU(int32_t);
const Operator* I8x16AddSaturateU();
const Operator* I8x16SubSaturateU();
const Operator* I8x16MinU();
const Operator* I8x16MaxU();
const Operator* I8x16LtU();
const Operator* I8x16LeU();
const Operator* S128Load();
const Operator* S128Store();
const Operator* S128Zero();
const Operator* S128And();
const Operator* S128Or();
const Operator* S128Xor();
const Operator* S128Not();
const Operator* S32x4Select();
const Operator* S32x4Swizzle(uint32_t);
const Operator* S32x4Shuffle();
const Operator* S16x8Select();
const Operator* S16x8Swizzle(uint32_t);
const Operator* S16x8Shuffle();
const Operator* S8x16Select();
const Operator* S8x16Swizzle(uint32_t);
const Operator* S8x16Shuffle();
const Operator* S1x4Zero();
const Operator* S1x4And();
const Operator* S1x4Or();
const Operator* S1x4Xor();
const Operator* S1x4Not();
const Operator* S1x4AnyTrue();
const Operator* S1x4AllTrue();
const Operator* S1x8Zero();
const Operator* S1x8And();
const Operator* S1x8Or();
const Operator* S1x8Xor();
const Operator* S1x8Not();
const Operator* S1x8AnyTrue();
const Operator* S1x8AllTrue();
const Operator* S1x16Zero();
const Operator* S1x16And();
const Operator* S1x16Or();
const Operator* S1x16Xor();
const Operator* S1x16Not();
const Operator* S1x16AnyTrue();
const Operator* S1x16AllTrue();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -235,10 +235,10 @@ class WasmGraphBuilder {
void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
Node* Simd128Zero();
Node* Simd1x4Zero();
Node* Simd1x8Zero();
Node* Simd1x16Zero();
Node* S128Zero();
Node* S1x4Zero();
Node* S1x8Zero();
Node* S1x16Zero();
Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
......
......@@ -2142,18 +2142,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
case kX64Int32x4Splat: {
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
__ pshufd(dst, dst, 0x0);
break;
}
case kX64Int32x4ExtractLane: {
case kX64I32x4ExtractLane: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64Int32x4ReplaceLane: {
case kX64I32x4ReplaceLane: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
......@@ -2163,62 +2163,67 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
case kX64Int32x4Add: {
case kX64I32x4Shl: {
__ pslld(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kX64I32x4ShrS: {
__ psrad(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kX64I32x4Add: {
__ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Int32x4Sub: {
case kX64I32x4Sub: {
__ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Int32x4Mul: {
case kX64I32x4Mul: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Int32x4Min: {
case kX64I32x4MinS: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Int32x4Max: {
case kX64I32x4MaxS: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Uint32x4Min: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Uint32x4Max: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Int32x4Equal: {
case kX64I32x4Eq: {
__ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Int32x4NotEqual: {
case kX64I32x4Ne: {
__ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kX64Int32x4ShiftLeftByScalar: {
__ pslld(i.OutputSimd128Register(), i.InputInt8(1));
case kX64I32x4ShrU: {
__ psrld(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kX64Int32x4ShiftRightByScalar: {
__ psrad(i.OutputSimd128Register(), i.InputInt8(1));
case kX64I32x4MinU: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Uint32x4ShiftRightByScalar: {
__ psrld(i.OutputSimd128Register(), i.InputInt8(1));
case kX64I32x4MaxU: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
__ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64Simd32x4Select: {
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ xorps(dst, dst);
break;
}
case kX64S32x4Select: {
// Mask used here is stored in dst.
XMMRegister dst = i.OutputSimd128Register();
__ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
......@@ -2227,11 +2232,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, i.InputSimd128Register(2));
break;
}
case kX64Simd128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ xorps(dst, dst);
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
......
......@@ -143,23 +143,23 @@ namespace compiler {
V(X64Push) \
V(X64Poke) \
V(X64StackCheck) \
V(X64Int32x4Splat) \
V(X64Int32x4ExtractLane) \
V(X64Int32x4ReplaceLane) \
V(X64Int32x4Add) \
V(X64Int32x4Sub) \
V(X64Int32x4Mul) \
V(X64Int32x4Min) \
V(X64Int32x4Max) \
V(X64Int32x4Equal) \
V(X64Int32x4NotEqual) \
V(X64Int32x4ShiftLeftByScalar) \
V(X64Int32x4ShiftRightByScalar) \
V(X64Uint32x4ShiftRightByScalar) \
V(X64Uint32x4Min) \
V(X64Uint32x4Max) \
V(X64Simd32x4Select) \
V(X64Simd128Zero)
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
V(X64I32x4Add) \
V(X64I32x4Sub) \
V(X64I32x4Mul) \
V(X64I32x4MinS) \
V(X64I32x4MaxS) \
V(X64I32x4Eq) \
V(X64I32x4Ne) \
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
V(X64S32x4Select) \
V(X64S128Zero)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -123,23 +123,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
case kX64Int32x4Splat:
case kX64Int32x4ExtractLane:
case kX64Int32x4ReplaceLane:
case kX64Int32x4Add:
case kX64Int32x4Sub:
case kX64Int32x4Mul:
case kX64Int32x4Min:
case kX64Int32x4Max:
case kX64Int32x4Equal:
case kX64Int32x4NotEqual:
case kX64Int32x4ShiftLeftByScalar:
case kX64Int32x4ShiftRightByScalar:
case kX64Uint32x4ShiftRightByScalar:
case kX64Uint32x4Min:
case kX64Uint32x4Max:
case kX64Simd32x4Select:
case kX64Simd128Zero:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
case kX64I32x4Shl:
case kX64I32x4ShrS:
case kX64I32x4Add:
case kX64I32x4Sub:
case kX64I32x4Mul:
case kX64I32x4MinS:
case kX64I32x4MaxS:
case kX64I32x4Eq:
case kX64I32x4Ne:
case kX64I32x4ShrU:
case kX64I32x4MinU:
case kX64I32x4MaxU:
case kX64S128Zero:
case kX64S32x4Select:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
......
......@@ -2381,29 +2381,29 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
#define SIMD_TYPES(V) V(Int32x4)
#define SIMD_TYPES(V) V(I32x4)
#define SIMD_ZERO_OP_LIST(V) \
V(Simd128Zero) \
V(Simd1x4Zero) \
V(Simd1x8Zero) \
V(Simd1x16Zero)
V(S128Zero) \
V(S1x4Zero) \
V(S1x8Zero) \
V(S1x16Zero)
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU)
#define SIMD_BINOP_LIST(V) \
V(Int32x4Add) \
V(Int32x4Sub) \
V(Int32x4Mul) \
V(Int32x4Min) \
V(Int32x4Max) \
V(Int32x4Equal) \
V(Int32x4NotEqual) \
V(Uint32x4Min) \
V(Uint32x4Max)
#define SIMD_SHIFT_OPCODES(V) \
V(Int32x4ShiftLeftByScalar) \
V(Int32x4ShiftRightByScalar) \
V(Uint32x4ShiftRightByScalar)
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
V(I32x4MaxS) \
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4MinU) \
V(I32x4MaxU)
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
......@@ -2435,23 +2435,14 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
#undef VISIT_SIMD_REPLACE_LANE
#define SIMD_VISIT_ZERO_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
X64OperandGenerator g(this); \
Emit(kX64Simd128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
#define SIMD_VISIT_ZERO_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
X64OperandGenerator g(this); \
Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
}
SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
#undef SIMD_VISIT_ZERO_OP
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
......@@ -2462,9 +2453,18 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
void InstructionSelector::VisitSimd32x4Select(Node* node) {
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
void InstructionSelector::VisitS32x4Select(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Simd32x4Select, g.DefineSameAsFirst(node),
Emit(kX64S32x4Select, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
g.UseRegister(node->InputAt(2)));
}
......
......@@ -657,13 +657,13 @@ class WasmFullDecoder : public WasmDecoder {
case kWasmF64:
return builder_->Float64Constant(0);
case kWasmS128:
return builder_->Simd128Zero();
return builder_->S128Zero();
case kWasmS1x4:
return builder_->Simd1x4Zero();
return builder_->S1x4Zero();
case kWasmS1x8:
return builder_->Simd1x8Zero();
return builder_->S1x8Zero();
case kWasmS1x16:
return builder_->Simd1x16Zero();
return builder_->S1x16Zero();
default:
UNREACHABLE();
return nullptr;
......
......@@ -450,7 +450,7 @@ WASM_EXEC_COMPILED_TEST(F32x4ReplaceLane) {
}
// Tests both signed and unsigned conversion.
WASM_EXEC_COMPILED_TEST(F32x4FromInt32x4) {
WASM_EXEC_COMPILED_TEST(F32x4ConvertI32x4) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, float, float> r(kExecuteCompiled);
byte a = 0;
......@@ -891,7 +891,7 @@ int32_t ConvertToInt(double val, bool unsigned_integer) {
}
// Tests both signed and unsigned conversion.
WASM_EXEC_COMPILED_TEST(I32x4FromFloat32x4) {
WASM_EXEC_COMPILED_TEST(I32x4Convert32x4) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, float, int32_t, int32_t> r(kExecuteCompiled);
byte a = 0;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment