Commit cda2e2dd authored by aseemgarg, committed by Commit bot

[wasm] Implement simd lowering for I16x8

R=bbudge@chromium.org,gdeepti@chromium.org,mtrofin@chromium.org
BUG=v8:6020

Review-Url: https://codereview.chromium.org/2843523002
Cr-Commit-Position: refs/heads/master@{#45004}
parent f79c3b51
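
This change teaches SimdScalarLowering about the 16x8 integer ops: SimdType gains kInt16x8 and kSimd1x8 (kInt32/kFloat32 become kInt32x4/kFloat32x4), the fixed kMaxLanes constant is replaced by NumLanes(type), replacements become variable-length arrays, and new helpers (Mask, FixUpperBits, LowerBinaryOpForSmallInt, LowerSaturateBinaryOp) keep 16-bit lane values well formed inside the 32-bit scalar nodes they are lowered to. The matching I16x8 cctest cases are also enabled for SIMD_LOWERING_TARGET. For orientation, a plain C++ sketch of the per-lane invariant the lowering maintains (illustrative only, not part of the diff; the helper names below are made up):

#include <cstdint>

// Each i16x8 lane lives in a 32-bit scalar node. After an arithmetic op the
// upper 16 bits may be stale, so the lowering re-sign-extends the low 16 bits
// (FixUpperBits emits a Word32Shl/Word32Sar pair with kShift16).
int32_t SignExtendLane16(int32_t lane) {
  return static_cast<int32_t>(static_cast<int16_t>(lane & 0xffff));
}

// Unsigned ops first clear the upper bits (the Mask helper with kMask16).
int32_t MaskLane16(int32_t lane) { return lane & 0xffff; }

// One lane of i16x8.add as the lowered graph computes it.
int32_t I16x8AddLane(int32_t a, int32_t b) { return SignExtendLane16(a + b); }
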
......@@ -16,6 +16,13 @@ namespace v8 {
namespace internal {
namespace compiler {
namespace {
static const int kNumLanes32 = 4;
static const int kNumLanes16 = 8;
static const int32_t kMask16 = 0xffff;
static const int32_t kShift16 = 16;
} // anonymous
SimdScalarLowering::SimdScalarLowering(
JSGraph* jsgraph, Signature<MachineRepresentation>* signature)
: jsgraph_(jsgraph),
......@@ -35,7 +42,7 @@ SimdScalarLowering::SimdScalarLowering(
void SimdScalarLowering::LowerGraph() {
stack_.push_back({graph()->end(), 0});
state_.Set(graph()->end(), State::kOnStack);
replacements_[graph()->end()->id()].type = SimdType::kInt32;
replacements_[graph()->end()->id()].type = SimdType::kInt32x4;
while (!stack_.empty()) {
NodeState& top = stack_.back();
......@@ -73,11 +80,14 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4SConvertF32x4) \
V(I32x4UConvertF32x4) \
V(I32x4Neg) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
V(I32x4MaxS) \
V(I32x4ShrU) \
V(I32x4MinU) \
V(I32x4MaxU) \
V(S128And) \
......@@ -119,6 +129,44 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4GtU) \
V(I32x4GeU)
#define FOREACH_INT16X8_OPCODE(V) \
V(I16x8Splat) \
V(I16x8ExtractLane) \
V(I16x8ReplaceLane) \
V(I16x8Neg) \
V(I16x8Shl) \
V(I16x8ShrS) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
V(I16x8ShrU) \
V(I16x8AddSaturateU) \
V(I16x8SubSaturateU) \
V(I16x8MinU) \
V(I16x8MaxU)
#define FOREACH_INT16X8_TO_SIMD1X8OPCODE(V) \
V(I16x8Eq) \
V(I16x8Ne) \
V(I16x8LtS) \
V(I16x8LeS) \
V(I16x8LtU) \
V(I16x8LeU)
#define FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(V) \
V(Float32x4, Float32) \
V(Int32x4, Int32) \
V(Int16x8, Int16)
#define FOREACH_SIMD_TYPE_TO_MACHINE_REP(V) \
V(Float32x4, Float32) \
V(Int32x4, Word32) \
V(Int16x8, Word16)
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
#define CASE_STMT(name) case IrOpcode::k##name:
......@@ -126,11 +174,11 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
case IrOpcode::kReturn:
case IrOpcode::kParameter:
case IrOpcode::kCall: {
replacements_[node->id()].type = SimdType::kInt32;
replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
FOREACH_FLOAT32X4_OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kFloat32;
replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
......@@ -138,23 +186,39 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
replacements_[node->id()].type = SimdType::kSimd1x4;
break;
}
FOREACH_INT16X8_OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kInt16x8;
break;
}
FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kSimd1x8;
break;
}
default: {
switch (output->opcode()) {
FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
case IrOpcode::kF32x4SConvertI32x4:
case IrOpcode::kF32x4UConvertI32x4: {
replacements_[node->id()].type = SimdType::kInt32;
replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
case IrOpcode::kI32x4SConvertF32x4:
case IrOpcode::kI32x4UConvertF32x4: {
replacements_[node->id()].type = SimdType::kFloat32;
replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
case IrOpcode::kS32x4Select: {
replacements_[node->id()].type = SimdType::kSimd1x4;
break;
}
FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kInt16x8;
break;
}
case IrOpcode::kS16x8Select: {
replacements_[node->id()].type = SimdType::kSimd1x8;
break;
}
default: {
replacements_[node->id()].type = replacements_[output->id()].type;
......@@ -199,42 +263,58 @@ static int GetReturnCountAfterLowering(
return result;
}
void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices) {
int SimdScalarLowering::NumLanes(SimdType type) {
int num_lanes = 0;
if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4 ||
type == SimdType::kSimd1x4) {
num_lanes = kNumLanes32;
} else if (type == SimdType::kInt16x8 || type == SimdType::kSimd1x8) {
num_lanes = kNumLanes16;
} else {
UNREACHABLE();
}
return num_lanes;
}
void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
SimdType type) {
new_indices[0] = index;
for (size_t i = 1; i < kMaxLanes; ++i) {
int num_lanes = NumLanes(type);
int lane_width = kSimd128Size / num_lanes;
for (int i = 1; i < num_lanes; ++i) {
new_indices[i] = graph()->NewNode(machine()->Int32Add(), index,
graph()->NewNode(common()->Int32Constant(
static_cast<int>(i) * kLaneWidth)));
static_cast<int>(i) * lane_width)));
}
}
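The index computation above generalizes the old fixed four-lane stride: lane i of a 128-bit value sits at byte offset i * (kSimd128Size / num_lanes) from the base index. A minimal sketch of that arithmetic (illustrative only; assumes the usual 16-byte kSimd128Size):

// Byte offset added to the scalar index for lane i.
int LaneByteOffset(int num_lanes, int i) {
  const int kSimd128SizeBytes = 16;                       // assumed vector width
  const int lane_width = kSimd128SizeBytes / num_lanes;   // 4 for x4, 2 for x8
  return i * lane_width;                                  // e.g. lane 3 of i16x8 -> 6
}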
void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
const Operator* load_op) {
const Operator* load_op, SimdType type) {
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* indices[kMaxLanes];
GetIndexNodes(index, indices);
Node* rep_nodes[kMaxLanes];
int num_lanes = NumLanes(type);
Node** indices = zone()->NewArray<Node*>(num_lanes);
GetIndexNodes(index, indices, type);
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
rep_nodes[0] = node;
NodeProperties::ChangeOp(rep_nodes[0], load_op);
if (node->InputCount() > 2) {
DCHECK(node->InputCount() > 3);
Node* effect_input = node->InputAt(2);
Node* control_input = node->InputAt(3);
rep_nodes[3] = graph()->NewNode(load_op, base, indices[3], effect_input,
control_input);
rep_nodes[2] = graph()->NewNode(load_op, base, indices[2], rep_nodes[3],
control_input);
rep_nodes[1] = graph()->NewNode(load_op, base, indices[1], rep_nodes[2],
control_input);
for (int i = num_lanes - 1; i > 0; --i) {
rep_nodes[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
control_input);
effect_input = rep_nodes[i];
}
rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
} else {
for (size_t i = 1; i < kMaxLanes; ++i) {
for (int i = 1; i < num_lanes; ++i) {
rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
}
}
ReplaceNode(node, rep_nodes);
ReplaceNode(node, rep_nodes, num_lanes);
} else {
DefaultLowering(node);
}
......@@ -246,12 +326,13 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* indices[kMaxLanes];
GetIndexNodes(index, indices);
int num_lanes = NumLanes(rep_type);
Node** indices = zone()->NewArray<Node*>(num_lanes);
GetIndexNodes(index, indices, rep_type);
DCHECK(node->InputCount() > 2);
Node* value = node->InputAt(2);
DCHECK(HasReplacement(1, value));
Node* rep_nodes[kMaxLanes];
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
rep_nodes[0] = node;
Node** rep_inputs = GetReplacementsWithType(value, rep_type);
rep_nodes[0]->ReplaceInput(2, rep_inputs[0]);
......@@ -260,22 +341,22 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
DCHECK(node->InputCount() > 4);
Node* effect_input = node->InputAt(3);
Node* control_input = node->InputAt(4);
rep_nodes[3] = graph()->NewNode(store_op, base, indices[3], rep_inputs[3],
effect_input, control_input);
rep_nodes[2] = graph()->NewNode(store_op, base, indices[2], rep_inputs[2],
rep_nodes[3], control_input);
rep_nodes[1] = graph()->NewNode(store_op, base, indices[1], rep_inputs[1],
rep_nodes[2], control_input);
for (int i = num_lanes - 1; i > 0; --i) {
rep_nodes[i] =
graph()->NewNode(store_op, base, indices[i], rep_inputs[i],
effect_input, control_input);
effect_input = rep_nodes[i];
}
rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
} else {
for (size_t i = 1; i < kMaxLanes; ++i) {
for (int i = 1; i < num_lanes; ++i) {
rep_nodes[i] =
graph()->NewNode(store_op, base, indices[i], rep_inputs[i]);
}
}
ReplaceNode(node, rep_nodes);
ReplaceNode(node, rep_nodes, num_lanes);
} else {
DefaultLowering(node);
}
......@@ -286,47 +367,120 @@ void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
if (invert_inputs) {
rep_node[i] = graph()->NewNode(op, rep_right[i], rep_left[i]);
} else {
rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
}
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, num_lanes);
}
Node* SimdScalarLowering::FixUpperBits(Node* input, int32_t shift) {
return graph()->NewNode(machine()->Word32Sar(),
graph()->NewNode(machine()->Word32Shl(), input,
jsgraph_->Int32Constant(shift)),
jsgraph_->Int32Constant(shift));
}
void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
SimdType input_rep_type,
const Operator* op) {
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
rep_node[i] =
FixUpperBits(graph()->NewNode(op, rep_left[i], rep_right[i]), kShift16);
}
ReplaceNode(node, rep_node, num_lanes);
}
Node* SimdScalarLowering::Mask(Node* input, int32_t mask) {
return graph()->NewNode(machine()->Word32And(), input,
jsgraph_->Int32Constant(mask));
}
void SimdScalarLowering::LowerSaturateBinaryOp(Node* node,
SimdType input_rep_type,
const Operator* op,
bool is_signed) {
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int32_t min = 0;
int32_t max = 0;
if (is_signed) {
min = std::numeric_limits<int16_t>::min();
max = std::numeric_limits<int16_t>::max();
} else {
min = std::numeric_limits<uint16_t>::min();
max = std::numeric_limits<uint16_t>::max();
}
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
Node* op_result = nullptr;
Node* left = is_signed ? rep_left[i] : Mask(rep_left[i], kMask16);
Node* right = is_signed ? rep_right[i] : Mask(rep_right[i], kMask16);
op_result = graph()->NewNode(op, left, right);
Diamond d_min(graph(), common(),
graph()->NewNode(machine()->Int32LessThan(), op_result,
jsgraph_->Int32Constant(min)));
rep_node[i] = d_min.Phi(MachineRepresentation::kWord16,
jsgraph_->Int32Constant(min), op_result);
Diamond d_max(graph(), common(),
graph()->NewNode(machine()->Int32LessThan(),
jsgraph_->Int32Constant(max), rep_node[i]));
rep_node[i] = d_max.Phi(MachineRepresentation::kWord16,
jsgraph_->Int32Constant(max), rep_node[i]);
rep_node[i] = is_signed ? rep_node[i] : FixUpperBits(rep_node[i], kShift16);
}
ReplaceNode(node, rep_node, num_lanes);
}
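The two Diamonds above amount to a clamp of the 32-bit result into the 16-bit signed or unsigned range. A scalar sketch of one lane of the saturating add (illustrative only, not part of the diff):

#include <algorithm>
#include <cstdint>
#include <limits>

// i16x8.add_saturate_s, one lane: add in 32 bits, then clamp to int16_t.
int32_t I16x8AddSaturateSLane(int32_t a, int32_t b) {
  int32_t result = a + b;
  result = std::max<int32_t>(result, std::numeric_limits<int16_t>::min());
  result = std::min<int32_t>(result, std::numeric_limits<int16_t>::max());
  return result;
}

// The unsigned variant masks both inputs to 16 bits first, clamps to
// [0, 0xffff], and finally re-sign-extends the stored lane (FixUpperBits),
// so lanes always stay in the sign-extended representation.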
void SimdScalarLowering::LowerUnaryOp(Node* node, SimdType input_rep_type,
const Operator* op) {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
rep_node[i] = graph()->NewNode(op, rep[i]);
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerIntMinMax(Node* node, const Operator* op,
bool is_max) {
bool is_max, SimdType type) {
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32);
Node** rep_right =
GetReplacementsWithType(node->InputAt(1), SimdType::kInt32);
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
Node** rep_left = GetReplacementsWithType(node->InputAt(0), type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), type);
int num_lanes = NumLanes(type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
MachineRepresentation rep = MachineRepresentation::kNone;
if (type == SimdType::kInt32x4) {
rep = MachineRepresentation::kWord32;
} else if (type == SimdType::kInt16x8) {
rep = MachineRepresentation::kWord16;
} else {
UNREACHABLE();
}
for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
if (is_max) {
rep_node[i] =
d.Phi(MachineRepresentation::kWord32, rep_right[i], rep_left[i]);
rep_node[i] = d.Phi(rep, rep_right[i], rep_left[i]);
} else {
rep_node[i] =
d.Phi(MachineRepresentation::kWord32, rep_left[i], rep_right[i]);
rep_node[i] = d.Phi(rep, rep_left[i], rep_right[i]);
}
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, num_lanes);
}
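LowerIntMinMax is now parameterized on the lane type so the Phi uses kWord16 for 16x8 lanes; per lane it is still a compare followed by a select. Sketch (illustrative only):

#include <cstdint>

// One lane of max_s: the Diamond branches on a < b and the Phi picks b or a.
int32_t IntMaxSLane(int32_t a, int32_t b) { return (a < b) ? b : a; }
// min_s swaps the selected values; the _u variants use an unsigned 32-bit
// compare (Uint32LessThan) on the lane values instead.
int32_t IntMinSLane(int32_t a, int32_t b) { return (a < b) ? a : b; }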
Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
......@@ -361,14 +515,14 @@ Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32);
Node* rep_node[kMaxLanes];
Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32x4);
Node* rep_node[kNumLanes32];
Node* double_zero = graph()->NewNode(common()->Float64Constant(0.0));
Node* min = graph()->NewNode(
common()->Float64Constant(static_cast<double>(is_signed ? kMinInt : 0)));
Node* max = graph()->NewNode(common()->Float64Constant(
static_cast<double>(is_signed ? kMaxInt : 0xffffffffu)));
for (int i = 0; i < kMaxLanes; ++i) {
for (int i = 0; i < kNumLanes32; ++i) {
Node* double_rep =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
Diamond nan_d(graph(), common(), graph()->NewNode(machine()->Float64Equal(),
......@@ -389,21 +543,44 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
graph()->NewNode(machine()->TruncateFloat64ToUint32(), trunc);
}
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, kNumLanes32);
}
void SimdScalarLowering::LowerShiftOp(Node* node, const Operator* op) {
static int32_t shift_mask = 0x1f;
void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
DCHECK_EQ(1, node->InputCount());
int32_t shift_amount = OpParameter<int32_t>(node);
Node* shift_node =
graph()->NewNode(common()->Int32Constant(shift_amount & shift_mask));
Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32);
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
rep_node[i] = graph()->NewNode(op, rep[i], shift_node);
Node* shift_node = graph()->NewNode(common()->Int32Constant(shift_amount));
Node** rep = GetReplacementsWithType(node->InputAt(0), type);
int num_lanes = NumLanes(type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
rep_node[i] = rep[i];
switch (node->opcode()) {
case IrOpcode::kI16x8ShrU:
rep_node[i] = Mask(rep_node[i], kMask16); // Fall through.
case IrOpcode::kI32x4ShrU:
rep_node[i] =
graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
break;
case IrOpcode::kI32x4Shl:
rep_node[i] =
graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
break;
case IrOpcode::kI16x8Shl:
rep_node[i] =
graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
rep_node[i] = FixUpperBits(rep_node[i], kShift16);
break;
case IrOpcode::kI32x4ShrS:
case IrOpcode::kI16x8ShrS:
rep_node[i] =
graph()->NewNode(machine()->Word32Sar(), rep_node[i], shift_node);
break;
default:
UNREACHABLE();
}
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, num_lanes);
}
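The unified LowerShiftOp dispatches on the opcode: unsigned right shifts of 16-bit lanes clear the upper bits before the Word32Shr, left shifts re-sign-extend afterwards, and arithmetic right shifts work directly on the already sign-extended word. A per-lane sketch (illustrative only; shift is the immediate taken from OpParameter):

#include <cstdint>

int32_t SignExtendLane16(int32_t lane) {
  return static_cast<int32_t>(static_cast<int16_t>(lane & 0xffff));
}

int32_t I16x8ShrULane(int32_t lane, int32_t shift) {
  return (lane & 0xffff) >> shift;                          // Mask, then Word32Shr
}
int32_t I16x8ShlLane(int32_t lane, int32_t shift) {
  uint32_t shifted = static_cast<uint32_t>(lane) << shift;  // Word32Shl
  return SignExtendLane16(static_cast<int32_t>(shifted));   // FixUpperBits
}
int32_t I16x8ShrSLane(int32_t lane, int32_t shift) {
  return lane >> shift;                                     // Word32Sar
}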
void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
......@@ -411,18 +588,20 @@ void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
rep_node[i] = d.Phi(MachineRepresentation::kWord32,
jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(1));
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, num_lanes);
}
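Comparisons lower each lane to a 0/1 word (that is what the kSimd1x4/kSimd1x8 types represent), and Ne is an equality compare whose Diamond selects the inverted value. Per lane (illustrative only):

#include <cstdint>
// i32x4.ne / i16x8.ne, one lane: 1 when the lanes differ, 0 when equal.
int32_t NotEqualLane(int32_t a, int32_t b) { return (a == b) ? 0 : 1; }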
void SimdScalarLowering::LowerNode(Node* node) {
SimdType rep_type = ReplacementType(node);
int num_lanes = NumLanes(rep_type);
switch (node->opcode()) {
case IrOpcode::kStart: {
int parameter_count = GetParameterCountAfterLowering();
......@@ -448,19 +627,19 @@ void SimdScalarLowering::LowerNode(Node* node) {
if (old_index == new_index) {
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
Node* new_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
Node* new_node[kNumLanes32];
for (int i = 0; i < kNumLanes32; ++i) {
new_node[i] = nullptr;
}
new_node[0] = node;
if (signature()->GetParam(old_index) ==
MachineRepresentation::kSimd128) {
for (int i = 1; i < kMaxLanes; ++i) {
for (int i = 1; i < kNumLanes32; ++i) {
new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
graph()->start());
}
}
ReplaceNode(node, new_node);
ReplaceNode(node, new_node, kNumLanes32);
}
}
break;
......@@ -469,24 +648,36 @@ void SimdScalarLowering::LowerNode(Node* node) {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
if (rep_type == SimdType::kInt32) {
load_op = machine()->Load(MachineType::Int32());
} else if (rep_type == SimdType::kFloat32) {
load_op = machine()->Load(MachineType::Float32());
#define LOAD_CASE(sType, mType) \
case SimdType::k##sType: \
load_op = machine()->Load(MachineType::mType()); \
break;
switch (rep_type) {
FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(LOAD_CASE)
default:
UNREACHABLE();
}
LowerLoadOp(rep, node, load_op);
#undef LOAD_CASE
LowerLoadOp(rep, node, load_op, rep_type);
break;
}
case IrOpcode::kUnalignedLoad: {
MachineRepresentation rep =
UnalignedLoadRepresentationOf(node->op()).representation();
const Operator* load_op;
if (rep_type == SimdType::kInt32) {
load_op = machine()->UnalignedLoad(MachineType::Int32());
} else if (rep_type == SimdType::kFloat32) {
load_op = machine()->UnalignedLoad(MachineType::Float32());
#define UNALIGNED_LOAD_CASE(sType, mType) \
case SimdType::k##sType: \
load_op = machine()->UnalignedLoad(MachineType::mType()); \
break;
switch (rep_type) {
FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(UNALIGNED_LOAD_CASE)
default:
UNREACHABLE();
}
LowerLoadOp(rep, node, load_op);
#undef UNALIGNED_LOAD_CASE
LowerLoadOp(rep, node, load_op, rep_type);
break;
}
case IrOpcode::kStore: {
......@@ -495,24 +686,35 @@ void SimdScalarLowering::LowerNode(Node* node) {
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
if (rep_type == SimdType::kInt32) {
store_op = machine()->Store(StoreRepresentation(
MachineRepresentation::kWord32, write_barrier_kind));
} else {
store_op = machine()->Store(StoreRepresentation(
MachineRepresentation::kFloat32, write_barrier_kind));
#define STORE_CASE(sType, mType) \
case SimdType::k##sType: \
store_op = machine()->Store(StoreRepresentation( \
MachineRepresentation::k##mType, write_barrier_kind)); \
break;
switch (rep_type) {
FOREACH_SIMD_TYPE_TO_MACHINE_REP(STORE_CASE)
default:
UNREACHABLE();
}
#undef STORE_CASE
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
if (rep_type == SimdType::kInt32) {
store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
} else {
store_op = machine()->UnalignedStore(MachineRepresentation::kFloat32);
#define UNALIGNED_STORE_CASE(sType, mType) \
case SimdType::k##sType: \
store_op = machine()->UnalignedStore(MachineRepresentation::k##mType); \
break;
switch (rep_type) {
FOREACH_SIMD_TYPE_TO_MACHINE_REP(UNALIGNED_STORE_CASE)
default:
UNREACHABLE();
}
#undef UNALIGNED_STORE_CASE
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
......@@ -540,12 +742,12 @@ void SimdScalarLowering::LowerNode(Node* node) {
if (descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Simd128()) {
// We access the additional return values through projections.
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
Node* rep_node[kNumLanes32];
for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] =
graph()->NewNode(common()->Projection(i), node, graph()->start());
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, kNumLanes32);
}
break;
}
......@@ -558,7 +760,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
Node** rep_input =
GetReplacementsWithType(node->InputAt(i), rep_type);
for (int j = 0; j < kMaxLanes; j++) {
for (int j = 0; j < num_lanes; j++) {
rep_node[j]->ReplaceInput(i, rep_input[j]);
}
}
......@@ -579,42 +781,74 @@ void SimdScalarLowering::LowerNode(Node* node) {
I32X4_BINOP_CASE(kS128Or, Word32Or)
I32X4_BINOP_CASE(kS128Xor, Word32Xor)
#undef I32X4_BINOP_CASE
case IrOpcode::kI32x4MaxS: {
LowerIntMinMax(node, machine()->Int32LessThan(), true);
case IrOpcode::kI16x8Add: {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add());
break;
}
case IrOpcode::kI16x8Sub: {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Sub());
break;
}
case IrOpcode::kI16x8Mul: {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Mul());
break;
}
case IrOpcode::kI16x8AddSaturateS: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), true);
break;
}
case IrOpcode::kI16x8SubSaturateS: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), true);
break;
}
case IrOpcode::kI16x8AddSaturateU: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), false);
break;
}
case IrOpcode::kI32x4MinS: {
LowerIntMinMax(node, machine()->Int32LessThan(), false);
case IrOpcode::kI16x8SubSaturateU: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), false);
break;
}
case IrOpcode::kI32x4MaxU: {
LowerIntMinMax(node, machine()->Uint32LessThan(), true);
case IrOpcode::kI32x4MaxS:
case IrOpcode::kI16x8MaxS: {
LowerIntMinMax(node, machine()->Int32LessThan(), true, rep_type);
break;
}
case IrOpcode::kI32x4MinU: {
LowerIntMinMax(node, machine()->Uint32LessThan(), false);
case IrOpcode::kI32x4MinS:
case IrOpcode::kI16x8MinS: {
LowerIntMinMax(node, machine()->Int32LessThan(), false, rep_type);
break;
}
case IrOpcode::kI32x4MaxU:
case IrOpcode::kI16x8MaxU: {
LowerIntMinMax(node, machine()->Uint32LessThan(), true, rep_type);
break;
}
case IrOpcode::kI32x4MinU:
case IrOpcode::kI16x8MinU: {
LowerIntMinMax(node, machine()->Uint32LessThan(), false, rep_type);
break;
}
case IrOpcode::kI32x4Neg: {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
Node* rep_node[kMaxLanes];
Node* rep_node[kNumLanes32];
Node* zero = graph()->NewNode(common()->Int32Constant(0));
for (int i = 0; i < kMaxLanes; ++i) {
for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] = graph()->NewNode(machine()->Int32Sub(), zero, rep[i]);
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, kNumLanes32);
break;
}
case IrOpcode::kS128Not: {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
Node* rep_node[kMaxLanes];
Node* rep_node[kNumLanes32];
Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff));
for (int i = 0; i < kMaxLanes; ++i) {
for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask);
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, kNumLanes32);
break;
}
case IrOpcode::kI32x4SConvertF32x4: {
......@@ -625,16 +859,13 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerConvertFromFloat(node, false);
break;
}
case IrOpcode::kI32x4Shl: {
LowerShiftOp(node, machine()->Word32Shl());
break;
}
case IrOpcode::kI32x4ShrS: {
LowerShiftOp(node, machine()->Word32Sar());
break;
}
case IrOpcode::kI32x4ShrU: {
LowerShiftOp(node, machine()->Word32Shr());
case IrOpcode::kI32x4Shl:
case IrOpcode::kI16x8Shl:
case IrOpcode::kI32x4ShrS:
case IrOpcode::kI16x8ShrS:
case IrOpcode::kI32x4ShrU:
case IrOpcode::kI16x8ShrU: {
LowerShiftOp(node, rep_type);
break;
}
#define F32X4_BINOP_CASE(name) \
......@@ -657,48 +888,52 @@ void SimdScalarLowering::LowerNode(Node* node) {
F32X4_UNOP_CASE(Neg)
#undef F32x4_UNOP_CASE
case IrOpcode::kF32x4SConvertI32x4: {
LowerUnaryOp(node, SimdType::kInt32, machine()->RoundInt32ToFloat32());
LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundInt32ToFloat32());
break;
}
case IrOpcode::kF32x4UConvertI32x4: {
LowerUnaryOp(node, SimdType::kInt32, machine()->RoundUint32ToFloat32());
LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
break;
}
case IrOpcode::kI32x4Splat:
case IrOpcode::kF32x4Splat: {
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
case IrOpcode::kF32x4Splat:
case IrOpcode::kI16x8Splat: {
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
if (HasReplacement(0, node->InputAt(0))) {
rep_node[i] = GetReplacements(node->InputAt(0))[0];
} else {
rep_node[i] = node->InputAt(0);
}
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, num_lanes);
break;
}
case IrOpcode::kI32x4ExtractLane:
case IrOpcode::kF32x4ExtractLane: {
case IrOpcode::kF32x4ExtractLane:
case IrOpcode::kI16x8ExtractLane: {
int32_t lane = OpParameter<int32_t>(node);
Node* rep_node[kMaxLanes] = {
GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
nullptr, nullptr};
ReplaceNode(node, rep_node);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
rep_node[0] = GetReplacementsWithType(node->InputAt(0), rep_type)[lane];
for (int i = 1; i < num_lanes; ++i) {
rep_node[i] = nullptr;
}
ReplaceNode(node, rep_node, num_lanes);
break;
}
case IrOpcode::kI32x4ReplaceLane:
case IrOpcode::kF32x4ReplaceLane: {
case IrOpcode::kF32x4ReplaceLane:
case IrOpcode::kI16x8ReplaceLane: {
DCHECK_EQ(2, node->InputCount());
Node* repNode = node->InputAt(1);
int32_t lane = OpParameter<int32_t>(node);
DCHECK(lane >= 0 && lane < num_lanes);
Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
if (HasReplacement(0, repNode)) {
rep_node[lane] = GetReplacements(repNode)[0];
} else {
rep_node[lane] = repNode;
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, num_lanes);
break;
}
#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
......@@ -706,51 +941,69 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerBinaryOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
break; \
}
COMPARISON_CASE(Float32, kF32x4Eq, Float32Equal, false)
COMPARISON_CASE(Float32, kF32x4Lt, Float32LessThan, false)
COMPARISON_CASE(Float32, kF32x4Le, Float32LessThanOrEqual, false)
COMPARISON_CASE(Float32, kF32x4Gt, Float32LessThan, true)
COMPARISON_CASE(Float32, kF32x4Ge, Float32LessThanOrEqual, true)
COMPARISON_CASE(Int32, kI32x4Eq, Word32Equal, false)
COMPARISON_CASE(Int32, kI32x4LtS, Int32LessThan, false)
COMPARISON_CASE(Int32, kI32x4LeS, Int32LessThanOrEqual, false)
COMPARISON_CASE(Int32, kI32x4GtS, Int32LessThan, true)
COMPARISON_CASE(Int32, kI32x4GeS, Int32LessThanOrEqual, true)
COMPARISON_CASE(Int32, kI32x4LtU, Uint32LessThan, false)
COMPARISON_CASE(Int32, kI32x4LeU, Uint32LessThanOrEqual, false)
COMPARISON_CASE(Int32, kI32x4GtU, Uint32LessThan, true)
COMPARISON_CASE(Int32, kI32x4GeU, Uint32LessThanOrEqual, true)
COMPARISON_CASE(Float32x4, kF32x4Eq, Float32Equal, false)
COMPARISON_CASE(Float32x4, kF32x4Lt, Float32LessThan, false)
COMPARISON_CASE(Float32x4, kF32x4Le, Float32LessThanOrEqual, false)
COMPARISON_CASE(Float32x4, kF32x4Gt, Float32LessThan, true)
COMPARISON_CASE(Float32x4, kF32x4Ge, Float32LessThanOrEqual, true)
COMPARISON_CASE(Int32x4, kI32x4Eq, Word32Equal, false)
COMPARISON_CASE(Int32x4, kI32x4LtS, Int32LessThan, false)
COMPARISON_CASE(Int32x4, kI32x4LeS, Int32LessThanOrEqual, false)
COMPARISON_CASE(Int32x4, kI32x4GtS, Int32LessThan, true)
COMPARISON_CASE(Int32x4, kI32x4GeS, Int32LessThanOrEqual, true)
COMPARISON_CASE(Int32x4, kI32x4LtU, Uint32LessThan, false)
COMPARISON_CASE(Int32x4, kI32x4LeU, Uint32LessThanOrEqual, false)
COMPARISON_CASE(Int32x4, kI32x4GtU, Uint32LessThan, true)
COMPARISON_CASE(Int32x4, kI32x4GeU, Uint32LessThanOrEqual, true)
COMPARISON_CASE(Int16x8, kI16x8Eq, Word32Equal, false)
COMPARISON_CASE(Int16x8, kI16x8LtS, Int32LessThan, false)
COMPARISON_CASE(Int16x8, kI16x8LeS, Int32LessThanOrEqual, false)
COMPARISON_CASE(Int16x8, kI16x8GtS, Int32LessThan, true)
COMPARISON_CASE(Int16x8, kI16x8GeS, Int32LessThanOrEqual, true)
COMPARISON_CASE(Int16x8, kI16x8LtU, Uint32LessThan, false)
COMPARISON_CASE(Int16x8, kI16x8LeU, Uint32LessThanOrEqual, false)
COMPARISON_CASE(Int16x8, kI16x8GtU, Uint32LessThan, true)
COMPARISON_CASE(Int16x8, kI16x8GeU, Uint32LessThanOrEqual, true)
#undef COMPARISON_CASE
case IrOpcode::kF32x4Ne: {
LowerNotEqual(node, SimdType::kFloat32, machine()->Float32Equal());
LowerNotEqual(node, SimdType::kFloat32x4, machine()->Float32Equal());
break;
}
case IrOpcode::kI32x4Ne: {
LowerNotEqual(node, SimdType::kInt32, machine()->Word32Equal());
LowerNotEqual(node, SimdType::kInt32x4, machine()->Word32Equal());
break;
}
case IrOpcode::kI16x8Ne: {
LowerNotEqual(node, SimdType::kInt16x8, machine()->Word32Equal());
break;
}
case IrOpcode::kS32x4Select: {
case IrOpcode::kS32x4Select:
case IrOpcode::kS16x8Select: {
DCHECK(node->InputCount() == 3);
DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4);
DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4 ||
ReplacementType(node->InputAt(0)) == SimdType::kSimd1x8);
Node** boolean_input = GetReplacements(node->InputAt(0));
Node** rep_left = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
Node* rep_node[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
jsgraph_->Int32Constant(0)));
if (rep_type == SimdType::kFloat32) {
rep_node[i] =
d.Phi(MachineRepresentation::kFloat32, rep_right[1], rep_left[0]);
} else if (rep_type == SimdType::kInt32) {
rep_node[i] =
d.Phi(MachineRepresentation::kWord32, rep_right[1], rep_left[0]);
} else {
UNREACHABLE();
#define SELECT_CASE(sType, mType) \
case SimdType::k##sType: \
rep_node[i] = \
d.Phi(MachineRepresentation::k##mType, rep_right[i], rep_left[i]); \
break;
switch (rep_type) {
FOREACH_SIMD_TYPE_TO_MACHINE_REP(SELECT_CASE)
default:
UNREACHABLE();
}
#undef SELECT_CASE
}
ReplaceNode(node, rep_node);
ReplaceNode(node, rep_node, num_lanes);
break;
}
default: { DefaultLowering(node); }
......@@ -767,7 +1020,7 @@ bool SimdScalarLowering::DefaultLowering(Node* node) {
}
if (HasReplacement(1, input)) {
something_changed = true;
for (int j = 1; j < kMaxLanes; j++) {
for (int j = 1; j < ReplacementCount(input); ++j) {
node->InsertInput(zone(), i + j, GetReplacements(input)[j]);
}
}
......@@ -775,18 +1028,17 @@ bool SimdScalarLowering::DefaultLowering(Node* node) {
return something_changed;
}
void SimdScalarLowering::ReplaceNode(Node* old, Node** new_node) {
// if new_low == nullptr, then also new_high == nullptr.
DCHECK(new_node[0] != nullptr ||
(new_node[1] == nullptr && new_node[2] == nullptr &&
new_node[3] == nullptr));
for (int i = 0; i < kMaxLanes; ++i) {
replacements_[old->id()].node[i] = new_node[i];
void SimdScalarLowering::ReplaceNode(Node* old, Node** new_nodes, int count) {
replacements_[old->id()].node = zone()->NewArray<Node*>(count);
for (int i = 0; i < count; ++i) {
replacements_[old->id()].node[i] = new_nodes[i];
}
replacements_[old->id()].num_replacements = count;
}
bool SimdScalarLowering::HasReplacement(size_t index, Node* node) {
return replacements_[node->id()].node[index] != nullptr;
return replacements_[node->id()].node != nullptr &&
replacements_[node->id()].node[index] != nullptr;
}
SimdScalarLowering::SimdType SimdScalarLowering::ReplacementType(Node* node) {
......@@ -799,30 +1051,61 @@ Node** SimdScalarLowering::GetReplacements(Node* node) {
return result;
}
int SimdScalarLowering::ReplacementCount(Node* node) {
return replacements_[node->id()].num_replacements;
}
void SimdScalarLowering::Int32ToFloat32(Node** replacements, Node** result) {
for (int i = 0; i < kNumLanes32; ++i) {
if (replacements[i] != nullptr) {
result[i] =
graph()->NewNode(machine()->BitcastInt32ToFloat32(), replacements[i]);
} else {
result[i] = nullptr;
}
}
}
void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
for (int i = 0; i < kNumLanes32; ++i) {
if (replacements[i] != nullptr) {
result[i] =
graph()->NewNode(machine()->BitcastFloat32ToInt32(), replacements[i]);
} else {
result[i] = nullptr;
}
}
}
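Int32ToFloat32 and Float32ToInt32 factor out the lane-wise bitcasts used when a replacement is requested with the other 32-bit type; the bitcast reinterprets the four bytes without a numeric conversion. Sketch of one lane (illustrative only):

#include <cstdint>
#include <cstring>

// Equivalent of BitcastFloat32ToInt32 on a single lane.
int32_t BitcastF32ToI32(float lane) {
  int32_t bits;
  std::memcpy(&bits, &lane, sizeof(bits));
  return bits;
}
// The Int32x4 <-> Int16x8 directions have no such helper yet and are left as
// UNIMPLEMENTED() in GetReplacementsWithType below.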
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
Node** replacements = GetReplacements(node);
if (ReplacementType(node) == type) {
return GetReplacements(node);
}
Node** result = zone()->NewArray<Node*>(kMaxLanes);
if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
for (int i = 0; i < kMaxLanes; ++i) {
if (replacements[i] != nullptr) {
result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
replacements[i]);
} else {
result[i] = nullptr;
}
int num_lanes = NumLanes(type);
Node** result = zone()->NewArray<Node*>(num_lanes);
if (type == SimdType::kInt32x4) {
if (ReplacementType(node) == SimdType::kFloat32x4) {
Float32ToInt32(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt16x8) {
UNIMPLEMENTED();
} else {
UNREACHABLE();
}
} else if (ReplacementType(node) == SimdType::kFloat32 &&
type == SimdType::kInt32) {
for (int i = 0; i < kMaxLanes; ++i) {
if (replacements[i] != nullptr) {
result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
replacements[i]);
} else {
result[i] = nullptr;
}
} else if (type == SimdType::kFloat32x4) {
if (ReplacementType(node) == SimdType::kInt32x4) {
Int32ToFloat32(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt16x8) {
UNIMPLEMENTED();
} else {
UNREACHABLE();
}
} else if (type == SimdType::kInt16x8) {
if (ReplacementType(node) == SimdType::kInt32x4 ||
ReplacementType(node) == SimdType::kFloat32x4) {
UNIMPLEMENTED();
} else {
UNREACHABLE();
}
} else {
UNREACHABLE();
......@@ -839,31 +1122,34 @@ void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
// graph verifier.
int value_count = phi->op()->ValueInputCount();
SimdType type = ReplacementType(phi);
Node** inputs_rep[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
int num_lanes = NumLanes(type);
Node*** inputs_rep = zone()->NewArray<Node**>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
}
for (int i = 0; i < value_count; ++i) {
for (int j = 0; j < kMaxLanes; j++) {
for (int j = 0; j < num_lanes; ++j) {
inputs_rep[j][i] = placeholder_;
}
}
Node* rep_nodes[kMaxLanes];
for (int i = 0; i < kMaxLanes; ++i) {
if (type == SimdType::kInt32) {
rep_nodes[i] = graph()->NewNode(
common()->Phi(MachineRepresentation::kWord32, value_count),
value_count + 1, inputs_rep[i], false);
} else if (type == SimdType::kFloat32) {
rep_nodes[i] = graph()->NewNode(
common()->Phi(MachineRepresentation::kFloat32, value_count),
value_count + 1, inputs_rep[i], false);
} else {
UNREACHABLE();
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
#define PHI_CASE(sType, mType) \
case SimdType::k##sType: \
rep_nodes[i] = graph()->NewNode( \
common()->Phi(MachineRepresentation::k##mType, value_count), \
value_count + 1, inputs_rep[i], false); \
break;
switch (type) {
FOREACH_SIMD_TYPE_TO_MACHINE_REP(PHI_CASE)
default:
UNREACHABLE();
}
#undef PHI_CASE
}
ReplaceNode(phi, rep_nodes);
ReplaceNode(phi, rep_nodes, num_lanes);
}
}
} // namespace compiler
......
......@@ -28,14 +28,18 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
enum class SimdType : uint8_t { kInt32, kFloat32, kSimd1x4 };
static const int kMaxLanes = 4;
static const int kLaneWidth = 16 / kMaxLanes;
enum class SimdType : uint8_t {
kFloat32x4,
kInt32x4,
kInt16x8,
kSimd1x4,
kSimd1x8
};
struct Replacement {
Node* node[kMaxLanes];
SimdType type; // represents what input type is expected
Node** node = nullptr;
SimdType type; // represents output type
int num_replacements = 0;
};
struct NodeState {
......@@ -52,24 +56,35 @@ class SimdScalarLowering {
void LowerNode(Node* node);
bool DefaultLowering(Node* node);
void ReplaceNode(Node* old, Node** new_nodes);
int NumLanes(SimdType type);
void ReplaceNode(Node* old, Node** new_nodes, int count);
bool HasReplacement(size_t index, Node* node);
Node** GetReplacements(Node* node);
int ReplacementCount(Node* node);
void Float32ToInt32(Node** replacements, Node** result);
void Int32ToFloat32(Node** replacements, Node** result);
Node** GetReplacementsWithType(Node* node, SimdType type);
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
void SetLoweredType(Node* node, Node* output);
void GetIndexNodes(Node* index, Node** new_indices);
void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
void LowerLoadOp(MachineRepresentation rep, Node* node,
const Operator* load_op);
const Operator* load_op, SimdType type);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
bool invert_inputs = false);
Node* FixUpperBits(Node* input, int32_t shift);
void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
const Operator* op);
Node* Mask(Node* input, int32_t mask);
void LowerSaturateBinaryOp(Node* node, SimdType input_rep_type,
const Operator* op, bool is_signed);
void LowerUnaryOp(Node* node, SimdType input_rep_type, const Operator* op);
void LowerIntMinMax(Node* node, const Operator* op, bool is_max);
void LowerIntMinMax(Node* node, const Operator* op, bool is_max,
SimdType type);
void LowerConvertFromFloat(Node* node, bool is_signed);
void LowerShiftOp(Node* node, const Operator* op);
void LowerShiftOp(Node* node, SimdType type);
Node* BuildF64Trunc(Node* input);
void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
......
......@@ -660,7 +660,7 @@ WASM_EXEC_COMPILED_TEST(I32x4ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(I16x8Splat) {
FLAG_wasm_simd_prototype = true;
......@@ -723,7 +723,9 @@ WASM_EXEC_COMPILED_TEST(I16x8ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
WASM_EXEC_COMPILED_TEST(I8x16Splat) {
FLAG_wasm_simd_prototype = true;
......@@ -1179,7 +1181,7 @@ WASM_EXEC_COMPILED_TEST(I16x8ConvertI32x4) {
}
#endif // V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
void RunI16x8BinOpTest(WasmOpcode simd_op, Int16BinOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
......@@ -1263,9 +1265,9 @@ WASM_EXEC_COMPILED_TEST(I16x8Eq) { RunI16x8CompareOpTest(kExprI16x8Eq, Equal); }
WASM_EXEC_COMPILED_TEST(I16x8Ne) {
RunI16x8CompareOpTest(kExprI16x8Ne, NotEqual);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(I16x8LtS) {
RunI16x8CompareOpTest(kExprI16x8LtS, Less);
}
......@@ -1297,9 +1299,9 @@ WASM_EXEC_COMPILED_TEST(I16x8LtU) {
WASM_EXEC_COMPILED_TEST(I16x8LeU) {
RunI16x8CompareOpTest(kExprI16x8LeU, UnsignedLessEqual);
}
#endif // V8_TARGET_ARCH_ARM
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
void RunI16x8ShiftOpTest(WasmOpcode simd_op, Int16ShiftOp expected_op,
int shift) {
FLAG_wasm_simd_prototype = true;
......@@ -1326,7 +1328,7 @@ WASM_EXEC_COMPILED_TEST(I16x8ShrS) {
WASM_EXEC_COMPILED_TEST(I16x8ShrU) {
RunI16x8ShiftOpTest(kExprI16x8ShrU, LogicalShiftRight, 1);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM
void RunI8x16UnOpTest(WasmOpcode simd_op, Int8UnOp expected_op) {
......