Commit 282766c2 authored by Milad Farazmand, committed by Commit Bot

s390: [wasm-simd] Implement VisitSimd128ReverseBytes

LoadReverseSimd128 and StoreReverseSimd128 are implemented
to support the above instruction selection.

Change-Id: I5dcb30ce68b3478c69668b7589e77a52e77d9388
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1846460
Commit-Queue: Milad Farazmand <miladfar@ca.ibm.com>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#64350}
parent 7c0c052a
......@@ -62,6 +62,7 @@ enum CpuFeature {
FLOATING_POINT_EXT,
VECTOR_FACILITY,
VECTOR_ENHANCE_FACILITY_1,
VECTOR_ENHANCE_FACILITY_2,
MISC_INSTR_EXT2,
#endif
......
......@@ -218,6 +218,11 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supportsCPUFeature("vx")) {
supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
}
// Test for Vector Enhancement Facility 2 - Bit 148
if (facilities[2] & (one << (63 - (148 - 128))) &&
supportsCPUFeature("vx")) {
supported_ |= (1u << VECTOR_ENHANCE_FACILITY_2);
}
// Test for Miscellaneous Instruction Extension Facility - Bit 58
if (facilities[0] & (1lu << (63 - 58))) {
supported_ |= (1u << MISC_INSTR_EXT2);
......
......@@ -1146,6 +1146,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
S390_VRR_E_OPCODE_LIST(DECLARE_VRR_E_INSTRUCTIONS)
#undef DECLARE_VRR_E_INSTRUCTIONS
// Declares one assembler emitter per VRR-F format vector instruction:
// name(v1, r1, r2) assembles a 6-byte instruction laid out as
//   op-hi(8) | v1(4) | r1(4) | r2(4) | reserved | op-lo(8)
// (one vector register plus two general-purpose registers, no mask field).
#define DECLARE_VRR_F_INSTRUCTIONS(name, opcode_name, opcode_value)        \
  void name(DoubleRegister v1, Register r1, Register r2) {                 \
    uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
                    (static_cast<uint64_t>(v1.code())) * B36 |             \
                    (static_cast<uint64_t>(r1.code())) * B32 |             \
                    (static_cast<uint64_t>(r2.code())) * B28 |             \
                    (static_cast<uint64_t>(0)) * B8 |                      \
                    (static_cast<uint64_t>(opcode_value & 0x00FF));        \
    emit6bytes(code);                                                      \
  }
  S390_VRR_F_OPCODE_LIST(DECLARE_VRR_F_INSTRUCTIONS)
  // Was "#undef DECLARE_VRR_E_INSTRUCTIONS" — that macro is already
  // undefined above; undef the VRR_F macro so it does not leak past here.
#undef DECLARE_VRR_F_INSTRUCTIONS
#define DECLARE_VRX_INSTRUCTIONS(name, opcode_name, opcode_value) \
void name(DoubleRegister v1, const MemOperand& opnd, Condition m3) { \
uint64_t code = \
......
......@@ -1563,7 +1563,10 @@ using SixByteInstr = uint64_t;
V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */
V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */ \
V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
V(vstbr, VSTBR, 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS \
*/
#define S390_RIE_G_OPCODE_LIST(V) \
V(lochi, LOCHI, \
......
......@@ -2517,6 +2517,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_LoadReverse64RR:
__ lrvgr(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_LoadReverseSimd128RR:
__ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, 0), Condition(3));
__ vlgv(r1, i.InputSimd128Register(0), MemOperand(r0, 1), Condition(3));
__ lrvgr(r0, r0);
__ lrvgr(r1, r1);
__ vlvg(i.OutputSimd128Register(), r0, MemOperand(r0, 1), Condition(3));
__ vlvg(i.OutputSimd128Register(), r1, MemOperand(r0, 0), Condition(3));
break;
case kS390_LoadReverseSimd128: {
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode);
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
__ vlbr(i.OutputSimd128Register(), operand, Condition(4));
} else {
__ lrvg(r0, operand);
__ lrvg(r1, MemOperand(operand.rx(), operand.rb(),
operand.offset() + kBitsPerByte));
__ vlvgp(i.OutputSimd128Register(), r1, r0);
}
break;
}
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
EmitWordLoadPoisoningIfNeeded(this, instr, i);
......@@ -2558,6 +2579,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_StoreReverse64:
ASSEMBLE_STORE_INTEGER(strvg);
break;
case kS390_StoreReverseSimd128: {
size_t index = 0;
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode, &index);
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
__ vstbr(i.InputSimd128Register(index), operand, Condition(4));
} else {
__ vlgv(r0, i.InputSimd128Register(index), MemOperand(r0, 1),
Condition(3));
__ vlgv(r1, i.InputSimd128Register(index), MemOperand(r0, 0),
Condition(3));
__ strvg(r0, operand);
__ strvg(r1, MemOperand(operand.rx(), operand.rb(),
operand.offset() + kBitsPerByte));
}
break;
}
case kS390_StoreFloat32:
ASSEMBLE_STORE_FLOAT32();
break;
......
......@@ -148,6 +148,8 @@ namespace compiler {
V(S390_LoadReverse16RR) \
V(S390_LoadReverse32RR) \
V(S390_LoadReverse64RR) \
V(S390_LoadReverseSimd128RR) \
V(S390_LoadReverseSimd128) \
V(S390_LoadReverse16) \
V(S390_LoadReverse32) \
V(S390_LoadReverse64) \
......@@ -161,6 +163,7 @@ namespace compiler {
V(S390_StoreReverse16) \
V(S390_StoreReverse32) \
V(S390_StoreReverse64) \
V(S390_StoreReverseSimd128) \
V(S390_StoreFloat32) \
V(S390_StoreDouble) \
V(S390_DecompressSigned) \
......
......@@ -134,6 +134,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadReverse16RR:
case kS390_LoadReverse32RR:
case kS390_LoadReverse64RR:
case kS390_LoadReverseSimd128RR:
case kS390_LoadReverseSimd128:
case kS390_LoadAndTestWord32:
case kS390_LoadAndTestWord64:
case kS390_LoadAndTestFloat32:
......@@ -165,6 +167,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_StoreWord16:
case kS390_StoreWord32:
case kS390_StoreWord64:
case kS390_StoreReverseSimd128:
case kS390_StoreReverse16:
case kS390_StoreReverse32:
case kS390_StoreReverse64:
......
......@@ -1171,9 +1171,22 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
}
void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  // Reverses all 16 bytes of a SIMD128 value. If the input is itself a
  // SIMD128 load that this node fully covers (no other consumer), fold the
  // byte reversal into a single reverse-load (kS390_LoadReverseSimd128)
  // instead of a load followed by a register-to-register reversal.
  // NOTE: the old placeholder body (UNIMPLEMENTED()) has been removed; it
  // made everything below unreachable.
  S390OperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kSimd128) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      Emit(kS390_LoadReverseSimd128 | AddressingModeField::encode(kMode_MRR),
           // TODO(miladfar): one of the base and offset can be imm.
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset));
      return;
    }
  }
  // Fallback: input is already in a vector register; reverse it there.
  Emit(kS390_LoadReverseSimd128RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
template <class Matcher, ArchOpcode neg_opcode>
......
......@@ -827,6 +827,13 @@ bool Decoder::DecodeGeneric(Instruction* instr) {
S390_VRR_E_OPCODE_LIST(DECODE_VRR_E_INSTRUCTIONS)
#undef DECODE_VRR_E_INSTRUCTIONS
// Disassembler cases for VRR-F format instructions: each opcode in
// S390_VRR_F_OPCODE_LIST is printed as "<mnemonic>\t'f1,'r1,'r2"
// (one vector register followed by two general-purpose registers).
#define DECODE_VRR_F_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'f1,'r1,'r2"); \
break;
S390_VRR_F_OPCODE_LIST(DECODE_VRR_F_INSTRUCTIONS)
#undef DECODE_VRR_F_INSTRUCTIONS
#define DECODE_VRX_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'f1,'d1('r2d,'r3),'m4"); \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment