Commit db436925 authored by Milad Fa, committed by V8 LUCI CQ

S390[liftoff]: Implement simd store lane ops

Change-Id: Id5295f5afe3bc850f2e8726d696f42fd26f07a51
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3470565
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#79152}
parent 8f3c3419
@@ -6140,15 +6140,16 @@ LOAD_LANE_LIST(LOAD_LANE)
   V(16, vstebrh, StoreU16LE, 1) \
   V(8, vsteb, StoreU8, 0)
-#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
-  void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
-                                           const MemOperand& mem, int lane) { \
-    if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
-      vector_instr(src, mem, Condition(lane)); \
-      return; \
-    } \
-    vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
-    scalar_instr(r1, mem); \
+#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
+  void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+                                           const MemOperand& mem, int lane, \
+                                           Register scratch) { \
+    if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+      vector_instr(src, mem, Condition(lane)); \
+      return; \
+    } \
+    vlgv(scratch, src, MemOperand(r0, lane), Condition(condition)); \
+    scalar_instr(scratch, mem); \
   }
 STORE_LANE_LIST(STORE_LANE)
 #undef STORE_LANE
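The STORE_LANE macro above generates one helper per lane width. When byte-reversed vector element stores are available (CAN_LOAD_STORE_REVERSE) and the offset fits the 12-bit unsigned displacement those instructions take, the lane is stored with a single vector instruction; otherwise the lane is first extracted into a general-purpose register with vlgv and written with a scalar store. The change threads a caller-supplied scratch register through the fallback path instead of hard-coding r1. As a sketch (not literal source), hand-expanding the 8-bit entry V(8, vsteb, StoreU8, 0) gives roughly:

void TurboAssembler::StoreLane8LE(Simd128Register src, const MemOperand& mem,
                                  int lane, Register scratch) {
  if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
    // Fast path: store vector element `lane` of src directly to memory.
    vsteb(src, mem, Condition(lane));
    return;
  }
  // Fallback: pull the lane into the scratch GPR, then store it.
  vlgv(scratch, src, MemOperand(r0, lane), Condition(0));
  StoreU8(scratch, mem);
}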
@@ -419,10 +419,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                     Register scratch);
   void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane,
                     Register scratch);
-  void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane);
-  void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane);
-  void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane);
-  void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane);
+  void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane,
+                    Register scratch);
+  void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane,
+                     Register scratch);
+  void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane,
+                     Register scratch);
+  void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane,
+                     Register scratch);
   // Load And Test
   void LoadAndTest32(Register dst, Register src);
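The new declarations mirror the LoadLane*LE family above, which already takes an explicit scratch register. A hypothetical call site, assuming `tasm` is a TurboAssembler* and `src_simd` a Simd128Register (register and lane choices are purely illustrative):

// Store 32-bit lane 2 of src_simd to 8(r3), using r1 as the scratch GPR.
tasm->StoreLane32LE(src_simd, MemOperand(r3, 8), 2, r1);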
@@ -3114,7 +3114,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       size_t index = 2; \
       MemOperand operand = i.MemoryOperand(&mode, &index); \
       Simd128Register src = i.InputSimd128Register(0); \
-      __ StoreLane##type##LE(src, operand, lane);
+      __ StoreLane##type##LE(src, operand, lane, kScratchReg);
     case kS390_S128Store8Lane: {
       STORE_LANE(8, 15 - i.InputUint8(1));
       break;
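Note the mirrored lane index at the call site (15 - i.InputUint8(1) for byte lanes). WASM numbers SIMD lanes from the least significant end, while s390, being big-endian, numbers vector elements from the most significant end, so the index is flipped within the element count for the width. A sketch of the mapping (MirrorLane is a hypothetical helper, not part of the patch):

// elem_count is 16, 8, 4, or 2 for 8-, 16-, 32- and 64-bit lanes.
int MirrorLane(int elem_count, int wasm_lane) {
  return (elem_count - 1) - wasm_lane;  // e.g. byte lane 0 -> element 15
}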
@@ -2612,7 +2612,30 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
                                  uintptr_t offset_imm, LiftoffRegister src,
                                  StoreType type, uint8_t lane,
                                  uint32_t* protected_store_pc) {
-  bailout(kSimd, "store lane");
+  if (!is_int20(offset_imm)) {
+    mov(ip, Operand(offset_imm));
+    if (offset != no_reg) {
+      AddS64(ip, offset);
+    }
+    offset = ip;
+    offset_imm = 0;
+  }
+  MemOperand dst_op =
+      MemOperand(dst, offset == no_reg ? r0 : offset, offset_imm);
+  if (protected_store_pc) *protected_store_pc = pc_offset();
+  MachineRepresentation rep = type.mem_rep();
+  if (rep == MachineRepresentation::kWord8) {
+    StoreLane8LE(src.fp(), dst_op, 15 - lane, r1);
+  } else if (rep == MachineRepresentation::kWord16) {
+    StoreLane16LE(src.fp(), dst_op, 7 - lane, r1);
+  } else if (rep == MachineRepresentation::kWord32) {
+    StoreLane32LE(src.fp(), dst_op, 3 - lane, r1);
+  } else {
+    DCHECK_EQ(MachineRepresentation::kWord64, rep);
+    StoreLane64LE(src.fp(), dst_op, 1 - lane, r1);
+  }
 }
 
 void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
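The Liftoff lowering replaces the former bailout. It first ensures the offset immediate fits the signed 20-bit displacement of the s390 long-displacement forms, folding oversized offsets into ip; it then records the protected-store PC for the trap handler and dispatches on the memory representation, with the lane index mirrored as above and r1 as the scratch register. Condensed into a hypothetical helper (GetStoreLaneOperand is not part of the patch), the address materialization looks like:

// Builds base + index + displacement, spilling an oversized displacement
// into tmp first so it fits the signed 20-bit field.
MemOperand GetStoreLaneOperand(Register base, Register offset,
                               uintptr_t offset_imm, Register tmp) {
  if (!is_int20(offset_imm)) {
    mov(tmp, Operand(offset_imm));
    if (offset != no_reg) AddS64(tmp, offset);
    return MemOperand(base, tmp, 0);
  }
  return MemOperand(base, offset == no_reg ? r0 : offset, offset_imm);
}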