Commit 327cf664 authored by Milad Fa, committed by V8 LUCI CQ

S390 [liftoff]: Implement simd splat ops

Shared ops between TurboFan and Liftoff are moved into
the macro-assembler.

Change-Id: I03cd3af10074b6b4666a7d2a13e652629576f76f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3035764
Reviewed-by: Junliang Yan <junyan@redhat.com>
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/master@{#75768}
parent 17eea527
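A splat builds a 128-bit SIMD value by copying a single scalar into every lane. As a scalar model of what the diffs below implement (an illustrative sketch, not V8 code; the function name is made up for the example):

#include <array>
#include <cstdint>
#include <iostream>

// i32x4.splat: every 32-bit lane of the 128-bit result receives a copy of
// the scalar input. The S390 code below does the same with vlvg + vrep.
std::array<uint32_t, 4> I32x4SplatModel(uint32_t src) {
  std::array<uint32_t, 4> dst;
  dst.fill(src);  // replicate the scalar into all four lanes
  return dst;
}

int main() {
  for (uint32_t lane : I32x4SplatModel(42)) std::cout << lane << ' ';
  // prints: 42 42 42 42
  return 0;
}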
@@ -5086,6 +5086,35 @@ void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
   bind(&done);
 }
 
+// Simd Support.
+void TurboAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) {
+  vrep(dst, src, Operand(0), Condition(3));
+}
+
+void TurboAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) {
+  vrep(dst, src, Operand(0), Condition(2));
+}
+
+void TurboAssembler::I64x2Splat(Simd128Register dst, Register src) {
+  vlvg(dst, src, MemOperand(r0, 0), Condition(3));
+  vrep(dst, dst, Operand(0), Condition(3));
+}
+
+void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) {
+  vlvg(dst, src, MemOperand(r0, 0), Condition(2));
+  vrep(dst, dst, Operand(0), Condition(2));
+}
+
+void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) {
+  vlvg(dst, src, MemOperand(r0, 0), Condition(1));
+  vrep(dst, dst, Operand(0), Condition(1));
+}
+
+void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) {
+  vlvg(dst, src, MemOperand(r0, 0), Condition(0));
+  vrep(dst, dst, Operand(0), Condition(0));
+}
+
 }  // namespace internal
 }  // namespace v8
......
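A note on the instruction pair used above (a hedged summary of the z/Architecture vector facility as these helpers use it): vlvg inserts a general-purpose register into a chosen vector element, vrep replicates a chosen element across the whole vector, MemOperand(r0, 0) addresses element 0, and the Condition operand selects the element size (0 = byte, 1 = halfword, 2 = word, 3 = doubleword). The float splats need only vrep because the scalar already occupies lane 0 of the vector register that aliases the double register, while the integer splats must first move the GPR into element 0. Annotated, I16x8Splat emits:

vlvg(dst, src, MemOperand(r0, 0), Condition(1));  // insert GPR src into halfword element 0
vrep(dst, dst, Operand(0), Condition(1));         // replicate element 0 into all 8 lanes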
@@ -1030,6 +1030,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // The return address on the stack is used by frame iteration.
   void StoreReturnAddressAndCall(Register target);
 
+  // ---------------------------------------------------------------------------
+  // Simd Support.
+  void F64x2Splat(Simd128Register dst, Simd128Register src);
+  void F32x4Splat(Simd128Register dst, Simd128Register src);
+  void I64x2Splat(Simd128Register dst, Register src);
+  void I32x4Splat(Simd128Register dst, Register src);
+  void I16x8Splat(Simd128Register dst, Register src);
+  void I8x16Splat(Simd128Register dst, Register src);
+
   // ---------------------------------------------------------------------------
   // Pointer compression Support
......
@@ -2481,41 +2481,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kS390_Word64AtomicCompareExchangeUint64:
       ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
       break;
-      // vector replicate element
-    case kS390_F64x2Splat: {
-      __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
-              Condition(3));
-      break;
-    }
-    case kS390_F32x4Splat: {
-      __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
-              Condition(2));
-      break;
-    }
-    case kS390_I64x2Splat: {
-      Simd128Register dst = i.OutputSimd128Register();
-      __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(3));
-      __ vrep(dst, dst, Operand(0), Condition(3));
-      break;
-    }
-    case kS390_I32x4Splat: {
-      Simd128Register dst = i.OutputSimd128Register();
-      __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(2));
-      __ vrep(dst, dst, Operand(0), Condition(2));
-      break;
-    }
-    case kS390_I16x8Splat: {
-      Simd128Register dst = i.OutputSimd128Register();
-      __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(1));
-      __ vrep(dst, dst, Operand(0), Condition(1));
-      break;
-    }
-    case kS390_I8x16Splat: {
-      Simd128Register dst = i.OutputSimd128Register();
-      __ vlvg(dst, i.InputRegister(0), MemOperand(r0, 0), Condition(0));
-      __ vrep(dst, dst, Operand(0), Condition(0));
-      break;
-    }
+      // Simd Support.
+#define SIMD_UNOP_LIST(V)                                    \
+  V(F64x2Splat, F64x2Splat, Simd128Register, DoubleRegister) \
+  V(F32x4Splat, F32x4Splat, Simd128Register, DoubleRegister) \
+  V(I64x2Splat, I64x2Splat, Simd128Register, Register)       \
+  V(I32x4Splat, I32x4Splat, Simd128Register, Register)       \
+  V(I16x8Splat, I16x8Splat, Simd128Register, Register)       \
+  V(I8x16Splat, I8x16Splat, Simd128Register, Register)
+
+#define EMIT_SIMD_UNOP(name, op, dtype, stype)   \
+  case kS390_##name: {                           \
+    __ op(i.Output##dtype(), i.Input##stype(0)); \
+    break;                                       \
+  }
+      SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
+#undef EMIT_SIMD_UNOP
+#undef SIMD_UNOP_LIST
       // vector extract element
     case kS390_F64x2ExtractLane: {
       __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
......
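To make the macro concrete, hand-expanding the first list entry, V(F64x2Splat, F64x2Splat, Simd128Register, DoubleRegister), through EMIT_SIMD_UNOP yields the same kind of case the deleted code spelled out, except the instruction selection now lives in the shared TurboAssembler helper:

case kS390_F64x2Splat: {
  __ F64x2Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0));
  break;
}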
@@ -2142,6 +2142,23 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
   b(condition, target);  // branch if SMI
 }
 
+#define SIMD_UNOP_LIST(V)            \
+  V(f64x2_splat, F64x2Splat, fp, fp) \
+  V(f32x4_splat, F32x4Splat, fp, fp) \
+  V(i64x2_splat, I64x2Splat, fp, gp) \
+  V(i32x4_splat, I32x4Splat, fp, gp) \
+  V(i16x8_splat, I16x8Splat, fp, gp) \
+  V(i8x16_splat, I8x16Splat, fp, gp)
+
+#define EMIT_SIMD_UNOP(name, op, dtype, stype)              \
+  void LiftoffAssembler::emit_##name(LiftoffRegister dst,   \
+                                     LiftoffRegister src) { \
+    op(dst.dtype(), src.stype());                           \
+  }
+SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
+#undef EMIT_SIMD_UNOP
+#undef SIMD_UNOP_LIST
+
 void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
                                      Register offset_reg, uintptr_t offset_imm,
                                      LoadType type,
@@ -2170,11 +2187,6 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
   bailout(kUnsupportedArchitecture, "emit_i8x16_swizzle");
 }
 
-void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_f64x2splat");
-}
-
 void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
                                                LiftoffRegister lhs,
                                                uint8_t imm_lane_idx) {
@@ -2282,11 +2294,6 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
   bailout(kSimd, "f64x2.promote_low_f32x4");
 }
 
-void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
-}
-
 void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
                                                LiftoffRegister lhs,
                                                uint8_t imm_lane_idx) {
@@ -2379,11 +2386,6 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
   bailout(kSimd, "pmax unimplemented");
 }
 
-void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i64x2splat");
-}
-
 void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
                                                LiftoffRegister lhs,
                                                uint8_t imm_lane_idx) {
@@ -2503,11 +2505,6 @@ void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
   bailout(kSimd, "i64x2_extmul_high_i32x4_u unsupported");
 }
 
-void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i32x4_splat");
-}
-
 void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
                                                LiftoffRegister lhs,
                                                uint8_t imm_lane_idx) {
@@ -2647,11 +2644,6 @@ void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
   bailout(kSimd, "i32x4_extmul_high_i16x8_u unsupported");
 }
 
-void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i16x8splat");
-}
-
 void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
                                       LiftoffRegister src) {
   bailout(kUnsupportedArchitecture, "emit_i16x8neg");
@@ -2834,11 +2826,6 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
   bailout(kSimd, "i8x16.popcnt");
 }
 
-void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i8x16splat");
-}
-
 void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
                                                  LiftoffRegister lhs,
                                                  uint8_t imm_lane_idx) {
......
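In the Liftoff list the dtype/stype tokens name LiftoffRegister accessors rather than register classes, so fp entries read the register's floating-point/SIMD view (dst.fp()) and gp entries its general-purpose view (src.gp()). Hand-expanding V(i64x2_splat, I64x2Splat, fp, gp) gives the replacement for one of the bailout stubs deleted above:

void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
                                        LiftoffRegister src) {
  I64x2Splat(dst.fp(), src.gp());
}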