Commit 49c95bd9 authored by Liu Yu, committed by V8 LUCI CQ

[liftoff][loong64][mips64] Implement Atomic ops and ConvertSat ops

Change-Id: I4378e4f99c6b034f7b29782218896d5485178109
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3514656
Auto-Submit: Yu Liu <liuyu@loongson.cn>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#79452}
parent a2ea2cd0
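The Liftoff atomics added below are built either on native LoongArch am*_db instructions or on load-linked/store-conditional (Ll/Sc) retry loops. A rough C++ sketch of what such a retry loop computes, using std::atomic in place of LL/SC (the function name is illustrative, not V8 API):

// Sketch: models the Ll/Sc retry loop with compare_exchange_weak; returns
// the old value, as Liftoff's AtomicAdd does through `result`.
#include <atomic>
#include <cstdint>

uint32_t atomic_add_seq_cst(std::atomic<uint32_t>* addr, uint32_t value) {
  uint32_t old = addr->load(std::memory_order_relaxed);
  // Keep retrying until the store-conditional analogue succeeds, mirroring
  // the branch back to `binop` in the ASSEMBLE_ATOMIC_BINOP macros below.
  while (!addr->compare_exchange_weak(old, old + value,
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed)) {
  }
  return old;
}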
@@ -443,9 +443,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
__ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
__ ExtractBits(i.TempRegister(2), i.InputRegister(2), zero_reg, size, \
sign_extend); \
__ BranchShort(&exit, ne, i.InputRegister(2), \
__ BranchShort(&exit, ne, i.TempRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
size); \
......
@@ -457,9 +457,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
__ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
__ ExtractBits(i.TempRegister(2), i.InputRegister(2), zero_reg, size, \
sign_extend); \
__ BranchShort(&exit, ne, i.InputRegister(2), \
__ BranchShort(&exit, ne, i.TempRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
size); \
......
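The two hunks above change the destination of the second ExtractBits from i.InputRegister(2) to i.TempRegister(2), so the masked copy of the expected value is kept in a scratch register instead of overwriting one of the instruction's inputs, and the comparison against the loaded field uses that scratch. A hedged C-style sketch of the comparison step (names are illustrative):

// Sketch of the sub-word compare step emitted by the macro: the caller's
// expected value stays intact; only a scratch holds its masked form.
#include <cstdint>

bool compare_step(uint64_t loaded_word, uint64_t expected_input,
                  unsigned shift, unsigned size, uint64_t* old_field_out) {
  const uint64_t mask = (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
  uint64_t old_field = (loaded_word >> shift) & mask;  // ExtractBits -> OutputRegister(0)
  uint64_t expected_field = expected_input & mask;     // ExtractBits -> TempRegister(2)
  *old_field_out = old_field;
  return old_field == expected_field;                  // otherwise branch to `exit`
}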
@@ -4267,12 +4267,28 @@ void Simulator::DecodeTypeOp17() {
case FSCALEB_D:
printf("Sim UNIMPLEMENTED: FSCALEB_D\n");
UNIMPLEMENTED();
case FCOPYSIGN_S:
printf("Sim UNIMPLEMENTED: FCOPYSIGN_S\n");
UNIMPLEMENTED();
case FCOPYSIGN_D:
printf("Sim UNIMPLEMENTED: FCOPYSIGN_D\n");
UNIMPLEMENTED();
case FCOPYSIGN_S: {
printf_instr("FCOPYSIGN_S\t %s: %016f, %s, %016f, %s, %016f\n",
FPURegisters::Name(fd_reg()), fd_float(),
FPURegisters::Name(fj_reg()), fj_float(),
FPURegisters::Name(fk_reg()), fk_float());
SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
[](float lhs, float rhs) {
return std::copysign(lhs, rhs);
},
fj_float(), fk_float()));
} break;
case FCOPYSIGN_D: {
printf_instr("FCOPYSIGN_d\t %s: %016f, %s, %016f, %s, %016f\n",
FPURegisters::Name(fd_reg()), fd_double(),
FPURegisters::Name(fj_reg()), fj_double(),
FPURegisters::Name(fk_reg()), fk_double());
SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation(
[](double lhs, double rhs) {
return std::copysign(lhs, rhs);
},
fj_double(), fk_double()));
} break;
default:
UNREACHABLE();
}
......
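The simulator hunk replaces the UNIMPLEMENTED() stubs for FCOPYSIGN_S and FCOPYSIGN_D with real handlers that combine the magnitude of the first operand with the sign of the second, matching std::copysign. A minimal standalone illustration of that semantics:

// Illustration of the copysign semantics the simulator now models.
#include <cassert>
#include <cmath>

int main() {
  assert(std::copysign(3.5f, -0.0f) == -3.5f);  // sign taken from the second operand
  assert(std::copysign(-2.0, 7.0) == 2.0);      // magnitude taken from the first
  return 0;
}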
@@ -508,58 +508,323 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicLoad");
UseScratchRegisterScope temps(this);
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
switch (type.value()) {
case LoadType::kI32Load8U:
case LoadType::kI64Load8U: {
Ld_bu(dst.gp(), src_op);
dbar(0);
return;
}
case LoadType::kI32Load16U:
case LoadType::kI64Load16U: {
Ld_hu(dst.gp(), src_op);
dbar(0);
return;
}
case LoadType::kI32Load: {
Ld_w(dst.gp(), src_op);
dbar(0);
return;
}
case LoadType::kI64Load32U: {
Ld_wu(dst.gp(), src_op);
dbar(0);
return;
}
case LoadType::kI64Load: {
Ld_d(dst.gp(), src_op);
dbar(0);
return;
}
default:
UNREACHABLE();
}
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicStore");
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
UseScratchRegisterScope temps(this);
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8: {
dbar(0);
St_b(src.gp(), dst_op);
return;
}
case StoreType::kI64Store16:
case StoreType::kI32Store16: {
dbar(0);
St_h(src.gp(), dst_op);
return;
}
case StoreType::kI64Store32:
case StoreType::kI32Store: {
dbar(0);
St_w(src.gp(), dst_op);
return;
}
case StoreType::kI64Store: {
dbar(0);
St_d(src.gp(), dst_op);
return;
}
default:
UNREACHABLE();
}
}
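AtomicLoad and AtomicStore above use a plain load or store bracketed by a full barrier: dbar(0) after the load, dbar(0) before the store (sync() plays the same role on MIPS64 later in this commit). A hedged C++ analogue of that barrier placement, assuming the barrier behaves like a sequentially consistent fence:

// Sketch of the barrier placement only; assumes dbar(0)/sync() act as a
// full fence comparable to atomic_thread_fence(seq_cst).
#include <atomic>
#include <cstdint>

uint32_t atomic_load_like(const volatile uint32_t* p) {
  uint32_t v = *p;                                      // Ld_w / Lw
  std::atomic_thread_fence(std::memory_order_seq_cst);  // dbar(0) / sync()
  return v;
}

void atomic_store_like(volatile uint32_t* p, uint32_t v) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // dbar(0) / sync()
  *p = v;                                               // St_w / Sw
}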
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, size, \
bin_instr, aligned) \
do { \
Label binop; \
andi(temp3, temp0, aligned); \
Sub_d(temp0, temp0, Operand(temp3)); \
slli_w(temp3, temp3, 3); \
dbar(0); \
bind(&binop); \
load_linked(temp1, MemOperand(temp0, 0)); \
ExtractBits(result.gp(), temp1, temp3, size, false); \
bin_instr(temp2, result.gp(), Operand(value.gp())); \
InsertBits(temp1, temp2, temp3, size); \
store_conditional(temp1, MemOperand(temp0, 0)); \
BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
dbar(0); \
} while (0)
#define ATOMIC_BINOP_CASE(name, inst32, inst64, opcode) \
void LiftoffAssembler::Atomic##name( \
Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
LiftoffRegister value, LiftoffRegister result, StoreType type) { \
LiftoffRegList pinned = \
LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result); \
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
MemOperand dst_op = \
liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); \
Add_d(temp0, dst_op.base(), dst_op.offset()); \
switch (type.value()) { \
case StoreType::kI64Store8: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 8, inst64, 7); \
break; \
case StoreType::kI32Store8: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 8, inst32, 3); \
break; \
case StoreType::kI64Store16: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 16, inst64, 7); \
break; \
case StoreType::kI32Store16: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 16, inst32, 3); \
break; \
case StoreType::kI64Store32: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 32, inst64, 7); \
break; \
case StoreType::kI32Store: \
am##opcode##_db_w(result.gp(), value.gp(), temp0); \
break; \
case StoreType::kI64Store: \
am##opcode##_db_d(result.gp(), value.gp(), temp0); \
break; \
default: \
UNREACHABLE(); \
} \
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
ATOMIC_BINOP_CASE(Add, Add_w, Add_d, add)
ATOMIC_BINOP_CASE(And, And, And, and)
ATOMIC_BINOP_CASE(Or, Or, Or, or)
ATOMIC_BINOP_CASE(Xor, Xor, Xor, xor)
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
do { \
Label binop; \
dbar(0); \
bind(&binop); \
load_linked(result.gp(), MemOperand(temp0, 0)); \
bin_instr(temp1, result.gp(), Operand(value.gp())); \
store_conditional(temp1, MemOperand(temp0, 0)); \
BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
dbar(0); \
} while (0)
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Add_d(temp0, dst_op.base(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 8, Sub_d, 7);
break;
case StoreType::kI32Store8:
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 8, Sub_w, 3);
break;
case StoreType::kI64Store16:
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 16, Sub_d, 7);
break;
case StoreType::kI32Store16:
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 16, Sub_w, 3);
break;
case StoreType::kI64Store32:
ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 32, Sub_d, 7);
break;
case StoreType::kI32Store:
ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w);
break;
case StoreType::kI64Store:
ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d);
break;
default:
UNREACHABLE();
}
}
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC_BINOP_EXT
#undef ATOMIC_BINOP_CASE
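ASSEMBLE_ATOMIC_BINOP_EXT handles the 8- and 16-bit cases by aligning the address down to its containing word, then extracting, updating and re-inserting the narrow field inside the LL/SC loop. A hedged C++ model of that strategy, with a word-sized compare-exchange standing in for Ll/Sc (names are illustrative, not V8 API):

// Sketch: sub-word atomic add implemented as extract/modify/insert on the
// aligned 32-bit word that contains the byte.
#include <atomic>
#include <cstdint>

uint8_t atomic_add_u8(std::atomic<uint32_t>* aligned_word, unsigned byte_index,
                      uint8_t value) {
  const unsigned shift = byte_index * 8;           // slli_w(temp3, temp3, 3)
  const uint32_t mask = uint32_t{0xFF} << shift;
  uint32_t word = aligned_word->load(std::memory_order_relaxed);
  uint8_t old;
  uint32_t new_word;
  do {
    old = static_cast<uint8_t>(word >> shift);                 // ExtractBits -> result
    uint8_t updated = static_cast<uint8_t>(old + value);       // bin_instr
    new_word = (word & ~mask) | (uint32_t{updated} << shift);  // InsertBits
  } while (!aligned_word->compare_exchange_weak(               // Sc + branch back
      word, new_word, std::memory_order_seq_cst, std::memory_order_relaxed));
  return old;
}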
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
size, aligned) \
do { \
Label exchange; \
andi(temp1, temp0, aligned); \
Sub_d(temp0, temp0, Operand(temp1)); \
slli_w(temp1, temp1, 3); \
dbar(0); \
bind(&exchange); \
load_linked(temp2, MemOperand(temp0, 0)); \
ExtractBits(result.gp(), temp2, temp1, size, false); \
InsertBits(temp2, value.gp(), temp1, size); \
store_conditional(temp2, MemOperand(temp0, 0)); \
BranchShort(&exchange, eq, temp2, Operand(zero_reg)); \
dbar(0); \
} while (0)
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Add_d(temp0, dst_op.base(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 8, 7);
break;
case StoreType::kI32Store8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, 8, 3);
break;
case StoreType::kI64Store16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 16, 7);
break;
case StoreType::kI32Store16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, 16, 3);
break;
case StoreType::kI64Store32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 32, 7);
break;
case StoreType::kI32Store:
amswap_db_w(result.gp(), value.gp(), temp0);
break;
case StoreType::kI64Store:
amswap_db_d(result.gp(), value.gp(), temp0);
break;
default:
UNREACHABLE();
}
}
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
store_conditional) \
do { \
Label compareExchange; \
Label exit; \
dbar(0); \
bind(&compareExchange); \
load_linked(result.gp(), MemOperand(temp0, 0)); \
BranchShort(&exit, ne, expected.gp(), Operand(result.gp())); \
mov(temp2, new_value.gp()); \
store_conditional(temp2, MemOperand(temp0, 0)); \
BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
bind(&exit); \
dbar(0); \
} while (0)
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
load_linked, store_conditional, size, aligned) \
do { \
Label compareExchange; \
Label exit; \
andi(temp1, temp0, aligned); \
Sub_d(temp0, temp0, Operand(temp1)); \
slli_w(temp1, temp1, 3); \
dbar(0); \
bind(&compareExchange); \
load_linked(temp2, MemOperand(temp0, 0)); \
ExtractBits(result.gp(), temp2, temp1, size, false); \
ExtractBits(temp2, expected.gp(), zero_reg, size, false); \
BranchShort(&exit, ne, temp2, Operand(result.gp())); \
InsertBits(temp2, new_value.gp(), temp1, size); \
store_conditional(temp2, MemOperand(temp0, 0)); \
BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
bind(&exit); \
dbar(0); \
} while (0)
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
bailout(kAtomics, "AtomicCompareExchange");
LiftoffRegList pinned = LiftoffRegList::ForRegs(dst_addr, offset_reg,
expected, new_value, result);
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Add_d(temp0, dst_op.base(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 8, 7);
break;
case StoreType::kI32Store8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, 8, 3);
break;
case StoreType::kI64Store16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 16, 7);
break;
case StoreType::kI32Store16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, 16, 3);
break;
case StoreType::kI64Store32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, 32, 7);
break;
case StoreType::kI32Store:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w);
break;
case StoreType::kI64Store:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d);
break;
default:
UNREACHABLE();
}
}
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
void LiftoffAssembler::AtomicFence() { dbar(0); }
@@ -977,7 +1242,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
bailout(kComplexOperation, "f32_copysign");
fcopysign_s(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -1004,7 +1269,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
bailout(kComplexOperation, "f64_copysign");
fcopysign_d(dst, lhs, rhs);
}
#define FP_BINOP(name, instruction) \
@@ -1240,55 +1505,87 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
movgr2fr_d(dst.fp(), src.gp());
return true;
case kExprI32SConvertSatF32:
bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
ftintrz_w_s(kScratchDoubleReg, src.fp());
movfr2gr_s(dst.gp(), kScratchDoubleReg);
return true;
case kExprI32UConvertSatF32:
bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
case kExprI32UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Ftintrz_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
bind(&isnan_or_lessthan_or_equal_zero);
return true;
}
case kExprI32SConvertSatF64:
bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
ftintrz_w_d(kScratchDoubleReg, src.fp());
movfr2gr_s(dst.gp(), kScratchDoubleReg);
return true;
case kExprI32UConvertSatF64:
bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
case kExprI32UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Ftintrz_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
bind(&isnan_or_lessthan_or_equal_zero);
return true;
}
case kExprI64SConvertSatF32:
bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
ftintrz_l_s(kScratchDoubleReg, src.fp());
movfr2gr_d(dst.gp(), kScratchDoubleReg);
return true;
case kExprI64UConvertSatF32:
bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
case kExprI64UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg);
bind(&isnan_or_lessthan_or_equal_zero);
return true;
}
case kExprI64SConvertSatF64:
bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
ftintrz_l_d(kScratchDoubleReg, src.fp());
movfr2gr_d(dst.gp(), kScratchDoubleReg);
return true;
case kExprI64UConvertSatF64:
bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
case kExprI64UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(src.fp(), kScratchDoubleReg, CULE);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg);
bind(&isnan_or_lessthan_or_equal_zero);
return true;
}
default:
return false;
}
}
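The ConvertSat cases above replace bailouts with real code: the signed conversions rely on ftintrz_* truncating toward zero (which saturates out-of-range results), while the unsigned paths first zero the destination and branch away when the input is NaN or less than or equal to zero, then use the Ftintrz_u* helpers. A hedged scalar model of the wasm saturating-conversion semantics being implemented, with the upper clamp written out explicitly:

// Sketch of i32.trunc_sat_f64_u semantics; not the V8 code path itself.
#include <cmath>
#include <cstdint>
#include <limits>

uint32_t i32_trunc_sat_f64_u(double x) {
  if (std::isnan(x) || x <= 0.0) return 0;  // "isnan_or_lessthan_or_equal_zero"
  if (x >= static_cast<double>(std::numeric_limits<uint32_t>::max()))
    return std::numeric_limits<uint32_t>::max();
  return static_cast<uint32_t>(x);          // truncate toward zero
}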
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
bailout(kComplexOperation, "i32_signextend_i8");
ext_w_b(dst, src);
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
bailout(kComplexOperation, "i32_signextend_i16");
ext_w_h(dst, src);
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kComplexOperation, "i64_signextend_i8");
ext_w_b(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kComplexOperation, "i64_signextend_i16");
ext_w_h(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kComplexOperation, "i64_signextend_i32");
slli_w(dst.gp(), src.gp(), 0);
}
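The sign-extension helpers above now map onto single instructions: ext_w_b and ext_w_h sign-extend from 8 and 16 bits, and slli_w with a shift of 0 sign-extends the low 32 bits into the full 64-bit register. Their effect is an ordinary integer sign extension:

// Sketch of the sign-extension semantics behind ext_w_b / ext_w_h / slli_w 0.
#include <cstdint>

int64_t signextend_i8(int64_t v)  { return static_cast<int8_t>(v); }   // ext_w_b
int64_t signextend_i16(int64_t v) { return static_cast<int16_t>(v); }  // ext_w_h
int64_t signextend_i32(int64_t v) { return static_cast<int32_t>(v); }  // slli_w(dst, src, 0)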
void LiftoffAssembler::emit_jump(Label* label) {
......
@@ -635,58 +635,299 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicLoad");
UseScratchRegisterScope temps(this);
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
switch (type.value()) {
case LoadType::kI32Load8U:
case LoadType::kI64Load8U: {
Lbu(dst.gp(), src_op);
sync();
return;
}
case LoadType::kI32Load16U:
case LoadType::kI64Load16U: {
Lhu(dst.gp(), src_op);
sync();
return;
}
case LoadType::kI32Load: {
Lw(dst.gp(), src_op);
sync();
return;
}
case LoadType::kI64Load32U: {
Lwu(dst.gp(), src_op);
sync();
return;
}
case LoadType::kI64Load: {
Ld(dst.gp(), src_op);
sync();
return;
}
default:
UNREACHABLE();
}
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicStore");
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
UseScratchRegisterScope temps(this);
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8: {
sync();
Sb(src.gp(), dst_op);
return;
}
case StoreType::kI64Store16:
case StoreType::kI32Store16: {
sync();
Sh(src.gp(), dst_op);
return;
}
case StoreType::kI64Store32:
case StoreType::kI32Store: {
sync();
Sw(src.gp(), dst_op);
return;
}
case StoreType::kI64Store: {
sync();
Sd(src.gp(), dst_op);
return;
}
default:
UNREACHABLE();
}
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
do { \
Label binop; \
sync(); \
bind(&binop); \
load_linked(result.gp(), MemOperand(temp0, 0)); \
bin_instr(temp1, result.gp(), Operand(value.gp())); \
store_conditional(temp1, MemOperand(temp0, 0)); \
BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
sync(); \
} while (0)
#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, size, \
bin_instr, aligned) \
do { \
Label binop; \
andi(temp3, temp0, aligned); \
Dsubu(temp0, temp0, Operand(temp3)); \
sll(temp3, temp3, 3); \
sync(); \
bind(&binop); \
load_linked(temp1, MemOperand(temp0, 0)); \
ExtractBits(result.gp(), temp1, temp3, size, false); \
bin_instr(temp2, result.gp(), value.gp()); \
InsertBits(temp1, temp2, temp3, size); \
store_conditional(temp1, MemOperand(temp0, 0)); \
BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
sync(); \
} while (0)
#define ATOMIC_BINOP_CASE(name, inst32, inst64) \
void LiftoffAssembler::Atomic##name( \
Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
LiftoffRegister value, LiftoffRegister result, StoreType type) { \
LiftoffRegList pinned = \
LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result); \
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
MemOperand dst_op = \
liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); \
Daddu(temp0, dst_op.rm(), dst_op.offset()); \
switch (type.value()) { \
case StoreType::kI64Store8: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst64, 7); \
break; \
case StoreType::kI32Store8: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst32, 3); \
break; \
case StoreType::kI64Store16: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst64, 7); \
break; \
case StoreType::kI32Store16: \
ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst32, 3); \
break; \
case StoreType::kI64Store32: \
ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 32, inst64, 7); \
break; \
case StoreType::kI32Store: \
ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
break; \
case StoreType::kI64Store: \
ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break; \
default: \
UNREACHABLE(); \
} \
}
ATOMIC_BINOP_CASE(Add, Addu, Daddu)
ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
ATOMIC_BINOP_CASE(And, And, And)
ATOMIC_BINOP_CASE(Or, Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC_BINOP_EXT
#undef ATOMIC_BINOP_CASE
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
do { \
Label exchange; \
sync(); \
bind(&exchange); \
load_linked(result.gp(), MemOperand(temp0, 0)); \
mov(temp1, value.gp()); \
store_conditional(temp1, MemOperand(temp0, 0)); \
BranchShort(&exchange, eq, temp1, Operand(zero_reg)); \
sync(); \
} while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
size, aligned) \
do { \
Label exchange; \
andi(temp1, temp0, aligned); \
Dsubu(temp0, temp0, Operand(temp1)); \
sll(temp1, temp1, 3); \
sync(); \
bind(&exchange); \
load_linked(temp2, MemOperand(temp0, 0)); \
ExtractBits(result.gp(), temp2, temp1, size, false); \
InsertBits(temp2, value.gp(), temp1, size); \
store_conditional(temp2, MemOperand(temp0, 0)); \
BranchShort(&exchange, eq, temp2, Operand(zero_reg)); \
sync(); \
} while (0)
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Daddu(temp0, dst_op.rm(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 7);
break;
case StoreType::kI32Store8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 3);
break;
case StoreType::kI64Store16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 7);
break;
case StoreType::kI32Store16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 3);
break;
case StoreType::kI64Store32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, 32, 7);
break;
case StoreType::kI32Store:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
break;
case StoreType::kI64Store:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
default:
UNREACHABLE();
}
}
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
store_conditional) \
do { \
Label compareExchange; \
Label exit; \
sync(); \
bind(&compareExchange); \
load_linked(result.gp(), MemOperand(temp0, 0)); \
BranchShort(&exit, ne, expected.gp(), Operand(result.gp())); \
mov(temp2, new_value.gp()); \
store_conditional(temp2, MemOperand(temp0, 0)); \
BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
bind(&exit); \
sync(); \
} while (0)
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
load_linked, store_conditional, size, aligned) \
do { \
Label compareExchange; \
Label exit; \
andi(temp1, temp0, aligned); \
Dsubu(temp0, temp0, Operand(temp1)); \
sll(temp1, temp1, 3); \
sync(); \
bind(&compareExchange); \
load_linked(temp2, MemOperand(temp0, 0)); \
ExtractBits(result.gp(), temp2, temp1, size, false); \
ExtractBits(temp2, expected.gp(), zero_reg, size, false); \
BranchShort(&exit, ne, temp2, Operand(result.gp())); \
InsertBits(temp2, new_value.gp(), temp1, size); \
store_conditional(temp2, MemOperand(temp0, 0)); \
BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
bind(&exit); \
sync(); \
} while (0)
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
bailout(kAtomics, "AtomicCompareExchange");
LiftoffRegList pinned = LiftoffRegList::ForRegs(dst_addr, offset_reg,
expected, new_value, result);
Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Daddu(temp0, dst_op.rm(), dst_op.offset());
switch (type.value()) {
case StoreType::kI64Store8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 7);
break;
case StoreType::kI32Store8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 3);
break;
case StoreType::kI64Store16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 7);
break;
case StoreType::kI32Store16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 3);
break;
case StoreType::kI64Store32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, 32, 7);
break;
case StoreType::kI32Store:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
break;
case StoreType::kI64Store:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
default:
UNREACHABLE();
}
}
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
void LiftoffAssembler::AtomicFence() { sync(); }
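The MIPS64 implementations mirror the LoongArch ones above, with sync() as the fence and Ll/Sc (or Lld/Scd) as the LL/SC pair. For the word-sized compare-exchange, the emitted loop is equivalent to an ordinary strong CAS that also hands back the old value; a hedged C++ analogue:

// Sketch: ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER for a 32-bit word behaves
// like compare_exchange_strong, with the loaded value returned in `result`.
#include <atomic>
#include <cstdint>

uint32_t atomic_cmpxchg_u32(std::atomic<uint32_t>* addr, uint32_t expected,
                            uint32_t new_value) {
  uint32_t old = expected;
  // On mismatch the emitted code branches to `exit` with the loaded value in
  // `result`; compare_exchange_strong leaves that value in `old` here.
  addr->compare_exchange_strong(old, new_value, std::memory_order_seq_cst);
  return old;
}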
@@ -1106,7 +1347,26 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
bailout(kComplexOperation, "f32_copysign");
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
DoubleRegister scratch = rhs;
if (dst == rhs) {
scratch = kScratchDoubleReg;
Move_d(scratch, rhs);
}
if (dst != lhs) {
Move_d(dst, lhs);
}
binsli_w(dst.toW(), scratch.toW(), 0);
} else {
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
Register scratch2 = temps.Acquire();
mfc1(scratch1, lhs);
mfc1(scratch2, rhs);
srl(scratch2, scratch2, 31);
Ins(scratch1, scratch2, 31, 1);
mtc1(scratch1, dst);
}
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -1133,7 +1393,26 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
bailout(kComplexOperation, "f64_copysign");
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
DoubleRegister scratch = rhs;
if (dst == rhs) {
scratch = kScratchDoubleReg;
Move_d(scratch, rhs);
}
if (dst != lhs) {
Move_d(dst, lhs);
}
binsli_d(dst.toW(), scratch.toW(), 0);
} else {
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
Register scratch2 = temps.Acquire();
dmfc1(scratch1, lhs);
dmfc1(scratch2, rhs);
dsrl32(scratch2, scratch2, 31);
Dins(scratch1, scratch2, 63, 1);
dmtc1(scratch1, dst);
}
}
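Without MSA, the MIPS64 copysign paths move both operands to general registers, shift the source sign bit down, and insert it into the top bit of the destination's bit pattern (bit 31 for f32, bit 63 for f64). A hedged bit-level model of the f32 fallback path:

// Sketch of the non-MSA f32 copysign path: mfc1, srl by 31, Ins into bit 31,
// mtc1. Implemented here with memcpy-based bit casts.
#include <cstdint>
#include <cstring>

float f32_copysign_bits(float lhs, float rhs) {
  uint32_t l, r;
  std::memcpy(&l, &lhs, sizeof l);        // mfc1 scratch1, lhs
  std::memcpy(&r, &rhs, sizeof r);        // mfc1 scratch2, rhs
  uint32_t sign = r >> 31;                // srl scratch2, scratch2, 31
  l = (l & 0x7FFFFFFFu) | (sign << 31);   // Ins scratch1, scratch2, 31, 1
  float out;
  std::memcpy(&out, &l, sizeof out);      // mtc1 scratch1, dst
  return out;
}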
#define FP_BINOP(name, instruction) \
@@ -1368,56 +1647,157 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprF64ReinterpretI64:
dmtc1(src.gp(), dst.fp());
return true;
case kExprI32SConvertSatF32:
bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
case kExprI32SConvertSatF32: {
// Other arches use round to zero here, so we follow.
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
trunc_w_s(kScratchDoubleReg, src.fp());
mfc1(dst.gp(), kScratchDoubleReg);
} else {
Label done;
mov(dst.gp(), zero_reg);
CompareIsNanF32(src.fp(), src.fp());
BranchTrueShortF(&done);
li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
TurboAssembler::Move(
kScratchDoubleReg,
static_cast<float>(std::numeric_limits<int32_t>::min()));
CompareF32(OLT, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&done);
trunc_w_s(kScratchDoubleReg, src.fp());
mfc1(dst.gp(), kScratchDoubleReg);
bind(&done);
}
return true;
case kExprI32UConvertSatF32:
bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
}
case kExprI32UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
bind(&isnan_or_lessthan_or_equal_zero);
return true;
case kExprI32SConvertSatF64:
bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
}
case kExprI32SConvertSatF64: {
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
trunc_w_d(kScratchDoubleReg, src.fp());
mfc1(dst.gp(), kScratchDoubleReg);
} else {
Label done;
mov(dst.gp(), zero_reg);
CompareIsNanF64(src.fp(), src.fp());
BranchTrueShortF(&done);
li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
TurboAssembler::Move(
kScratchDoubleReg,
static_cast<double>(std::numeric_limits<int32_t>::min()));
CompareF64(OLT, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&done);
trunc_w_d(kScratchDoubleReg, src.fp());
mfc1(dst.gp(), kScratchDoubleReg);
bind(&done);
}
return true;
case kExprI32UConvertSatF64:
bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
}
case kExprI32UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
bind(&isnan_or_lessthan_or_equal_zero);
return true;
case kExprI64SConvertSatF32:
bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
}
case kExprI64SConvertSatF32: {
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
trunc_l_s(kScratchDoubleReg, src.fp());
dmfc1(dst.gp(), kScratchDoubleReg);
} else {
Label done;
mov(dst.gp(), zero_reg);
CompareIsNanF32(src.fp(), src.fp());
BranchTrueShortF(&done);
li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
TurboAssembler::Move(
kScratchDoubleReg,
static_cast<float>(std::numeric_limits<int64_t>::min()));
CompareF32(OLT, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&done);
trunc_l_s(kScratchDoubleReg, src.fp());
dmfc1(dst.gp(), kScratchDoubleReg);
bind(&done);
}
return true;
case kExprI64UConvertSatF32:
bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
}
case kExprI64UConvertSatF32: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
CompareF32(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
bind(&isnan_or_lessthan_or_equal_zero);
return true;
case kExprI64SConvertSatF64:
bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
}
case kExprI64SConvertSatF64: {
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
trunc_l_d(kScratchDoubleReg, src.fp());
dmfc1(dst.gp(), kScratchDoubleReg);
} else {
Label done;
mov(dst.gp(), zero_reg);
CompareIsNanF64(src.fp(), src.fp());
BranchTrueShortF(&done);
li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
TurboAssembler::Move(
kScratchDoubleReg,
static_cast<double>(std::numeric_limits<int64_t>::min()));
CompareF64(OLT, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&done);
trunc_l_d(kScratchDoubleReg, src.fp());
dmfc1(dst.gp(), kScratchDoubleReg);
bind(&done);
}
return true;
case kExprI64UConvertSatF64:
bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
}
case kExprI64UConvertSatF64: {
Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg);
TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
CompareF64(ULE, src.fp(), kScratchDoubleReg);
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
bind(&isnan_or_lessthan_or_equal_zero);
return true;
}
default:
return false;
}
}
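Without MIPS_SIMD, the signed saturating conversions above spell out the edge cases: NaN produces 0, values below the destination minimum produce the minimum, and everything else goes through trunc_w_*/trunc_l_* (whose saturation covers the positive overflow side). A hedged scalar model of that control flow for the f64 -> i32 case, with the upper clamp written explicitly:

// Sketch of i32.trunc_sat_f64_s semantics as implemented by the fallback
// path; the emitted code relies on trunc_w_d for the positive-overflow clamp
// modeled by the explicit check here.
#include <cmath>
#include <cstdint>
#include <limits>

int32_t i32_trunc_sat_f64_s(double x) {
  if (std::isnan(x)) return 0;                                       // NaN branch
  if (x < static_cast<double>(std::numeric_limits<int32_t>::min()))
    return std::numeric_limits<int32_t>::min();                      // OLT branch
  if (x > static_cast<double>(std::numeric_limits<int32_t>::max()))
    return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(x);                                    // trunc_w_d + mfc1
}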
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
bailout(kComplexOperation, "i32_signextend_i8");
seb(dst, src);
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
bailout(kComplexOperation, "i32_signextend_i16");
seh(dst, src);
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kComplexOperation, "i64_signextend_i8");
seb(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kComplexOperation, "i64_signextend_i16");
seh(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kComplexOperation, "i64_signextend_i32");
sll(dst.gp(), src.gp(), 0);
}
void LiftoffAssembler::emit_jump(Label* label) {
......