Commit 52f07582 authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Implement i64 division and remainder

This adds support for i64.div_s, i64.div_u, i64.rem_s, and i64.rem_u.

R=ahaas@chromium.org

Bug: v8:6600
Change-Id: I37e564684b278c8d2f664a859851c67f4bd83190
Reviewed-on: https://chromium-review.googlesource.com/1027612
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52785}
parent 7f78e75a
......@@ -216,6 +216,31 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
// i64 division/remainder: no native implementation on this platform.
// Returning false tells LiftoffCompiler to emit a C-call fallback
// (EmitDivOrRem64CCall) instead, so the trap labels are unused here.
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
return false;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
......
......@@ -349,6 +349,35 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
// i64 division/remainder: unsupported on this platform. BAILOUT
// presumably abandons Liftoff compilation of the function (confirm
// against the BAILOUT macro definition); returning true prevents
// LiftoffCompiler from additionally emitting the C-call fallback.
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
BAILOUT("i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_remu");
return true;
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
......
......@@ -770,6 +770,31 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
if (dst != dst_tmp) Move(dst, dst_tmp, kWasmI64);
}
// i64 division/remainder: no native implementation on this platform.
// Returning false tells LiftoffCompiler to emit a C-call fallback
// (EmitDivOrRem64CCall) instead, so the trap labels are unused here.
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
return false;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
namespace liftoff {
inline bool PairContains(LiftoffRegister pair, Register reg) {
return pair.low_gp() == reg || pair.high_gp() == reg;
......
......@@ -419,6 +419,15 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
// i64 division and remainder. Each emitter returns true if native code
// was generated, or false if this platform has no native implementation,
// in which case the caller must emit a C call instead (see
// LiftoffCompiler::EmitDivOrRem64CCall). The trap labels receive control
// on division/remainder by zero and, for signed division, on the
// unrepresentable case (minimum i64 divided by -1).
inline bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, Label* trap_div_by_zero,
Label* trap_div_unrepresentable);
inline bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, Label* trap_div_by_zero);
inline bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, Label* trap_rem_by_zero);
inline bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, Label* trap_rem_by_zero);
inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
......
......@@ -452,7 +452,8 @@ class LiftoffCompiler {
DCHECK(!is_stack_check);
__ CallTrapCallbackForTesting();
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->StackParameterCount()));
return;
}
......@@ -751,6 +752,30 @@ class LiftoffCompiler {
__ PushRegister(result_type, dst);
}
// Emits a call to a C helper ({ext_ref}) computing an i64 division or
// remainder, used when the platform assembler reported no native
// support. The helper takes both i64 operands, writes the i64 result
// through an output argument (materialized into {dst}), and returns an
// i32 status in {ret}: the code below jumps to {trap_by_zero} when the
// status is 0 and, if {trap_unrepresentable} is provided (signed
// division only), to it when the status is -1.
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, ExternalReference ext_ref,
Label* trap_by_zero,
Label* trap_unrepresentable = nullptr) {
// Cannot emit native instructions, build C call.
// {ret} holds the helper's i32 status; {tmp} holds the constants we
// compare the status against. Both must not alias {dst}.
LiftoffRegister ret =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister tmp =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
LiftoffRegister arg_regs[] = {lhs, rhs};
LiftoffRegister result_regs[] = {ret, dst};
ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64};
// <i64, i64> -> i32 (with i64 output argument)
FunctionSig sig(1, 2, sig_types);
GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref);
// Status 0: divisor was zero.
__ LoadConstant(tmp, WasmValue(int32_t{0}));
__ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp());
if (trap_unrepresentable) {
// Status -1: result unrepresentable (signed min / -1).
__ LoadConstant(tmp, WasmValue(int32_t{-1}));
__ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(),
tmp.gp());
}
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
......@@ -923,6 +948,62 @@ class LiftoffCompiler {
__ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
});
break;
case WasmOpcode::kExprI64DivS:
EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
WasmCodePosition position = decoder->position();
AddOutOfLineTrap(position, Builtins::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
// the first one, thus get both pointers afterwards.
AddOutOfLineTrap(position,
Builtins::kThrowWasmTrapDivUnrepresentable);
Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
div_unrepresentable)) {
ExternalReference ext_ref = ExternalReference::wasm_int64_div();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero,
div_unrepresentable);
}
});
break;
case WasmOpcode::kExprI64DivU:
EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* div_by_zero = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapDivByZero);
if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
}
});
break;
case WasmOpcode::kExprI64RemS:
EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapRemByZero);
if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
}
});
break;
case WasmOpcode::kExprI64RemU:
EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapRemByZero);
if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
}
});
break;
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
......
......@@ -632,6 +632,31 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
kScratchReg, kScratchReg2);
}
// i64 division/remainder: no native implementation on this platform.
// Returning false tells LiftoffCompiler to emit a C-call fallback
// (EmitDivOrRem64CCall) instead, so the trap labels are unused here.
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
return false;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
return false;
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
......
......@@ -552,6 +552,35 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
}
// i64 division/remainder: unsupported on this platform. BAILOUT
// presumably abandons Liftoff compilation of the function (confirm
// against the BAILOUT macro definition); returning true prevents
// LiftoffCompiler from additionally emitting the C-call fallback.
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
BAILOUT("i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_remu");
return true;
}
#define I64_BINOP(name, instruction) \
void LiftoffAssembler::emit_i64_##name( \
LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
......
......@@ -221,6 +221,35 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
// i64 division/remainder: unsupported on this platform. BAILOUT
// presumably abandons Liftoff compilation of the function (confirm
// against the BAILOUT macro definition); returning true prevents
// LiftoffCompiler from additionally emitting the C-call fallback.
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
BAILOUT("i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_remu");
return true;
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
......
......@@ -221,6 +221,35 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
// i64 division/remainder: unsupported on this platform. BAILOUT
// presumably abandons Liftoff compilation of the function (confirm
// against the BAILOUT macro definition); returning true prevents
// LiftoffCompiler from additionally emitting the C-call fallback.
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
BAILOUT("i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
BAILOUT("i64_remu");
return true;
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
......
......@@ -445,36 +445,49 @@ void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
namespace liftoff {
enum class DivOrRem : uint8_t { kDiv, kRem };
template <bool is_signed, DivOrRem div_or_rem>
void EmitInt32DivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
Register rhs, Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
template <typename type, DivOrRem div_or_rem>
void EmitIntDivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
Register rhs, Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
constexpr bool needs_unrepresentable_check =
is_signed && div_or_rem == DivOrRem::kDiv;
std::is_signed<type>::value && div_or_rem == DivOrRem::kDiv;
constexpr bool special_case_minus_1 =
is_signed && div_or_rem == DivOrRem::kRem;
std::is_signed<type>::value && div_or_rem == DivOrRem::kRem;
DCHECK_EQ(needs_unrepresentable_check, trap_div_unrepresentable != nullptr);
#define iop(name, ...) \
do { \
if (sizeof(type) == 4) { \
assm->name##l(__VA_ARGS__); \
} else { \
assm->name##q(__VA_ARGS__); \
} \
} while (false)
// Check for division by zero.
assm->testl(rhs, rhs);
iop(test, rhs, rhs);
assm->j(zero, trap_div_by_zero);
Label done;
if (needs_unrepresentable_check) {
// Check for {kMinInt / -1}. This is unrepresentable.
Label do_div;
assm->cmpl(rhs, Immediate(-1));
iop(cmp, rhs, Immediate(-1));
assm->j(not_equal, &do_div);
assm->cmpl(lhs, Immediate(kMinInt));
assm->j(equal, trap_div_unrepresentable);
// {lhs} is min int if {lhs - 1} overflows.
iop(cmp, lhs, Immediate(1));
assm->j(overflow, trap_div_unrepresentable);
assm->bind(&do_div);
} else if (special_case_minus_1) {
// {lhs % -1} is always 0 (needs to be special cased because {kMinInt / -1}
// cannot be computed).
Label do_rem;
assm->cmpl(rhs, Immediate(-1));
iop(cmp, rhs, Immediate(-1));
assm->j(not_equal, &do_rem);
assm->xorl(dst, dst);
// clang-format off
// (conflicts with presubmit checks because it is confused about "xor")
iop(xor, dst, dst);
// clang-format on
assm->jmp(&done);
assm->bind(&do_rem);
}
......@@ -486,24 +499,32 @@ void EmitInt32DivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
if (rhs == rax || rhs == rdx) {
LiftoffRegList unavailable = LiftoffRegList::ForRegs(rax, rdx, lhs);
Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp();
assm->movl(tmp, rhs);
iop(mov, tmp, rhs);
rhs = tmp;
}
// Now move {lhs} into {eax}, then zero-extend or sign-extend into {edx}, then
// do the division.
if (lhs != rax) assm->movl(rax, lhs);
if (is_signed) {
if (lhs != rax) iop(mov, rax, lhs);
if (std::is_same<int32_t, type>::value) { // i32
assm->cdq();
assm->idivl(rhs);
} else {
} else if (std::is_same<uint32_t, type>::value) { // u32
assm->xorl(rdx, rdx);
assm->divl(rhs);
} else if (std::is_same<int64_t, type>::value) { // i64
assm->cqo();
assm->idivq(rhs);
} else { // u64
assm->xorq(rdx, rdx);
assm->divq(rhs);
}
// Move back the result (in {eax} or {edx}) into the {dst} register.
constexpr Register kResultReg = div_or_rem == DivOrRem::kDiv ? rax : rdx;
if (dst != kResultReg) assm->movl(dst, kResultReg);
if (dst != kResultReg) {
iop(mov, dst, kResultReg);
}
if (special_case_minus_1) assm->bind(&done);
}
} // namespace liftoff
......@@ -511,25 +532,25 @@ void EmitInt32DivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
liftoff::EmitInt32DivOrRem<true, liftoff::DivOrRem::kDiv>(
liftoff::EmitIntDivOrRem<int32_t, liftoff::DivOrRem::kDiv>(
this, dst, lhs, rhs, trap_div_by_zero, trap_div_unrepresentable);
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
liftoff::EmitInt32DivOrRem<false, liftoff::DivOrRem::kDiv>(
liftoff::EmitIntDivOrRem<uint32_t, liftoff::DivOrRem::kDiv>(
this, dst, lhs, rhs, trap_div_by_zero, nullptr);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
liftoff::EmitInt32DivOrRem<true, liftoff::DivOrRem::kRem>(
liftoff::EmitIntDivOrRem<int32_t, liftoff::DivOrRem::kRem>(
this, dst, lhs, rhs, trap_div_by_zero, nullptr);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
liftoff::EmitInt32DivOrRem<false, liftoff::DivOrRem::kRem>(
liftoff::EmitIntDivOrRem<uint32_t, liftoff::DivOrRem::kRem>(
this, dst, lhs, rhs, trap_div_by_zero, nullptr);
}
......@@ -669,6 +690,40 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
this, dst.gp(), lhs.gp(), rhs.gp());
}
// Native i64 division/remainder: this platform (x64, per the
// idivq/divq usage in EmitIntDivOrRem above) emits the operations
// inline through the shared EmitIntDivOrRem template, instantiated on
// the 64-bit element type and the div-vs-rem selector. Returning true
// tells LiftoffCompiler that no C-call fallback is needed.
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
// Signed division needs the extra unrepresentable-result trap
// (minimum i64 / -1).
liftoff::EmitIntDivOrRem<int64_t, liftoff::DivOrRem::kDiv>(
this, dst.gp(), lhs.gp(), rhs.gp(), trap_div_by_zero,
trap_div_unrepresentable);
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
liftoff::EmitIntDivOrRem<uint64_t, liftoff::DivOrRem::kDiv>(
this, dst.gp(), lhs.gp(), rhs.gp(), trap_div_by_zero, nullptr);
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
liftoff::EmitIntDivOrRem<int64_t, liftoff::DivOrRem::kRem>(
this, dst.gp(), lhs.gp(), rhs.gp(), trap_div_by_zero, nullptr);
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
liftoff::EmitIntDivOrRem<uint64_t, liftoff::DivOrRem::kRem>(
this, dst.gp(), lhs.gp(), rhs.gp(), trap_div_by_zero, nullptr);
return true;
}
void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::andq, &Assembler::movq>(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment