Commit bafb568b authored by jarin's avatar jarin Committed by Commit bot

[turbofan] Add Int64(Add|Sub)WithOverflow support.

Review URL: https://codereview.chromium.org/1544743004

Cr-Commit-Position: refs/heads/master@{#33039}
parent ac33eaba
......@@ -212,6 +212,26 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
}
// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed summation resulted in an overflow.
inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
  // Do the arithmetic on unsigned values, where wrap-around is well defined.
  const uint64_t ulhs = static_cast<uint64_t>(lhs);
  const uint64_t urhs = static_cast<uint64_t>(rhs);
  const uint64_t sum = ulhs + urhs;
  *val = bit_cast<int64_t>(sum);
  // Overflow happened iff both operands have the same sign and the result's
  // sign differs from it: (sum ^ lhs) and (sum ^ rhs) both have bit 63 set.
  return (((sum ^ ulhs) & (sum ^ urhs)) >> 63) != 0;
}
// SignedSubOverflow64(lhs,rhs,val) performs a signed subtraction of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed subtraction resulted in an overflow.
inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
  // Do the arithmetic on unsigned values, where wrap-around is well defined.
  const uint64_t ulhs = static_cast<uint64_t>(lhs);
  const uint64_t urhs = static_cast<uint64_t>(rhs);
  const uint64_t diff = ulhs - urhs;
  *val = bit_cast<int64_t>(diff);
  // Overflow happened iff the operands have different signs and the result's
  // sign differs from the minuend's sign.
  return (((ulhs ^ urhs) & (ulhs ^ diff)) >> 63) != 0;
}
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
// |rhs|, extracts the most significant 32 bits of the result, and returns
// those.
......
......@@ -657,8 +657,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Add:
if (FlagsModeField::decode(opcode) != kFlags_none) {
__ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
i.InputOperand2_64(1));
} else {
__ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
i.InputOperand2_64(1));
}
break;
case kArm64Add32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
......@@ -800,8 +805,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputOperand2_32(1));
break;
case kArm64Sub:
if (FlagsModeField::decode(opcode) != kFlags_none) {
__ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
i.InputOperand2_64(1));
} else {
__ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
i.InputOperand2_64(1));
}
break;
case kArm64Sub32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
......
......@@ -1847,6 +1847,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
kArithmeticImm, &cont);
case IrOpcode::kInt64AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
kArithmeticImm, &cont);
case IrOpcode::kInt64SubWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
kArithmeticImm, &cont);
default:
break;
}
......@@ -2034,6 +2042,28 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
// Instruction selection for Int64AddWithOverflow (arm64). If the overflow
// projection is observed, the add is combined with the overflow check via a
// flags continuation; otherwise it is emitted with an empty continuation.
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  Node* const overflow_use = NodeProperties::FindProjection(node, 1);
  if (overflow_use != nullptr) {
    FlagsContinuation cont(kOverflow, overflow_use);
    VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
                                  &cont);
    return;
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
}
// Instruction selection for Int64SubWithOverflow (arm64). If the overflow
// projection is observed, the sub is combined with the overflow check via a
// flags continuation; otherwise it is emitted with an empty continuation.
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  Node* const overflow_use = NodeProperties::FindProjection(node, 1);
  if (overflow_use != nullptr) {
    FlagsContinuation cont(kOverflow, overflow_use);
    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                  &cont);
    return;
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
}
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
......
......@@ -836,8 +836,12 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
return MarkAsWord64(node), VisitInt64Add(node);
case IrOpcode::kInt64AddWithOverflow:
return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
case IrOpcode::kInt64Sub:
return MarkAsWord64(node), VisitInt64Sub(node);
case IrOpcode::kInt64SubWithOverflow:
return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
case IrOpcode::kInt64Mul:
return MarkAsWord64(node), VisitInt64Mul(node);
case IrOpcode::kInt64Div:
......@@ -1079,9 +1083,19 @@ void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
// Generic fallback for targets that do not select Int64AddWithOverflow;
// presumably only compiled on 32-bit targets (nearby Int64 visitors are all
// UNIMPLEMENTED here) — TODO confirm the surrounding #if guard.
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
// Generic fallback for targets that do not select Int64SubWithOverflow;
// presumably only compiled on 32-bit targets (nearby Int64 visitors are all
// UNIMPLEMENTED here) — TODO confirm the surrounding #if guard.
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
......@@ -1251,6 +1265,8 @@ void InstructionSelector::VisitProjection(Node* node) {
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
case IrOpcode::kInt64AddWithOverflow:
case IrOpcode::kInt64SubWithOverflow:
case IrOpcode::kTryTruncateFloat32ToInt64:
case IrOpcode::kTryTruncateFloat64ToInt64:
case IrOpcode::kTryTruncateFloat32ToUint64:
......
......@@ -128,7 +128,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
0, 2) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Int64SubWithOverflow, Operator::kNoProperties, 2, 0, 2) \
V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
......
......@@ -192,7 +192,9 @@ class MachineOperatorBuilder final : public ZoneObject {
bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
const Operator* Int64Add();
const Operator* Int64AddWithOverflow();
const Operator* Int64Sub();
const Operator* Int64SubWithOverflow();
const Operator* Int64Mul();
const Operator* Int64Div();
const Operator* Int64Mod();
......
......@@ -641,12 +641,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Dadd:
__ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64DaddOvf:
// Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMips64Sub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dsub:
__ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64DsubOvf:
// Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMips64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
......@@ -1388,6 +1394,34 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64DaddOvf) {
switch (branch->condition) {
case kOverflow:
__ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
break;
case kNotOverflow:
__ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), flabel, tlabel);
break;
default:
UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
break;
}
} else if (instr->arch_opcode() == kMips64DsubOvf) {
switch (branch->condition) {
case kOverflow:
__ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
break;
case kNotOverflow:
__ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), flabel, tlabel);
break;
default:
UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
break;
}
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
......@@ -1462,6 +1496,28 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (cc == eq) // Toggle result for not overflow.
__ xori(result, result, 1);
return;
} else if (instr->arch_opcode() == kMips64DaddOvf ||
instr->arch_opcode() == kMips64DsubOvf) {
Label flabel, tlabel;
switch (instr->arch_opcode()) {
case kMips64DaddOvf:
__ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), &flabel);
break;
case kMips64DsubOvf:
__ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), &flabel);
break;
default:
UNREACHABLE();
break;
}
__ li(result, 1);
__ Branch(&tlabel);
__ bind(&flabel);
__ li(result, 0);
__ bind(&tlabel);
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
switch (cc) {
......
......@@ -14,8 +14,10 @@ namespace compiler {
#define TARGET_ARCH_OPCODE_LIST(V) \
V(Mips64Add) \
V(Mips64Dadd) \
V(Mips64DaddOvf) \
V(Mips64Sub) \
V(Mips64Dsub) \
V(Mips64DsubOvf) \
V(Mips64Mul) \
V(Mips64MulHigh) \
V(Mips64DMulHigh) \
......
......@@ -1608,6 +1608,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64Dsub, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64DaddOvf, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64DsubOvf, cont);
default:
break;
}
......@@ -1718,6 +1724,26 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
// Instruction selection for Int64AddWithOverflow (mips64). Uses the
// pseudo-instruction kMips64DaddOvf, whose code generation performs the
// overflow branch/materialization itself. If the overflow projection is
// observed, combine it via a flags continuation.
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  Node* const overflow_use = NodeProperties::FindProjection(node, 1);
  if (overflow_use != nullptr) {
    FlagsContinuation cont(kOverflow, overflow_use);
    VisitBinop(this, node, kMips64DaddOvf, &cont);
    return;
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64DaddOvf, &cont);
}
// Instruction selection for Int64SubWithOverflow (mips64). Uses the
// pseudo-instruction kMips64DsubOvf, whose code generation performs the
// overflow branch/materialization itself. If the overflow projection is
// observed, combine it via a flags continuation.
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  Node* const overflow_use = NodeProperties::FindProjection(node, 1);
  if (overflow_use != nullptr) {
    FlagsContinuation cont(kOverflow, overflow_use);
    VisitBinop(this, node, kMips64DsubOvf, &cont);
    return;
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64DsubOvf, &cont);
}
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int64BinopMatcher m(node);
......
......@@ -258,7 +258,9 @@
V(Uint32Mod) \
V(Uint32MulHigh) \
V(Int64Add) \
V(Int64AddWithOverflow) \
V(Int64Sub) \
V(Int64SubWithOverflow) \
V(Int64Mul) \
V(Int64Div) \
V(Int64Mod) \
......
......@@ -280,9 +280,15 @@ class RawMachineAssembler {
Node* Int64Add(Node* a, Node* b) {
return AddNode(machine()->Int64Add(), a, b);
}
// Emits an Int64AddWithOverflow node: projection 0 is the 64-bit sum,
// projection 1 is the overflow indicator (the operator has 2 value outputs).
Node* Int64AddWithOverflow(Node* a, Node* b) {
return AddNode(machine()->Int64AddWithOverflow(), a, b);
}
Node* Int64Sub(Node* a, Node* b) {
return AddNode(machine()->Int64Sub(), a, b);
}
// Emits an Int64SubWithOverflow node: projection 0 is the 64-bit difference,
// projection 1 is the overflow indicator (the operator has 2 value outputs).
Node* Int64SubWithOverflow(Node* a, Node* b) {
return AddNode(machine()->Int64SubWithOverflow(), a, b);
}
Node* Int64Mul(Node* a, Node* b) {
return AddNode(machine()->Int64Mul(), a, b);
}
......
......@@ -2081,9 +2081,19 @@ Type* Typer::Visitor::TypeUint32MulHigh(Node* node) {
Type* Typer::Visitor::TypeInt64Add(Node* node) { return Type::Internal(); }
// Machine-level 64-bit operator: typed as Internal, like the other Int64
// arithmetic visitors in this section.
Type* Typer::Visitor::TypeInt64AddWithOverflow(Node* node) {
return Type::Internal();
}
Type* Typer::Visitor::TypeInt64Sub(Node* node) { return Type::Internal(); }
// Machine-level 64-bit operator: typed as Internal, like the other Int64
// arithmetic visitors in this section.
Type* Typer::Visitor::TypeInt64SubWithOverflow(Node* node) {
return Type::Internal();
}
Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
......
......@@ -861,7 +861,9 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kInt64Add:
case IrOpcode::kInt64AddWithOverflow:
case IrOpcode::kInt64Sub:
case IrOpcode::kInt64SubWithOverflow:
case IrOpcode::kInt64Mul:
case IrOpcode::kInt64Div:
case IrOpcode::kInt64Mod:
......
......@@ -655,6 +655,16 @@ void InstructionSelector::VisitInt64Add(Node* node) {
}
// Instruction selection for Int64AddWithOverflow (x64). If the overflow
// projection is observed, combine the add with the overflow check via a
// flags continuation; otherwise emit a plain add.
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    // The return is essential: without it, control falls through and the
    // binop below is visited a second time, emitting a duplicate add (the
    // sibling VisitInt64SubWithOverflow and the arm64/mips64 versions all
    // return here).
    return VisitBinop(this, node, kX64Add, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add, &cont);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
X64OperandGenerator g(this);
Int32BinopMatcher m(node);
......@@ -685,6 +695,16 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
}
// Instruction selection for Int64SubWithOverflow (x64). If the overflow
// projection is observed, combine the sub with the overflow check via a
// flags continuation; otherwise emit a plain sub.
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  Node* const overflow_use = NodeProperties::FindProjection(node, 1);
  if (overflow_use != nullptr) {
    FlagsContinuation cont(kOverflow, overflow_use);
    VisitBinop(this, node, kX64Sub, &cont);
    return;
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub, &cont);
}
namespace {
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
......@@ -1497,6 +1517,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
case IrOpcode::kInt32SubWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(this, node, kX64Sub32, &cont);
case IrOpcode::kInt64AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(this, node, kX64Add, &cont);
case IrOpcode::kInt64SubWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(this, node, kX64Sub, &cont);
default:
break;
}
......
......@@ -4730,6 +4730,89 @@ void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
}
// Dispatches on the sign bit of |overflow_dst| (negative means the caller's
// computed overflow condition is true): branches to |overflow_label| on
// overflow and to |no_overflow_label| otherwise. Either label may be null,
// but not both.
static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
                                   Label* overflow_label,
                                   Label* no_overflow_label) {
  DCHECK(overflow_label || no_overflow_label);
  if (overflow_label) {
    masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
    // Falling through means no overflow; jump explicitly if a target exists.
    if (no_overflow_label) masm->Branch(no_overflow_label);
  } else {
    // Only the no-overflow target was supplied; branch on the inverse test.
    masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
  }
}
// Computes dst = left + right (64-bit) and branches to |overflow_label| if
// the signed addition overflowed, or to |no_overflow_label| if it did not
// (either label may be null). Clobbers t9 and |scratch|.
void MacroAssembler::DaddBranchOvf(Register dst, Register left,
                                   const Operand& right, Label* overflow_label,
                                   Label* no_overflow_label, Register scratch) {
  if (right.is_reg()) {
    // Register operand: defer to the register variant.
    DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
                  scratch);
  } else {
    Register overflow_dst = t9;
    DCHECK(!dst.is(scratch));
    DCHECK(!dst.is(overflow_dst));
    DCHECK(!scratch.is(overflow_dst));
    DCHECK(!left.is(overflow_dst));
    li(overflow_dst, right);  // Load right.
    // Overflow iff the result's sign differs from the signs of both operands,
    // i.e. (dst ^ left) & (dst ^ right) has its sign bit set.
    if (dst.is(left)) {
      mov(scratch, left);              // Preserve left, which dst will clobber.
      Daddu(dst, left, overflow_dst);  // Left is overwritten.
      xor_(scratch, dst, scratch);     // dst ^ original left.
      xor_(overflow_dst, dst, overflow_dst);  // dst ^ right.
      and_(overflow_dst, overflow_dst, scratch);
    } else {
      Daddu(dst, left, overflow_dst);
      xor_(scratch, dst, overflow_dst);  // dst ^ right.
      xor_(overflow_dst, dst, left);     // dst ^ left.
      and_(overflow_dst, scratch, overflow_dst);
    }
    // Branch on the sign bit of overflow_dst.
    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
  }
}
// Computes dst = left + right (64-bit) and branches to |overflow_label| if
// the signed addition overflowed, or to |no_overflow_label| if it did not
// (either label may be null). Clobbers t9 and |scratch|; handles dst
// aliasing either (or both) inputs.
void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
                                   Label* overflow_label,
                                   Label* no_overflow_label, Register scratch) {
  Register overflow_dst = t9;
  DCHECK(!dst.is(scratch));
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!scratch.is(overflow_dst));
  DCHECK(!left.is(overflow_dst));
  DCHECK(!right.is(overflow_dst));
  DCHECK(!left.is(scratch));
  DCHECK(!right.is(scratch));
  if (left.is(right) && dst.is(left)) {
    // All three alias: copy the addend so it survives the write to dst.
    mov(overflow_dst, right);
    right = overflow_dst;
  }
  // Overflow iff the result's sign differs from the signs of both operands,
  // i.e. (dst ^ left) & (dst ^ right) has its sign bit set.
  if (dst.is(left)) {
    mov(scratch, left);       // Preserve left, which dst will clobber.
    daddu(dst, left, right);  // Left is overwritten.
    xor_(scratch, dst, scratch);  // dst ^ original left.
    xor_(overflow_dst, dst, right);
    and_(overflow_dst, overflow_dst, scratch);
  } else if (dst.is(right)) {
    mov(scratch, right);      // Preserve right, which dst will clobber.
    daddu(dst, left, right);  // Right is overwritten.
    xor_(scratch, dst, scratch);  // dst ^ original right.
    xor_(overflow_dst, dst, left);
    and_(overflow_dst, overflow_dst, scratch);
  } else {
    daddu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, dst, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
  // Branch on the sign bit of overflow_dst.
  BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
const Operand& right,
Register overflow_dst,
......@@ -4861,6 +4944,83 @@ void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
}
}
// Computes dst = left - right (64-bit) and branches to |overflow_label| if
// the signed subtraction overflowed, or to |no_overflow_label| if it did not
// (either label may be null). Clobbers t9 and |scratch|.
void MacroAssembler::DsubBranchOvf(Register dst, Register left,
                                   const Operand& right, Label* overflow_label,
                                   Label* no_overflow_label, Register scratch) {
  DCHECK(overflow_label || no_overflow_label);
  if (right.is_reg()) {
    // Register operand: defer to the register variant.
    DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
                  scratch);
  } else {
    Register overflow_dst = t9;
    DCHECK(!dst.is(scratch));
    DCHECK(!dst.is(overflow_dst));
    DCHECK(!scratch.is(overflow_dst));
    DCHECK(!left.is(overflow_dst));
    DCHECK(!left.is(scratch));
    li(overflow_dst, right);  // Load right.
    // Overflow iff the operands have different signs and the result's sign
    // differs from left's, i.e. (left ^ right) & (dst ^ left) is negative.
    if (dst.is(left)) {
      mov(scratch, left);              // Preserve left, which dst clobbers.
      Dsubu(dst, left, overflow_dst);  // Left is overwritten.
      xor_(overflow_dst, scratch, overflow_dst);  // original left ^ right.
      xor_(scratch, dst, scratch);                // dst ^ original left.
      and_(overflow_dst, scratch, overflow_dst);
    } else {
      Dsubu(dst, left, overflow_dst);
      xor_(scratch, left, overflow_dst);  // left ^ right.
      xor_(overflow_dst, dst, left);      // dst ^ left.
      and_(overflow_dst, scratch, overflow_dst);
    }
    // Branch on the sign bit of overflow_dst.
    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
  }
}
// Computes dst = left - right (64-bit) and branches to |overflow_label| if
// the signed subtraction overflowed, or to |no_overflow_label| if it did not
// (either label may be null). Clobbers t9 and |scratch|; handles dst
// aliasing either input.
void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
                                   Label* overflow_label,
                                   Label* no_overflow_label, Register scratch) {
  DCHECK(overflow_label || no_overflow_label);
  Register overflow_dst = t9;
  DCHECK(!dst.is(scratch));
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!scratch.is(overflow_dst));
  DCHECK(!overflow_dst.is(left));
  DCHECK(!overflow_dst.is(right));
  DCHECK(!scratch.is(left));
  DCHECK(!scratch.is(right));
  // This happens with some crankshaft code. Since Subu works fine if
  // left == right, let's not make that restriction here.
  if (left.is(right)) {
    // x - x is 0 and can never overflow: short-circuit straight to the
    // no-overflow target when one exists. When only |overflow_label| was
    // supplied, fall through; the general sequence below then computes 0 and
    // a non-negative overflow_dst, so the overflow branch is not taken.
    mov(dst, zero_reg);
    if (no_overflow_label) {
      Branch(no_overflow_label);
    }
  }
  // Overflow iff the operands have different signs and the result's sign
  // differs from left's, i.e. (left ^ right) & (dst ^ left) is negative.
  if (dst.is(left)) {
    mov(scratch, left);       // Preserve left, which dst clobbers.
    dsubu(dst, left, right);  // Left is overwritten.
    xor_(overflow_dst, dst, scratch);  // dst ^ original left.
    xor_(scratch, scratch, right);     // original left ^ right.
    and_(overflow_dst, scratch, overflow_dst);
  } else if (dst.is(right)) {
    mov(scratch, right);      // Preserve right, which dst clobbers.
    dsubu(dst, left, right);  // Right is overwritten.
    xor_(overflow_dst, dst, left);  // dst ^ left.
    xor_(scratch, left, scratch);   // left ^ original right.
    and_(overflow_dst, scratch, overflow_dst);
  } else {
    dsubu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
  // Branch on the sign bit of overflow_dst.
  BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
......
......@@ -1266,6 +1266,24 @@ class MacroAssembler: public Assembler {
const Operand& right, Register overflow_dst,
Register scratch);
inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Register scratch = at) {
DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
}
inline void DaddBranchNoOvf(Register dst, Register left, const Operand& right,
Label* no_overflow_label, Register scratch = at) {
DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
}
void DaddBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
void DaddBranchOvf(Register dst, Register left, Register right,
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = at);
......@@ -1273,6 +1291,24 @@ class MacroAssembler: public Assembler {
const Operand& right, Register overflow_dst,
Register scratch);
inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Register scratch = at) {
DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
}
inline void DsubBranchNoOvf(Register dst, Register left, const Operand& right,
Label* no_overflow_label, Register scratch = at) {
DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
}
void DsubBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
void DsubBranchOvf(Register dst, Register left, Register right,
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
......
......@@ -369,6 +369,16 @@ class Int32BinopTester : public BinopTester<int32_t, USE_RETURN_REGISTER> {
};
// A helper class for testing code sequences that take two int64 parameters
// and return an int64 value through the return register.
class Int64BinopTester : public BinopTester<int64_t, USE_RETURN_REGISTER> {
 public:
  explicit Int64BinopTester(RawMachineAssemblerTester<int32_t>* tester)
      : BinopTester<int64_t, USE_RETURN_REGISTER>(tester,
                                                  MachineType::Int64()) {}
};
// A helper class for testing code sequences that take two uint parameters and
// return an uint value.
class Uint32BinopTester : public BinopTester<uint32_t, USE_RETURN_REGISTER> {
......
......@@ -416,6 +416,190 @@ TEST(CodeGenInt64Binop) {
}
// Exercises Int64AddWithOverflow with both operands as parameters: the value
// projection is written through a pointer, the overflow projection is
// returned, and both are checked against the base::bits reference.
TEST(RunInt64AddWithOverflowP) {
  int64_t actual_val = -1;
  RawMachineAssemblerTester<int32_t> m;
  Int64BinopTester bt(&m);
  Node* add = m.Int64AddWithOverflow(bt.param0, bt.param1);
  Node* val = m.Projection(0, add);
  Node* ovf = m.Projection(1, add);
  m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
  bt.AddReturn(ovf);
  FOR_INT64_INPUTS(i) {
    FOR_INT64_INPUTS(j) {
      int64_t expected_val;
      int expected_ovf = bits::SignedAddOverflow64(*i, *j, &expected_val);
      CHECK_EQ(expected_ovf, bt.call(*i, *j));
      CHECK_EQ(expected_val, actual_val);
    }
  }
}
// Exercises Int64AddWithOverflow with immediate operands: constant on the
// left, constant on the right, and both operands constant, to cover the
// instruction selector's immediate-matching paths.
TEST(RunInt64AddWithOverflowImm) {
  int64_t actual_val = -1, expected_val = 0;
  FOR_INT64_INPUTS(i) {
    {
      // Constant on the left, parameter on the right.
      RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
      Node* add = m.Int64AddWithOverflow(m.Int64Constant(*i), m.Parameter(0));
      Node* val = m.Projection(0, add);
      Node* ovf = m.Projection(1, add);
      m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
      m.Return(ovf);
      FOR_INT64_INPUTS(j) {
        int expected_ovf = bits::SignedAddOverflow64(*i, *j, &expected_val);
        CHECK_EQ(expected_ovf, m.Call(*j));
        CHECK_EQ(expected_val, actual_val);
      }
    }
    {
      // Parameter on the left, constant on the right.
      RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
      Node* add = m.Int64AddWithOverflow(m.Parameter(0), m.Int64Constant(*i));
      Node* val = m.Projection(0, add);
      Node* ovf = m.Projection(1, add);
      m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
      m.Return(ovf);
      FOR_INT64_INPUTS(j) {
        int expected_ovf = bits::SignedAddOverflow64(*i, *j, &expected_val);
        CHECK_EQ(expected_ovf, m.Call(*j));
        CHECK_EQ(expected_val, actual_val);
      }
    }
    // Both operands constant.
    FOR_INT64_INPUTS(j) {
      RawMachineAssemblerTester<int32_t> m;
      Node* add =
          m.Int64AddWithOverflow(m.Int64Constant(*i), m.Int64Constant(*j));
      Node* val = m.Projection(0, add);
      Node* ovf = m.Projection(1, add);
      m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
      m.Return(ovf);
      int expected_ovf = bits::SignedAddOverflow64(*i, *j, &expected_val);
      CHECK_EQ(expected_ovf, m.Call());
      CHECK_EQ(expected_val, actual_val);
    }
  }
}
// Exercises Int64AddWithOverflow when the overflow projection feeds a branch
// (the flags-continuation fusion path): on overflow a sentinel constant is
// returned, otherwise the truncated 32-bit sum.
TEST(RunInt64AddWithOverflowInBranchP) {
  int constant = 911777;
  RawMachineLabel blocka, blockb;
  RawMachineAssemblerTester<int32_t> m;
  Int64BinopTester bt(&m);
  Node* add = m.Int64AddWithOverflow(bt.param0, bt.param1);
  Node* ovf = m.Projection(1, add);
  m.Branch(ovf, &blocka, &blockb);
  m.Bind(&blocka);
  bt.AddReturn(m.Int64Constant(constant));
  m.Bind(&blockb);
  Node* val = m.Projection(0, add);
  Node* truncated = m.TruncateInt64ToInt32(val);
  bt.AddReturn(truncated);
  FOR_INT64_INPUTS(i) {
    FOR_INT64_INPUTS(j) {
      int32_t expected = constant;
      int64_t result;
      if (!bits::SignedAddOverflow64(*i, *j, &result)) {
        // No overflow: expect the truncated sum instead of the sentinel.
        expected = static_cast<int32_t>(result);
      }
      CHECK_EQ(expected, bt.call(*i, *j));
    }
  }
}
// Exercises Int64SubWithOverflow with both operands as parameters: the value
// projection is written through a pointer, the overflow projection is
// returned, and both are checked against the base::bits reference.
TEST(RunInt64SubWithOverflowP) {
  int64_t actual_val = -1;
  RawMachineAssemblerTester<int32_t> m;
  Int64BinopTester bt(&m);
  // Renamed from 'add': this node is a subtraction (matches the naming in
  // RunInt64SubWithOverflowInBranchP).
  Node* sub = m.Int64SubWithOverflow(bt.param0, bt.param1);
  Node* val = m.Projection(0, sub);
  Node* ovf = m.Projection(1, sub);
  m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
  bt.AddReturn(ovf);
  FOR_INT64_INPUTS(i) {
    FOR_INT64_INPUTS(j) {
      int64_t expected_val;
      int expected_ovf = bits::SignedSubOverflow64(*i, *j, &expected_val);
      CHECK_EQ(expected_ovf, bt.call(*i, *j));
      CHECK_EQ(expected_val, actual_val);
    }
  }
}
// Exercises Int64SubWithOverflow with immediate operands: constant on the
// left, constant on the right, and both operands constant, to cover the
// instruction selector's immediate-matching paths. Locals renamed from
// 'add' to 'sub' since the node is a subtraction.
TEST(RunInt64SubWithOverflowImm) {
  int64_t actual_val = -1, expected_val = 0;
  FOR_INT64_INPUTS(i) {
    {
      // Constant on the left: computes *i - *j.
      RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
      Node* sub = m.Int64SubWithOverflow(m.Int64Constant(*i), m.Parameter(0));
      Node* val = m.Projection(0, sub);
      Node* ovf = m.Projection(1, sub);
      m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
      m.Return(ovf);
      FOR_INT64_INPUTS(j) {
        int expected_ovf = bits::SignedSubOverflow64(*i, *j, &expected_val);
        CHECK_EQ(expected_ovf, m.Call(*j));
        CHECK_EQ(expected_val, actual_val);
      }
    }
    {
      // Constant on the right: computes *j - *i, hence the swapped reference
      // arguments below (subtraction is not commutative).
      RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
      Node* sub = m.Int64SubWithOverflow(m.Parameter(0), m.Int64Constant(*i));
      Node* val = m.Projection(0, sub);
      Node* ovf = m.Projection(1, sub);
      m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
      m.Return(ovf);
      FOR_INT64_INPUTS(j) {
        int expected_ovf = bits::SignedSubOverflow64(*j, *i, &expected_val);
        CHECK_EQ(expected_ovf, m.Call(*j));
        CHECK_EQ(expected_val, actual_val);
      }
    }
    // Both operands constant.
    FOR_INT64_INPUTS(j) {
      RawMachineAssemblerTester<int32_t> m;
      Node* sub =
          m.Int64SubWithOverflow(m.Int64Constant(*i), m.Int64Constant(*j));
      Node* val = m.Projection(0, sub);
      Node* ovf = m.Projection(1, sub);
      m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
      m.Return(ovf);
      int expected_ovf = bits::SignedSubOverflow64(*i, *j, &expected_val);
      CHECK_EQ(expected_ovf, m.Call());
      CHECK_EQ(expected_val, actual_val);
    }
  }
}
// Exercises Int64SubWithOverflow when the overflow projection feeds a branch
// (the flags-continuation fusion path): on overflow a sentinel constant is
// returned, otherwise the truncated 32-bit difference.
TEST(RunInt64SubWithOverflowInBranchP) {
  int constant = 911999;
  RawMachineLabel blocka, blockb;
  RawMachineAssemblerTester<int32_t> m;
  Int64BinopTester bt(&m);
  Node* sub = m.Int64SubWithOverflow(bt.param0, bt.param1);
  Node* ovf = m.Projection(1, sub);
  m.Branch(ovf, &blocka, &blockb);
  m.Bind(&blocka);
  bt.AddReturn(m.Int64Constant(constant));
  m.Bind(&blockb);
  Node* val = m.Projection(0, sub);
  Node* truncated = m.TruncateInt64ToInt32(val);
  bt.AddReturn(truncated);
  FOR_INT64_INPUTS(i) {
    FOR_INT64_INPUTS(j) {
      int32_t expected = constant;
      int64_t result;
      if (!bits::SignedSubOverflow64(*i, *j, &result)) {
        // No overflow: expect the truncated difference, not the sentinel.
        expected = static_cast<int32_t>(result);
      }
      CHECK_EQ(expected, static_cast<int32_t>(bt.call(*i, *j)));
    }
  }
}
// TODO(titzer): add tests that run 64-bit integer operations.
#endif // V8_TARGET_ARCH_64_BIT
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment