Commit 6ccb8704 authored by bmeurer@chromium.org

[turbofan] Add Int32AddWithOverflow machine operator.

TEST=cctest/test-run-machops,cctest/test-instruction-selector-arm
R=titzer@chromium.org

Review URL: https://codereview.chromium.org/436593002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22784 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4491e0e1
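The new machine operator produces two results, exposed in the graph as Projection(0) (the 32-bit sum) and Projection(1) (the overflow bit). A minimal sketch of how it is consumed, using the MachineNodeFactory helper and the test scaffolding added in this change:

int32_t result = 0;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* val;  // Projection(0): the 32-bit sum.
Node* ovf;  // Projection(1): nonzero iff the signed addition overflowed.
m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
m.StoreToPointer(&result, kMachineWord32, val);
bt.AddReturn(ovf);  // Generated code returns the overflow bit.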
......@@ -501,6 +501,12 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
case kUnsignedGreaterThan:
__ b(hi, tlabel);
break;
case kOverflow:
__ b(vs, tlabel);
break;
case kNotOverflow:
__ b(vc, tlabel);
break;
}
if (!fallthru) __ b(flabel); // no fallthru to flabel.
__ bind(&done);
......@@ -513,9 +519,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
ArmOperandConverter i(this, instr);
Label done;
// Materialize a full 32-bit 1 or 0 value.
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
Register reg = i.OutputRegister();
ASSERT_NE(0, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
switch (condition) {
case kUnorderedEqual:
......@@ -578,6 +586,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kUnsignedGreaterThan:
cc = hi;
break;
case kOverflow:
cc = vs;
break;
case kNotOverflow:
cc = vc;
break;
}
__ bind(&check);
__ mov(reg, Operand(0));
......
......@@ -247,6 +247,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
InstructionOperand* inputs[3];
size_t input_count = 0;
InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
const size_t output_count = ARRAY_SIZE(outputs);
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
&input_count, &inputs[1])) {
......@@ -268,8 +270,54 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
const size_t output_count = ARRAY_SIZE(outputs);
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
InstructionCode opcode,
InstructionCode reverse_opcode) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[3];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
&input_count, &inputs[1])) {
inputs[0] = g.UseRegister(m.left().node());
input_count++;
} else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
m.left().node(), &input_count,
&inputs[1])) {
inputs[0] = g.UseRegister(m.right().node());
opcode = reverse_opcode;
input_count++;
} else {
opcode |= AddressingModeField::encode(kMode_Operand2_R);
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseRegister(m.right().node());
}
// Define outputs depending on the projections.
Node* projections[2];
node->CollectProjections(ARRAY_SIZE(projections), projections);
if (projections[0]) {
outputs[output_count++] = g.DefineAsRegister(projections[0]);
}
if (projections[1]) {
opcode |= FlagsModeField::encode(kFlags_set);
opcode |= FlagsConditionField::encode(kOverflow);
outputs[output_count++] = g.DefineAsRegister(projections[1]);
}
ASSERT_NE(0, input_count);
ASSERT_NE(0, output_count);
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
......@@ -539,6 +587,11 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kArmAdd, kArmAdd);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
......
......@@ -129,8 +129,8 @@ class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
InstructionCode opcode = instr->opcode();
switch (ArchOpcodeField::decode(opcode)) {
case kArchJmp:
__ B(code_->GetLabel(i.InputBlock(0)));
break;
......@@ -153,7 +153,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kArm64Add32:
__ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
if (FlagsModeField::decode(opcode) != kFlags_none) {
__ Adds(i.OutputRegister32(), i.InputRegister32(0),
i.InputOperand32(1));
} else {
__ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
}
break;
case kArm64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
......@@ -507,6 +512,12 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
case kUnsignedGreaterThan:
__ B(hi, tlabel);
break;
case kOverflow:
__ B(vs, tlabel);
break;
case kNotOverflow:
__ B(vc, tlabel);
break;
}
if (!fallthru) __ B(flabel); // no fallthru to flabel.
__ Bind(&done);
......@@ -519,9 +530,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
Arm64OperandConverter i(this, instr);
Label done;
// Materialize a full 64-bit 1 or 0 value.
// Materialize a full 64-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
Register reg = i.OutputRegister();
ASSERT_NE(0, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = nv;
switch (condition) {
case kUnorderedEqual:
......@@ -584,6 +597,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kUnsignedGreaterThan:
cc = hi;
break;
case kOverflow:
cc = vs;
break;
case kNotOverflow:
cc = vc;
break;
}
__ bind(&check);
__ Cset(reg, cc);
......
......@@ -114,6 +114,39 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
}
static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
Arm64OperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[2];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseRegister(m.right().node());
// Define outputs depending on the projections.
Node* projections[2];
node->CollectProjections(ARRAY_SIZE(projections), projections);
if (projections[0]) {
outputs[output_count++] = g.DefineAsRegister(projections[0]);
}
if (projections[1]) {
opcode |= FlagsModeField::encode(kFlags_set);
opcode |= FlagsConditionField::encode(kOverflow);
outputs[output_count++] = g.DefineAsRegister(projections[1]);
}
ASSERT_NE(0, input_count);
ASSERT_NE(0, output_count);
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
void InstructionSelector::VisitLoad(Node* node) {
MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
Arm64OperandGenerator g(this);
......@@ -299,6 +332,11 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kArm64Add32);
}
void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop(this, node, kArm64Add, kArithimeticImm, true);
}
......
......@@ -72,7 +72,9 @@ class InstructionOperandConverter {
return gen_->schedule()->GetBlockById(block_id);
}
Register OutputRegister() { return ToRegister(instr_->Output()); }
Register OutputRegister(int index = 0) {
return ToRegister(instr_->OutputAt(index));
}
DoubleRegister OutputDoubleRegister() {
return ToDoubleRegister(instr_->Output());
......
......@@ -464,6 +464,12 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
case kOverflow:
__ j(overflow, tlabel);
break;
case kNotOverflow:
__ j(no_overflow, tlabel);
break;
}
if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
__ bind(&done);
......@@ -476,9 +482,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
IA32OperandConverter i(this, instr);
Label done;
// Materialize a full 32-bit 1 or 0 value.
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
Register reg = i.OutputRegister();
ASSERT_NE(0, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
......@@ -541,6 +549,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kUnsignedGreaterThan:
cc = above;
break;
case kOverflow:
cc = overflow;
break;
case kNotOverflow:
cc = no_overflow;
break;
}
__ bind(&check);
if (reg.is_byte_register()) {
......
......@@ -160,25 +160,64 @@ void InstructionSelector::VisitStore(Node* node) {
// Shared routine for multiple binary operations.
static inline void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
static void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Int32BinopMatcher m(node);
// TODO(turbofan): match complex addressing modes.
// TODO(turbofan): if commutative, pick the non-live-in operand as the left as
// this might be the last use and therefore its register can be reused.
if (g.CanBeImmediate(right)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
g.UseImmediate(right));
} else if (g.CanBeImmediate(left) &&
node->op()->HasProperty(Operator::kCommutative)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
g.UseImmediate(left));
if (g.CanBeImmediate(m.right().node())) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(m.left().node()),
g.UseImmediate(m.right().node()));
} else {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.Use(right));
selector->Emit(opcode, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.Use(m.right().node()));
}
}
static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
IA32OperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[2];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
// TODO(turbofan): if commutative, pick the non-live-in operand as the left as
// this might be the last use and therefore its register can be reused.
if (g.CanBeImmediate(m.right().node())) {
inputs[input_count++] = g.Use(m.left().node());
inputs[input_count++] = g.UseImmediate(m.right().node());
} else {
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.Use(m.right().node());
}
// Define outputs depending on the projections.
Node* projections[2];
node->CollectProjections(ARRAY_SIZE(projections), projections);
if (projections[0]) {
outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
}
if (projections[1]) {
opcode |= FlagsModeField::encode(kFlags_set);
opcode |= FlagsConditionField::encode(kOverflow);
// TODO(turbofan): Use byte register here.
outputs[output_count++] =
(projections[0] ? g.DefineAsRegister(projections[1])
: g.DefineSameAsFirst(projections[1]));
}
ASSERT_NE(0, input_count);
ASSERT_NE(0, output_count);
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
......@@ -248,6 +287,11 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kIA32Add);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
IA32OperandGenerator g(this);
Int32BinopMatcher m(node);
......
......@@ -87,7 +87,9 @@ enum FlagsCondition {
kUnorderedLessThan,
kUnorderedGreaterThanOrEqual,
kUnorderedLessThanOrEqual,
kUnorderedGreaterThan
kUnorderedGreaterThan,
kOverflow,
kNotOverflow
};
OStream& operator<<(OStream& os, const FlagsCondition& fc);
......@@ -105,7 +107,7 @@ typedef int32_t InstructionCode;
typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
typedef BitField<AddressingMode, 7, 4> AddressingModeField;
typedef BitField<FlagsMode, 11, 2> FlagsModeField;
typedef BitField<FlagsCondition, 13, 4> FlagsConditionField;
typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
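// (kOverflow and kNotOverflow push the number of conditions past 16, hence the extra bit above.)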
typedef BitField<int, 13, 19> MiscField;
} // namespace compiler
......
......@@ -256,6 +256,8 @@ class FlagsContinuation V8_FINAL {
switch (condition_) {
case kEqual:
case kNotEqual:
case kOverflow:
case kNotOverflow:
return;
case kSignedLessThan:
condition_ = kSignedGreaterThan;
......
......@@ -252,17 +252,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (buffer->descriptor->ReturnCount() == 1) {
buffer->output_nodes[0] = call;
} else {
// Iterate over all uses of {call} and collect the projections into the
// {result} buffer.
for (UseIter i = call->uses().begin(); i != call->uses().end(); ++i) {
if ((*i)->opcode() == IrOpcode::kProjection) {
int index = OpParameter<int32_t>(*i);
ASSERT_GE(index, 0);
ASSERT_LT(index, buffer->descriptor->ReturnCount());
ASSERT_EQ(NULL, buffer->output_nodes[index]);
buffer->output_nodes[index] = *i;
}
}
call->CollectProjections(buffer->descriptor->ReturnCount(),
buffer->output_nodes);
}
// Filter out the outputs that aren't live because no projection uses them.
......@@ -447,13 +438,10 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kIfFalse:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
case IrOpcode::kProjection:
case IrOpcode::kLazyDeoptimization:
case IrOpcode::kContinuation:
// No code needed for these graph artifacts.
return;
case IrOpcode::kPhi:
return VisitPhi(node);
case IrOpcode::kParameter: {
int index = OpParameter<int>(node);
MachineRepresentation rep = linkage()
......@@ -463,6 +451,10 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(rep, node);
return VisitParameter(node);
}
case IrOpcode::kPhi:
return VisitPhi(node);
case IrOpcode::kProjection:
return VisitProjection(node);
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kExternalConstant:
......@@ -515,6 +507,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
return VisitInt32Add(node);
case IrOpcode::kInt32AddWithOverflow:
return VisitInt32AddWithOverflow(node);
case IrOpcode::kInt32Sub:
return VisitInt32Sub(node);
case IrOpcode::kInt32Mul:
......@@ -736,6 +730,13 @@ void InstructionSelector::VisitWord64Compare(Node* node,
#endif // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_TARGET
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
OpParameter<int>(node))));
}
void InstructionSelector::VisitPhi(Node* node) {
// TODO(bmeurer): Emit a PhiInstruction here.
for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
......@@ -744,10 +745,10 @@ void InstructionSelector::VisitPhi(Node* node) {
}
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
OpParameter<int>(node))));
void InstructionSelector::VisitProjection(Node* node) {
for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
MarkAsUsed(*i);
}
}
......
......@@ -129,8 +129,9 @@ class InstructionSelector V8_FINAL {
void VisitWord64Compare(Node* node, FlagsContinuation* cont);
void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
void VisitPhi(Node* node);
void VisitParameter(Node* node);
void VisitPhi(Node* node);
void VisitProjection(Node* node);
void VisitConstant(Node* node);
void VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization);
......
......@@ -239,6 +239,10 @@ OStream& operator<<(OStream& os, const FlagsCondition& fc) {
return os << "unordered less than or equal";
case kUnorderedGreaterThan:
return os << "unordered greater than";
case kOverflow:
return os << "overflow";
case kNotOverflow:
return os << "not overflow";
}
UNREACHABLE();
return os;
......
......@@ -195,6 +195,12 @@ class MachineNodeFactory {
Node* Int32Add(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
}
void Int32AddWithOverflow(Node* a, Node* b, Node** val_return,
Node** ovf_return) {
Node* add = NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
if (val_return) *val_return = NEW_NODE_1(COMMON()->Projection(0), add);
if (ovf_return) *ovf_return = NEW_NODE_1(COMMON()->Projection(1), add);
}
Node* Int32Sub(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
}
......
......@@ -74,6 +74,10 @@ class MachineOperatorBuilder {
SIMPLE(name, \
Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
1)
#define BINOP_ACO(name) \
SIMPLE(name, \
Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
2)
#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
#define WORD_SIZE(x) return is64() ? Word64##x() : Word32##x()
......@@ -113,6 +117,7 @@ class MachineOperatorBuilder {
Operator* Word64Equal() { BINOP_C(Word64Equal); }
Operator* Int32Add() { BINOP_AC(Int32Add); }
Operator* Int32AddWithOverflow() { BINOP_ACO(Int32AddWithOverflow); }
Operator* Int32Sub() { BINOP(Int32Sub); }
Operator* Int32Mul() { BINOP_AC(Int32Mul); }
Operator* Int32Div() { BINOP(Int32Div); }
......
......@@ -4,10 +4,25 @@
#include "src/compiler/node.h"
#include "src/compiler/generic-node-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
void Node::CollectProjections(int projection_count, Node** projections) {
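// Clear the output array, then record each Projection use of this node at the
// index given by its projection parameter; indices without a use stay NULL.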
for (int i = 0; i < projection_count; ++i) projections[i] = NULL;
for (UseIter i = uses().begin(); i != uses().end(); ++i) {
if ((*i)->opcode() != IrOpcode::kProjection) continue;
int32_t index = OpParameter<int32_t>(*i);
ASSERT_GE(index, 0);
ASSERT_LT(index, projection_count);
ASSERT_EQ(NULL, projections[index]);
projections[index] = *i;
}
}
OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
......@@ -23,6 +38,7 @@ OStream& operator<<(OStream& os, const Node& n) {
}
return os;
}
}
}
} // namespace v8::internal::compiler
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -53,6 +53,8 @@ class Node : public GenericNode<NodeData, Node> {
: GenericNode<NodeData, Node>(graph, input_count) {}
void Initialize(Operator* op) { set_op(op); }
void CollectProjections(int projection_count, Node** projections);
};
OStream& operator<<(OStream& os, const Node& n);
......
......@@ -173,6 +173,7 @@
V(Word64Sar) \
V(Word64Equal) \
V(Int32Add) \
V(Int32AddWithOverflow) \
V(Int32Sub) \
V(Int32Mul) \
V(Int32Div) \
......
......@@ -819,11 +819,11 @@ void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
// Handle "output same as input" for second instruction.
for (size_t i = 0; i < second->OutputCount(); i++) {
InstructionOperand* output = second->Output();
InstructionOperand* output = second->OutputAt(i);
if (!output->IsUnallocated()) continue;
UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
if (second_output->HasSameAsInputPolicy()) {
ASSERT(second->OutputCount() == 1); // Only valid for one output.
ASSERT(i == 0); // Only valid for first output.
UnallocatedOperand* cur_input =
UnallocatedOperand::cast(second->InputAt(0));
int output_vreg = second_output->virtual_register();
......
......@@ -667,6 +667,12 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
case kOverflow:
__ j(overflow, tlabel);
break;
case kNotOverflow:
__ j(no_overflow, tlabel);
break;
}
if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
__ bind(&done);
......@@ -679,9 +685,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
X64OperandConverter i(this, instr);
Label done;
// Materialize a full 32-bit 1 or 0 value.
// Materialize a full 64-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
Register reg = i.OutputRegister();
ASSERT_NE(0, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
......@@ -744,6 +752,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kUnsignedGreaterThan:
cc = above;
break;
case kOverflow:
cc = overflow;
break;
case kNotOverflow:
cc = no_overflow;
break;
}
__ bind(&check);
__ setcc(cc, reg);
......
......@@ -196,6 +196,49 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
}
static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
X64OperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[2];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
// TODO(turbofan): if commutative, pick the non-live-in operand as the left as
// this might be the last use and therefore its register can be reused.
if (g.CanBeImmediate(m.right().node())) {
inputs[input_count++] = g.Use(m.left().node());
inputs[input_count++] = g.UseImmediate(m.right().node());
} else {
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.Use(m.right().node());
}
// Define outputs depending on the projections.
Node* projections[2];
node->CollectProjections(ARRAY_SIZE(projections), projections);
if (projections[0]) {
outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
}
if (projections[1]) {
opcode |= FlagsModeField::encode(kFlags_set);
opcode |= FlagsConditionField::encode(kOverflow);
outputs[output_count++] =
(projections[0] ? g.DefineAsRegister(projections[1])
: g.DefineSameAsFirst(projections[1]));
}
ASSERT_NE(0, input_count);
ASSERT_NE(0, output_count);
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kX64And32, true);
}
......@@ -327,6 +370,11 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kX64Add32);
}
void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop(this, node, kX64Add, true);
}
......
......@@ -225,6 +225,363 @@ TEST(InstructionSelectorDPIAndShiftImm) {
}
TEST(InstructionSelectorInt32AddWithOverflowP) {
{
InstructionSelectorTester m;
Node* ovf;
m.Int32AddWithOverflow(m.Parameter(0), m.Parameter(1), NULL, &ovf);
m.Return(ovf);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val;
m.Int32AddWithOverflow(m.Parameter(0), m.Parameter(1), &val, NULL);
m.Return(val);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val, *ovf;
m.Int32AddWithOverflow(m.Parameter(0), m.Parameter(1), &val, &ovf);
m.Return(m.Word32Equal(val, ovf));
m.SelectInstructions();
CHECK_LE(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(2, m.code[0]->OutputCount());
}
}
TEST(InstructionSelectorInt32AddWithOverflowImm) {
Immediates immediates;
for (Immediates::const_iterator i = immediates.begin(); i != immediates.end();
++i) {
int32_t imm = *i;
{
InstructionSelectorTester m;
Node* ovf;
m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(imm), NULL, &ovf);
m.Return(ovf);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* ovf;
m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0), NULL, &ovf);
m.Return(ovf);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val;
m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(imm), &val, NULL);
m.Return(val);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val;
m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0), &val, NULL);
m.Return(val);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val, *ovf;
m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(imm), &val, &ovf);
m.Return(m.Word32Equal(val, ovf));
m.SelectInstructions();
CHECK_LE(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(2, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val, *ovf;
m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0), &val, &ovf);
m.Return(m.Word32Equal(val, ovf));
m.SelectInstructions();
CHECK_LE(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(2, m.code[0]->OutputCount());
}
}
}
TEST(InstructionSelectorInt32AddWithOverflowAndShiftP) {
Shifts shifts;
for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
Shift shift = *i;
{
InstructionSelectorTester m;
Node* ovf;
m.Int32AddWithOverflow(
m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
NULL, &ovf);
m.Return(ovf);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* ovf;
m.Int32AddWithOverflow(
m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2),
NULL, &ovf);
m.Return(ovf);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val;
m.Int32AddWithOverflow(
m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
&val, NULL);
m.Return(val);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val;
m.Int32AddWithOverflow(
m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2),
&val, NULL);
m.Return(val);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val, *ovf;
m.Int32AddWithOverflow(
m.Parameter(0), m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
&val, &ovf);
m.Return(m.Word32Equal(val, ovf));
m.SelectInstructions();
CHECK_LE(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(2, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val, *ovf;
m.Int32AddWithOverflow(
m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)), m.Parameter(2),
&val, &ovf);
m.Return(m.Word32Equal(val, ovf));
m.SelectInstructions();
CHECK_LE(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(2, m.code[0]->OutputCount());
}
}
}
TEST(InstructionSelectorInt32AddWithOverflowAndShiftImm) {
Shifts shifts;
for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
Shift shift = *i;
for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
{
InstructionSelectorTester m;
Node* ovf;
m.Int32AddWithOverflow(
m.Parameter(0),
m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)), NULL,
&ovf);
m.Return(ovf);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* ovf;
m.Int32AddWithOverflow(
m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1), NULL, &ovf);
m.Return(ovf);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val;
m.Int32AddWithOverflow(
m.Parameter(0),
m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)), &val,
NULL);
m.Return(val);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val;
m.Int32AddWithOverflow(
m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1), &val, NULL);
m.Return(val);
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val, *ovf;
m.Int32AddWithOverflow(
m.Parameter(0),
m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)), &val,
&ovf);
m.Return(m.Word32Equal(val, ovf));
m.SelectInstructions();
CHECK_LE(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(2, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
Node* val, *ovf;
m.Int32AddWithOverflow(
m.NewNode(shift.op, m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1), &val, &ovf);
m.Return(m.Word32Equal(val, ovf));
m.SelectInstructions();
CHECK_LE(1, m.code.size());
CHECK_EQ(kArmAdd, m.code[0]->arch_opcode());
CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(2, m.code[0]->OutputCount());
}
}
}
}
TEST(InstructionSelectorWord32AndAndWord32XorWithMinus1P) {
{
InstructionSelectorTester m;
......
......@@ -3882,4 +3882,94 @@ TEST(RunSpillLotsOfThingsWithCall) {
#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
#endif
static bool sadd_overflow(int32_t x, int32_t y, int32_t* val) {
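// Add in uint32_t so the wrap-around is well defined.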
int32_t v =
static_cast<int32_t>(static_cast<uint32_t>(x) + static_cast<uint32_t>(y));
*val = v;
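// Signed overflow occurred iff the result's sign differs from the sign of
// both operands; bit 31 of (v ^ x) & (v ^ y) is then set.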
return (((v ^ x) & (v ^ y)) >> 31) & 1;
}
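A quick sanity check of the helper's contract (hypothetical snippet, not part of the test suite):

int32_t v = 0;
CHECK(sadd_overflow(kMaxInt, 1, &v));   // Wraps around: v == kMinInt.
CHECK(!sadd_overflow(1, 2, &v));        // No overflow: v == 3.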
TEST(RunInt32AddWithOverflowP) {
int32_t actual_val = -1;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* val, *ovf;
m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
m.StoreToPointer(&actual_val, kMachineWord32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected_val;
int expected_ovf = sadd_overflow(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, bt.call(*i, *j));
CHECK_EQ(expected_val, actual_val);
}
}
}
TEST(RunInt32AddWithOverflowImm) {
int32_t actual_val = -1, expected_val = 0;
FOR_INT32_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
Node* val, *ovf;
m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0), &val, &ovf);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = sadd_overflow(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call(*j));
CHECK_EQ(expected_val, actual_val);
}
}
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
Node* val, *ovf;
m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i), &val, &ovf);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = sadd_overflow(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call(*j));
CHECK_EQ(expected_val, actual_val);
}
}
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
Node* val, *ovf;
m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j), &val,
&ovf);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
int expected_ovf = sadd_overflow(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
CHECK_EQ(expected_val, actual_val);
}
}
}
TEST(RunInt32AddWithOverflowInBranchP) {
MLabel blocka, blockb;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* val, *ovf;
m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
m.Branch(ovf, &blocka, &blockb);
m.Bind(&blocka);
bt.AddReturn(m.Word32Not(val));
m.Bind(&blockb);
bt.AddReturn(val);
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
int32_t expected;
if (sadd_overflow(*i, *j, &expected)) expected = ~expected;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
}
#endif // V8_TURBOFAN_TARGET