Commit 35f09768 authored by bmeurer@chromium.org

[turbofan] Support for combining branches with <Operation>WithOverflow.

Also unify the handling of binops in the InstructionSelector
backends.

TEST=cctest/test-run-machops,cctest/test-instruction-selector-arm
R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/415403005

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22800 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 7682126a
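For orientation before the per-backend diffs: the overflow output of an <Operation>WithOverflow node is now consumed as an ordinary Projection, and branching on that projection lets the instruction selector fold the branch into the flag-setting arithmetic instruction (kFlags_branch with the kOverflow condition) instead of materializing the overflow bit and testing it separately. A condensed sketch of the pattern the new cctests below exercise, using the same test helpers that already appear in this diff:

// Condensed from the new cctests in this change; RawMachineAssemblerTester,
// Int32BinopTester and MLabel are the existing cctest helpers.
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
MLabel if_overflow, if_ok;
Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);  // one node, two projections
m.Branch(m.Projection(1, add), &if_overflow, &if_ok);      // branch directly on the overflow bit
m.Bind(&if_overflow);
bt.AddReturn(m.Int32Constant(0));
m.Bind(&if_ok);
bt.AddReturn(m.Projection(0, add));                        // the value projection stays usable

On ARM, for example, this now selects a single flag-setting add encoded with kFlags_branch and kOverflow, with the branch targets passed as label inputs, which is what the new InstructionSelectorBranchWithODPIP test below checks.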
......@@ -249,46 +249,12 @@ static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
}
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, InstructionCode reverse_opcode) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[3];
size_t input_count = 0;
InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
const size_t output_count = ARRAY_SIZE(outputs);
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
&input_count, &inputs[1])) {
inputs[0] = g.UseRegister(m.left().node());
input_count++;
} else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
m.left().node(), &input_count,
&inputs[1])) {
inputs[0] = g.UseRegister(m.right().node());
opcode = reverse_opcode;
input_count++;
} else {
opcode |= AddressingModeField::encode(kMode_Operand2_R);
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseRegister(m.right().node());
}
ASSERT_NE(0, input_count);
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
InstructionCode opcode,
InstructionCode reverse_opcode) {
InstructionCode opcode, InstructionCode reverse_opcode,
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[3];
InstructionOperand* inputs[5];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
......@@ -309,16 +275,14 @@ static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegister(m.right().node());
}
// Define outputs depending on the projections.
Node* projections[2];
node->CollectProjections(ARRAY_SIZE(projections), projections);
if (projections[0]) {
outputs[output_count++] = g.DefineAsRegister(projections[0]);
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
if (projections[1]) {
opcode |= FlagsModeField::encode(kFlags_set);
opcode |= FlagsConditionField::encode(kOverflow);
outputs[output_count++] = g.DefineAsRegister(projections[1]);
outputs[output_count++] = g.DefineAsRegister(node);
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
ASSERT_NE(0, input_count);
......@@ -327,7 +291,16 @@ static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
selector->Emit(opcode, output_count, outputs, input_count, inputs);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
if (cont->IsBranch()) instr->MarkAsControl();
}
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, InstructionCode reverse_opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}
......@@ -597,11 +570,6 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kArmAdd, kArmAdd);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
......@@ -616,11 +584,6 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kArmSub, kArmRsb);
}
void InstructionSelector::VisitInt32Mul(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
......@@ -867,10 +830,22 @@ void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kArmAdd, kArmAdd, cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kArmSub, kArmRsb, cont);
}
// Shared routine for multiple compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont,
bool commutative, bool requires_output) {
bool commutative) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[5];
......@@ -894,9 +869,6 @@ static void VisitWordCompare(InstructionSelector* selector, Node* node,
}
if (cont->IsBranch()) {
if (requires_output) {
outputs[output_count++] = g.DefineAsRegister(node);
}
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
} else {
......@@ -917,15 +889,15 @@ static void VisitWordCompare(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
switch (node->opcode()) {
case IrOpcode::kInt32Add:
return VisitWordCompare(this, node, kArmCmn, cont, true, false);
return VisitWordCompare(this, node, kArmCmn, cont, true);
case IrOpcode::kInt32Sub:
return VisitWordCompare(this, node, kArmCmp, cont, false, false);
return VisitWordCompare(this, node, kArmCmp, cont, false);
case IrOpcode::kWord32And:
return VisitWordCompare(this, node, kArmTst, cont, true, false);
return VisitWordCompare(this, node, kArmTst, cont, true);
case IrOpcode::kWord32Or:
return VisitWordCompare(this, node, kArmOrr, cont, true, true);
return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
case IrOpcode::kWord32Xor:
return VisitWordCompare(this, node, kArmTeq, cont, true, false);
return VisitWordCompare(this, node, kArmTeq, cont, true);
default:
break;
}
......@@ -946,7 +918,7 @@ void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
void InstructionSelector::VisitWord32Compare(Node* node,
FlagsContinuation* cont) {
VisitWordCompare(this, node, kArmCmp, cont, false, false);
VisitWordCompare(this, node, kArmCmp, cont, false);
}
......
......@@ -110,34 +110,26 @@ static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode, ImmediateMode operand_mode,
bool commutative) {
VisitRRO(selector, opcode, node, operand_mode);
}
static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[2];
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseRegister(m.right().node());
inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
// Define outputs depending on the projections.
Node* projections[2];
node->CollectProjections(ARRAY_SIZE(projections), projections);
if (projections[0]) {
outputs[output_count++] = g.DefineAsRegister(projections[0]);
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
if (projections[1]) {
opcode |= FlagsModeField::encode(kFlags_set);
opcode |= FlagsConditionField::encode(kOverflow);
outputs[output_count++] = g.DefineAsRegister(projections[1]);
outputs[output_count++] = g.DefineAsRegister(node);
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
ASSERT_NE(0, input_count);
......@@ -145,7 +137,17 @@ static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
selector->Emit(opcode, output_count, outputs, input_count, inputs);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
if (cont->IsBranch()) instr->MarkAsControl();
}
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode, ImmediateMode operand_mode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, operand_mode, &cont);
}
......@@ -256,22 +258,22 @@ void InstructionSelector::VisitStore(Node* node) {
void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kArm64And32, kLogical32Imm, true);
VisitBinop(this, node, kArm64And32, kLogical32Imm);
}
void InstructionSelector::VisitWord64And(Node* node) {
VisitBinop(this, node, kArm64And, kLogical64Imm, true);
VisitBinop(this, node, kArm64And, kLogical64Imm);
}
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kArm64Or32, kLogical32Imm, true);
VisitBinop(this, node, kArm64Or32, kLogical32Imm);
}
void InstructionSelector::VisitWord64Or(Node* node) {
VisitBinop(this, node, kArm64Or, kLogical64Imm, true);
VisitBinop(this, node, kArm64Or, kLogical64Imm);
}
......@@ -284,7 +286,7 @@ static void VisitXor(InstructionSelector* selector, Node* node,
selector->Emit(not_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()));
} else {
VisitBinop(selector, node, xor_opcode, kLogical32Imm, true);
VisitBinop(selector, node, xor_opcode, kLogical32Imm);
}
}
......@@ -330,17 +332,12 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kArm64Add32, kArithimeticImm, true);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kArm64Add32);
VisitBinop(this, node, kArm64Add32, kArithimeticImm);
}
void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop(this, node, kArm64Add, kArithimeticImm, true);
VisitBinop(this, node, kArm64Add, kArithimeticImm);
}
......@@ -353,7 +350,7 @@ static void VisitSub(InstructionSelector* selector, Node* node,
selector->Emit(neg_opcode, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
} else {
VisitBinop(selector, node, sub_opcode, kArithimeticImm, false);
VisitBinop(selector, node, sub_opcode, kArithimeticImm);
}
}
......@@ -363,11 +360,6 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kArm64Sub32);
}
void InstructionSelector::VisitInt64Sub(Node* node) {
VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
}
......@@ -489,6 +481,18 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kArm64Add32, kArithimeticImm, cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kArm64Sub32, kArithimeticImm, cont);
}
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand* left, InstructionOperand* right,
......
......@@ -161,27 +161,10 @@ void InstructionSelector::VisitStore(Node* node) {
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
InstructionCode opcode, FlagsContinuation* cont) {
IA32OperandGenerator g(selector);
Int32BinopMatcher m(node);
// TODO(turbofan): match complex addressing modes.
// TODO(turbofan): if commutative, pick the non-live-in operand as the left as
// this might be the last use and therefore its register can be reused.
if (g.CanBeImmediate(m.right().node())) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(m.left().node()),
g.UseImmediate(m.right().node()));
} else {
selector->Emit(opcode, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.Use(m.right().node()));
}
}
static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
IA32OperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[2];
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
......@@ -197,19 +180,15 @@ static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Use(m.right().node());
}
// Define outputs depending on the projections.
Node* projections[2];
node->CollectProjections(ARRAY_SIZE(projections), projections);
if (projections[0]) {
outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
if (projections[1]) {
opcode |= FlagsModeField::encode(kFlags_set);
opcode |= FlagsConditionField::encode(kOverflow);
outputs[output_count++] = g.DefineSameAsFirst(node);
if (cont->IsSet()) {
// TODO(turbofan): Use byte register here.
outputs[output_count++] =
(projections[0] ? g.DefineAsRegister(projections[1])
: g.DefineSameAsFirst(projections[1]));
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
ASSERT_NE(0, input_count);
......@@ -217,7 +196,17 @@ static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
selector->Emit(opcode, output_count, outputs, input_count, inputs);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
if (cont->IsBranch()) instr->MarkAsControl();
}
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, &cont);
}
......@@ -287,11 +276,6 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kIA32Add);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
IA32OperandGenerator g(this);
Int32BinopMatcher m(node);
......@@ -303,11 +287,6 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kIA32Sub);
}
void InstructionSelector::VisitInt32Mul(Node* node) {
IA32OperandGenerator g(this);
Node* left = node->InputAt(0);
......@@ -438,6 +417,18 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kIA32Add, cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kIA32Sub, cont);
}
// Shared routine for multiple compare operations.
static inline void VisitCompare(InstructionSelector* selector,
InstructionCode opcode,
......
......@@ -48,6 +48,7 @@ class OperandGenerator {
}
InstructionOperand* DefineAsConstant(Node* node) {
selector()->MarkAsDefined(node);
sequence()->AddConstant(node->id(), ToConstant(node));
return ConstantOperand::Create(node->id(), zone());
}
......@@ -179,12 +180,16 @@ class OperandGenerator {
ASSERT_NOT_NULL(node);
ASSERT_NOT_NULL(operand);
operand->set_virtual_register(node->id());
selector()->MarkAsDefined(node);
return operand;
}
UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
selector_->MarkAsUsed(node);
return Define(node, operand);
ASSERT_NOT_NULL(node);
ASSERT_NOT_NULL(operand);
operand->set_virtual_register(node->id());
selector()->MarkAsUsed(node);
return operand;
}
UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location) {
......@@ -215,6 +220,8 @@ class OperandGenerator {
// instruction and the branch or set it should be combined with.
class FlagsContinuation V8_FINAL {
public:
FlagsContinuation() : mode_(kFlags_none) {}
// Creates a new flags continuation from the given condition and true/false
// blocks.
FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
......@@ -236,7 +243,10 @@ class FlagsContinuation V8_FINAL {
bool IsNone() const { return mode_ == kFlags_none; }
bool IsBranch() const { return mode_ == kFlags_branch; }
bool IsSet() const { return mode_ == kFlags_set; }
FlagsCondition condition() const { return condition_; }
FlagsCondition condition() const {
ASSERT(!IsNone());
return condition_;
}
Node* result() const {
ASSERT(IsSet());
return result_;
......@@ -250,9 +260,13 @@ class FlagsContinuation V8_FINAL {
return false_block_;
}
void Negate() { condition_ = static_cast<FlagsCondition>(condition_ ^ 1); }
void Negate() {
ASSERT(!IsNone());
condition_ = static_cast<FlagsCondition>(condition_ ^ 1);
}
void Commute() {
ASSERT(!IsNone());
switch (condition_) {
case kEqual:
case kNotEqual:
......@@ -312,8 +326,11 @@ class FlagsContinuation V8_FINAL {
// Encodes this flags continuation into the given opcode.
InstructionCode Encode(InstructionCode opcode) {
return opcode | FlagsModeField::encode(mode_) |
FlagsConditionField::encode(condition_);
opcode |= FlagsModeField::encode(mode_);
if (mode_ != kFlags_none) {
opcode |= FlagsConditionField::encode(condition_);
}
return opcode;
}
private:
......
......@@ -20,6 +20,7 @@ InstructionSelector::InstructionSelector(InstructionSequence* sequence,
source_positions_(source_positions),
current_block_(NULL),
instructions_(InstructionDeque::allocator_type(zone())),
defined_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())),
used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}
......@@ -149,6 +150,24 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const {
}
bool InstructionSelector::IsDefined(Node* node) const {
ASSERT_NOT_NULL(node);
NodeId id = node->id();
ASSERT(id >= 0);
ASSERT(id < static_cast<NodeId>(defined_.size()));
return defined_[id];
}
void InstructionSelector::MarkAsDefined(Node* node) {
ASSERT_NOT_NULL(node);
NodeId id = node->id();
ASSERT(id >= 0);
ASSERT(id < static_cast<NodeId>(defined_.size()));
defined_[id] = true;
}
bool InstructionSelector::IsUsed(Node* node) const {
if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
NodeId id = node->id();
......@@ -347,7 +366,8 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
++i) {
Node* node = *i;
if (!IsUsed(node)) continue;
// Skip nodes that are unused or already defined.
if (!IsUsed(node) || IsDefined(node)) continue;
// Generate code for this node "top down", but schedule the code "bottom
// up".
size_t current_node_end = instructions_.size();
......@@ -630,6 +650,26 @@ void InstructionSelector::VisitWord64Equal(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = node->FindProjection(1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitInt32AddWithOverflow(node, &cont);
}
FlagsContinuation cont;
VisitInt32AddWithOverflow(node, &cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = node->FindProjection(1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitInt32SubWithOverflow(node, &cont);
}
FlagsContinuation cont;
VisitInt32SubWithOverflow(node, &cont);
}
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
VisitWord64Compare(node, &cont);
......@@ -748,8 +788,20 @@ void InstructionSelector::VisitPhi(Node* node) {
void InstructionSelector::VisitProjection(Node* node) {
for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
MarkAsUsed(*i);
OperandGenerator g(this);
Node* value = node->InputAt(0);
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
if (OpParameter<int32_t>(node) == 0) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
ASSERT_EQ(1, OpParameter<int32_t>(node));
MarkAsUsed(value);
}
break;
default:
break;
}
}
......@@ -849,6 +901,31 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
case IrOpcode::kFloat64LessThanOrEqual:
cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
return VisitFloat64Compare(value, &cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
if (OpParameter<int32_t>(value) == 1) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
// <Operation>) is either NULL, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch.
Node* node = value->InputAt(0);
Node* result = node->FindProjection(0);
if (result == NULL || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitInt32AddWithOverflow(node, &cont);
case IrOpcode::kInt32SubWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitInt32SubWithOverflow(node, &cont);
default:
break;
}
}
}
break;
default:
break;
}
......@@ -885,6 +962,7 @@ void InstructionSelector::VisitDeoptimization(Node* deopt) {
Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), NULL);
}
#if !V8_TURBOFAN_TARGET
#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
......@@ -893,6 +971,18 @@ MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
FlagsContinuation* cont) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
UNIMPLEMENTED();
}
......@@ -913,7 +1003,7 @@ void InstructionSelector::VisitFloat64Compare(Node* node,
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization) {}
#endif
#endif // !V8_TURBOFAN_TARGET
} // namespace compiler
} // namespace internal
......
......@@ -71,6 +71,13 @@ class InstructionSelector V8_FINAL {
// edge and the two are in the same basic block.
bool CanCover(Node* user, Node* node) const;
// Checks if {node} was already defined, and therefore code was already
// generated for it.
bool IsDefined(Node* node) const;
// Inform the instruction selection that {node} was just defined.
void MarkAsDefined(Node* node);
// Checks if {node} has any uses, and therefore code has to be generated for
// it.
bool IsUsed(Node* node) const;
......@@ -123,6 +130,9 @@ class InstructionSelector V8_FINAL {
MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);
void VisitWord32Test(Node* node, FlagsContinuation* cont);
void VisitWord64Test(Node* node, FlagsContinuation* cont);
void VisitWord32Compare(Node* node, FlagsContinuation* cont);
......@@ -160,6 +170,7 @@ class InstructionSelector V8_FINAL {
SourcePositionTable* source_positions_;
BasicBlock* current_block_;
Instructions instructions_;
BoolVector defined_;
BoolVector used_;
};
......
......@@ -199,20 +199,14 @@ class MachineNodeFactory {
Node* Int32Add(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
}
void Int32AddWithOverflow(Node* a, Node* b, Node** val_return,
Node** ovf_return) {
Node* add = NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
if (val_return) *val_return = Projection(0, add);
if (ovf_return) *ovf_return = Projection(1, add);
Node* Int32AddWithOverflow(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
}
Node* Int32Sub(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
}
void Int32SubWithOverflow(Node* a, Node* b, Node** val_return,
Node** ovf_return) {
Node* add = NEW_NODE_2(MACHINE()->Int32SubWithOverflow(), a, b);
if (val_return) *val_return = Projection(0, add);
if (ovf_return) *ovf_return = Projection(1, add);
Node* Int32SubWithOverflow(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32SubWithOverflow(), a, b);
}
Node* Int32Mul(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Mul(), a, b);
......
......@@ -23,6 +23,17 @@ void Node::CollectProjections(int projection_count, Node** projections) {
}
Node* Node::FindProjection(int32_t projection_index) {
for (UseIter i = uses().begin(); i != uses().end(); ++i) {
if ((*i)->opcode() == IrOpcode::kProjection &&
OpParameter<int32_t>(*i) == projection_index) {
return *i;
}
}
return NULL;
}
OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
......
......@@ -55,6 +55,7 @@ class Node : public GenericNode<NodeData, Node> {
void Initialize(Operator* op) { set_op(op); }
void CollectProjections(int projection_count, Node** projections);
Node* FindProjection(int32_t projection_index);
};
OStream& operator<<(OStream& os, const Node& n);
......
......@@ -176,31 +176,10 @@ void InstructionSelector::VisitStore(Node* node) {
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode, bool commutative) {
X64OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
// TODO(turbofan): match complex addressing modes.
// TODO(turbofan): if commutative, pick the non-live-in operand as the left as
// this might be the last use and therefore its register can be reused.
if (g.CanBeImmediate(right)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
g.UseImmediate(right));
} else if (commutative && g.CanBeImmediate(left)) {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
g.UseImmediate(left));
} else {
selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
g.Use(right));
}
}
static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
InstructionCode opcode, FlagsContinuation* cont) {
X64OperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[2];
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
......@@ -216,18 +195,14 @@ static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Use(m.right().node());
}
// Define outputs depending on the projections.
Node* projections[2];
node->CollectProjections(ARRAY_SIZE(projections), projections);
if (projections[0]) {
outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
if (projections[1]) {
opcode |= FlagsModeField::encode(kFlags_set);
opcode |= FlagsConditionField::encode(kOverflow);
outputs[output_count++] =
(projections[0] ? g.DefineAsRegister(projections[1])
: g.DefineSameAsFirst(projections[1]));
outputs[output_count++] = g.DefineSameAsFirst(node);
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
ASSERT_NE(0, input_count);
......@@ -235,27 +210,37 @@ static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
selector->Emit(opcode, output_count, outputs, input_count, inputs);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
if (cont->IsBranch()) instr->MarkAsControl();
}
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, &cont);
}
void InstructionSelector::VisitWord32And(Node* node) {
VisitBinop(this, node, kX64And32, true);
VisitBinop(this, node, kX64And32);
}
void InstructionSelector::VisitWord64And(Node* node) {
VisitBinop(this, node, kX64And, true);
VisitBinop(this, node, kX64And);
}
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kX64Or32, true);
VisitBinop(this, node, kX64Or32);
}
void InstructionSelector::VisitWord64Or(Node* node) {
VisitBinop(this, node, kX64Or, true);
VisitBinop(this, node, kX64Or);
}
......@@ -268,7 +253,7 @@ static void VisitXor(InstructionSelector* selector, Node* node,
selector->Emit(not_opcode, g.DefineSameAsFirst(node),
g.Use(m.left().node()));
} else {
VisitBinop(selector, node, xor_opcode, true);
VisitBinop(selector, node, xor_opcode);
}
}
......@@ -366,17 +351,12 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kX64Add32, true);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kX64Add32);
VisitBinop(this, node, kX64Add32);
}
void InstructionSelector::VisitInt64Add(Node* node) {
VisitBinop(this, node, kX64Add, true);
VisitBinop(this, node, kX64Add);
}
......@@ -389,7 +369,7 @@ static void VisitSub(InstructionSelector* selector, Node* node,
selector->Emit(neg_opcode, g.DefineSameAsFirst(node),
g.Use(m.right().node()));
} else {
VisitBinop(selector, node, sub_opcode, false);
VisitBinop(selector, node, sub_opcode);
}
}
......@@ -399,11 +379,6 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinopWithOverflow(this, node, kX64Sub32);
}
void InstructionSelector::VisitInt64Sub(Node* node) {
VisitSub<int64_t>(this, node, kX64Sub, kX64Neg);
}
......@@ -584,6 +559,18 @@ void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kX64Add32, cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
FlagsContinuation* cont) {
VisitBinop(this, node, kX64Sub32, cont);
}
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand* left, InstructionOperand* right,
......
......@@ -260,7 +260,7 @@ TEST(InstructionSelectorODPIP) {
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -272,7 +272,7 @@ TEST(InstructionSelectorODPIP) {
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -311,7 +311,7 @@ TEST(InstructionSelectorODPIImm) {
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -325,7 +325,7 @@ TEST(InstructionSelectorODPIImm) {
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -338,7 +338,7 @@ TEST(InstructionSelectorODPIImm) {
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -351,7 +351,7 @@ TEST(InstructionSelectorODPIImm) {
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -405,7 +405,7 @@ TEST(InstructionSelectorODPIAndShiftP) {
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -420,7 +420,7 @@ TEST(InstructionSelectorODPIAndShiftP) {
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -433,7 +433,7 @@ TEST(InstructionSelectorODPIAndShiftP) {
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -447,7 +447,7 @@ TEST(InstructionSelectorODPIAndShiftP) {
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -505,7 +505,7 @@ TEST(InstructionSelectorODPIAndShiftImm) {
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -521,7 +521,7 @@ TEST(InstructionSelectorODPIAndShiftImm) {
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -535,7 +535,7 @@ TEST(InstructionSelectorODPIAndShiftImm) {
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -550,7 +550,7 @@ TEST(InstructionSelectorODPIAndShiftImm) {
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
CHECK_EQ(1, m.code[0]->OutputCount());
CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
......@@ -1753,3 +1753,148 @@ TEST(InstructionSelectorBranchWithDPIP) {
}
}
}
TEST(InstructionSelectorBranchWithODPIP) {
ODPIs odpis;
for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
ODPI odpi = *i;
{
InstructionSelectorTester m;
MLabel blocka, blockb;
Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
m.Branch(m.Projection(1, node), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(0));
m.Bind(&blockb);
m.Return(m.Projection(0, node));
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
}
{
InstructionSelectorTester m;
MLabel blocka, blockb;
Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(0));
m.Bind(&blockb);
m.Return(m.Projection(0, node));
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
}
{
InstructionSelectorTester m;
MLabel blocka, blockb;
Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
m.Branch(m.Word32Equal(m.Int32Constant(0), m.Projection(1, node)),
&blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(0));
m.Bind(&blockb);
m.Return(m.Projection(0, node));
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
}
}
}
TEST(InstructionSelectorBranchWithODPIImm) {
ODPIs odpis;
Immediates immediates;
for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
ODPI odpi = *i;
for (Immediates::const_iterator j = immediates.begin();
j != immediates.end(); ++j) {
int32_t imm = *j;
{
InstructionSelectorTester m;
MLabel blocka, blockb;
Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm));
m.Branch(m.Projection(1, node), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(0));
m.Bind(&blockb);
m.Return(m.Projection(0, node));
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_LE(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
}
{
InstructionSelectorTester m;
MLabel blocka, blockb;
Node* node = m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0));
m.Branch(m.Projection(1, node), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(0));
m.Bind(&blockb);
m.Return(m.Projection(0, node));
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_LE(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
}
{
InstructionSelectorTester m;
MLabel blocka, blockb;
Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm));
m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(0));
m.Bind(&blockb);
m.Return(m.Projection(0, node));
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
CHECK_LE(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
}
{
InstructionSelectorTester m;
MLabel blocka, blockb;
Node* node = m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0));
m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
&blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(0));
m.Bind(&blockb);
m.Return(m.Projection(0, node));
m.SelectInstructions();
CHECK_EQ(1, m.code.size());
CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
CHECK_LE(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
}
}
}
}
......@@ -3903,8 +3903,9 @@ TEST(RunInt32AddWithOverflowP) {
int32_t actual_val = -1;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* val, *ovf;
m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
......@@ -3923,8 +3924,9 @@ TEST(RunInt32AddWithOverflowImm) {
FOR_INT32_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
Node* val, *ovf;
m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0), &val, &ovf);
Node* add = m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
......@@ -3935,8 +3937,9 @@ TEST(RunInt32AddWithOverflowImm) {
}
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
Node* val, *ovf;
m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i), &val, &ovf);
Node* add = m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
......@@ -3947,9 +3950,10 @@ TEST(RunInt32AddWithOverflowImm) {
}
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
Node* val, *ovf;
m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j), &val,
&ovf);
Node* add =
m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
int expected_ovf = sadd_overflow(*i, *j, &expected_val);
......@@ -3961,20 +3965,22 @@ TEST(RunInt32AddWithOverflowImm) {
TEST(RunInt32AddWithOverflowInBranchP) {
int constant = 911777;
MLabel blocka, blockb;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* val, *ovf;
m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
Node* ovf = m.Projection(1, add);
m.Branch(ovf, &blocka, &blockb);
m.Bind(&blocka);
bt.AddReturn(m.Word32Not(val));
bt.AddReturn(m.Int32Constant(constant));
m.Bind(&blockb);
Node* val = m.Projection(0, add);
bt.AddReturn(val);
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
int32_t expected;
if (sadd_overflow(*i, *j, &expected)) expected = ~expected;
if (sadd_overflow(*i, *j, &expected)) expected = constant;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
......@@ -3985,8 +3991,9 @@ TEST(RunInt32SubWithOverflowP) {
int32_t actual_val = -1;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* val, *ovf;
m.Int32SubWithOverflow(bt.param0, bt.param1, &val, &ovf);
Node* add = m.Int32SubWithOverflow(bt.param0, bt.param1);
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
......@@ -4005,8 +4012,9 @@ TEST(RunInt32SubWithOverflowImm) {
FOR_INT32_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
Node* val, *ovf;
m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0), &val, &ovf);
Node* add = m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
......@@ -4017,8 +4025,9 @@ TEST(RunInt32SubWithOverflowImm) {
}
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
Node* val, *ovf;
m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i), &val, &ovf);
Node* add = m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
......@@ -4029,9 +4038,10 @@ TEST(RunInt32SubWithOverflowImm) {
}
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
Node* val, *ovf;
m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j), &val,
&ovf);
Node* add =
m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
int expected_ovf = ssub_overflow(*i, *j, &expected_val);
......@@ -4043,20 +4053,22 @@ TEST(RunInt32SubWithOverflowImm) {
TEST(RunInt32SubWithOverflowInBranchP) {
int constant = 911999;
MLabel blocka, blockb;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* val, *ovf;
m.Int32SubWithOverflow(bt.param0, bt.param1, &val, &ovf);
Node* sub = m.Int32SubWithOverflow(bt.param0, bt.param1);
Node* ovf = m.Projection(1, sub);
m.Branch(ovf, &blocka, &blockb);
m.Bind(&blocka);
bt.AddReturn(m.Word32Not(val));
bt.AddReturn(m.Int32Constant(constant));
m.Bind(&blockb);
Node* val = m.Projection(0, sub);
bt.AddReturn(val);
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
int32_t expected;
if (ssub_overflow(*i, *j, &expected)) expected = ~expected;
if (ssub_overflow(*i, *j, &expected)) expected = constant;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
......
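The expected values in the overflow tests above come from sadd_overflow and ssub_overflow helpers that are defined elsewhere in the machops test file and are not part of this diff. Judging from their call sites, they return non-zero when the signed 32-bit operation overflows and always store the wrapped result through the out-parameter; a hypothetical stand-in with those semantics (not the actual helpers) would look like:

#include <stdint.h>
// Hypothetical stand-ins, inferred from the call sites above: return non-zero
// on signed overflow and always write the wrapped two's-complement result.
static int sadd_overflow(int32_t x, int32_t y, int32_t* val) {
  uint32_t sum = static_cast<uint32_t>(x) + static_cast<uint32_t>(y);  // wrap-around add
  *val = static_cast<int32_t>(sum);
  return ((x ^ *val) & (y ^ *val)) < 0;  // operands share a sign, result differs
}
static int ssub_overflow(int32_t x, int32_t y, int32_t* val) {
  uint32_t diff = static_cast<uint32_t>(x) - static_cast<uint32_t>(y);  // wrap-around sub
  *val = static_cast<int32_t>(diff);
  return ((x ^ y) & (x ^ *val)) < 0;  // operands differ in sign, result differs from x
}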