Commit 6619a797 authored by bmeurer@chromium.org

[turbofan] Add support for deferred code.

Branch operators can now carry an optional hint saying whether the
condition is likely true or false. If such a hint is present, the
unlikely successor block is marked as deferred and placed at the end
of the function.

We currently use this feature for tagging int32/uint32 in
change lowering, and for load/store bounds checks in simplified
lowering.
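
As a rough sketch of the intended usage (mirroring the bounds-check
lowering below; "check" and "control" stand for the already-built
condition and control nodes), a lowering attaches the hint when it
creates the branch, and the scheduler then marks the unlikely
successor as deferred:

  // The condition is expected to hold, so hint the branch as likely true.
  Node* branch =
      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);    // hot path
  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);  // scheduled as
                                                                   // deferred code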

TEST=cctest,unittests
R=dcarney@chromium.org

Review URL: https://codereview.chromium.org/642883003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24802 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1efcf093
@@ -142,7 +142,8 @@ Reduction ChangeLowering::ChangeInt32ToTagged(Node* val, Node* control) {
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val);
Node* ovf = graph()->NewNode(common()->Projection(1), add);
-Node* branch = graph()->NewNode(common()->Branch(), ovf, control);
+Node* branch =
+graph()->NewNode(common()->Branch(BranchHint::kTrue), ovf, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* heap_number = AllocateHeapNumberWithValue(
@@ -215,7 +216,8 @@ Reduction ChangeLowering::ChangeUint32ToTagged(Node* val, Node* control) {
Node* cmp = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
SmiMaxValueConstant());
-Node* branch = graph()->NewNode(common()->Branch(), cmp, control);
+Node* branch =
+graph()->NewNode(common()->Branch(BranchHint::kTrue), cmp, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* smi = graph()->NewNode(
......
@@ -44,10 +44,20 @@ Handle<Code> CodeGenerator::GenerateCode() {
info->set_prologue_offset(masm()->pc_offset());
AssemblePrologue();
-// Assemble all instructions.
-for (InstructionSequence::const_iterator i = code()->begin();
-i != code()->end(); ++i) {
-AssembleInstruction(*i);
+// Assemble all non-deferred instructions.
+for (auto const block : code()->instruction_blocks()) {
+if (block->IsDeferred()) continue;
+for (int i = block->code_start(); i < block->code_end(); ++i) {
+AssembleInstruction(code()->InstructionAt(i));
+}
+}
+// Assemble all deferred instructions.
+for (auto const block : code()->instruction_blocks()) {
+if (!block->IsDeferred()) continue;
+for (int i = block->code_start(); i < block->code_end(); ++i) {
+AssembleInstruction(code()->InstructionAt(i));
+}
+}
FinishCode(masm());
@@ -83,6 +93,12 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
+bool CodeGenerator::IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const {
+return code()->InstructionBlockAt(current_block_)->ao_number().IsNext(
+code()->InstructionBlockAt(block)->ao_number());
+}
void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode deopt_mode) {
......
@@ -39,9 +39,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
// Checks if {block} will appear directly after {current_block_} when
// assembling code, in which case, a fall-through can be used.
-bool IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const {
-return current_block_.IsNext(block);
-}
+bool IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const;
// Record a safepoint with the given pointer map.
void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
......
@@ -30,6 +30,26 @@ class ControlOperator : public Operator1<int> {
} // namespace
+std::ostream& operator<<(std::ostream& os, BranchHint hint) {
+switch (hint) {
+case BranchHint::kNone:
+return os << "None";
+case BranchHint::kTrue:
+return os << "True";
+case BranchHint::kFalse:
+return os << "False";
+}
+UNREACHABLE();
+return os;
+}
+BranchHint BranchHintOf(const Operator* const op) {
+DCHECK_EQ(IrOpcode::kBranch, op->opcode());
+return OpParameter<BranchHint>(op);
+}
size_t hash_value(OutputFrameStateCombine const& sc) {
return base::hash_combine(sc.kind_, sc.parameter_);
}
@@ -74,7 +94,6 @@ std::ostream& operator<<(std::ostream& os, FrameStateCallInfo const& info) {
#define SHARED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0) \
V(End, Operator::kFoldable, 0, 1) \
-V(Branch, Operator::kFoldable, 1, 1) \
V(IfTrue, Operator::kFoldable, 0, 1) \
V(IfFalse, Operator::kFoldable, 0, 1) \
V(Throw, Operator::kFoldable, 1, 1) \
@@ -110,6 +129,12 @@ SHARED_OP_LIST(SHARED)
#undef SHARED
+const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
+return new (zone()) Operator1<BranchHint>(
+IrOpcode::kBranch, Operator::kFoldable, 1, 0, "Branch", hint);
+}
const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
// Outputs are formal parameters, plus context, receiver, and JSFunction.
const int value_output_count = num_formal_parameters + 3;
......
@@ -23,6 +23,16 @@ struct CommonOperatorBuilderImpl;
class Operator;
+// Prediction hint for branches.
+enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
+inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
+std::ostream& operator<<(std::ostream&, BranchHint);
+BranchHint BranchHintOf(const Operator* const);
// Flag that describes how to combine the current environment with
// the output of a node to obtain a framestate for lazy bailout.
class OutputFrameStateCombine {
@@ -123,7 +133,7 @@ class CommonOperatorBuilder FINAL {
const Operator* Dead();
const Operator* End();
-const Operator* Branch();
+const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* Throw();
......
@@ -562,6 +562,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
BasicBlock* current = (*rpo)[i];
Tag block_tag(this, "block");
PrintBlockProperty("name", current->id());
+PrintStringProperty("deferred", current->deferred() ? "true" : "false");
PrintIntProperty("from_bci", -1);
PrintIntProperty("to_bci", -1);
......
@@ -147,7 +147,7 @@ Instruction* InstructionSelector::Emit(Instruction* instr) {
bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
-return current_block_->GetRpoNumber().IsNext(block->GetRpoNumber());
+return current_block_->GetAoNumber().IsNext(block->GetAoNumber());
}
......
@@ -336,11 +336,13 @@ InstructionBlock::InstructionBlock(Zone* zone, const BasicBlock* block)
BasicBlock::RpoNumber::Invalid(), zone),
phis_(zone),
id_(block->id()),
+ao_number_(block->GetAoNumber()),
rpo_number_(block->GetRpoNumber()),
loop_header_(GetRpo(block->loop_header())),
loop_end_(GetLoopEndRpo(block)),
code_start_(-1),
-code_end_(-1) {
+code_end_(-1),
+deferred_(block->deferred()) {
// Map successors and precessors
size_t index = 0;
for (BasicBlock::Successors::const_iterator it = block->successors_begin();
@@ -604,7 +606,10 @@ std::ostream& operator<<(std::ostream& os, const InstructionSequence& code) {
const InstructionBlock* block = code.InstructionBlockAt(rpo);
CHECK(block->rpo_number() == rpo);
-os << "RPO#" << block->rpo_number() << ": B" << block->id();
+os << "RPO#" << block->rpo_number();
+os << ": AO#" << block->ao_number();
+os << ": B" << block->id();
+if (block->IsDeferred()) os << " (deferred)";
if (block->IsLoopHeader()) {
os << " loop blocks: [" << block->rpo_number() << ", "
<< block->loop_end() << ")";
......
@@ -795,7 +795,10 @@ class InstructionBlock FINAL : public ZoneObject {
int32_t code_end() const { return code_end_; }
void set_code_end(int32_t end) { code_end_ = end; }
+bool IsDeferred() const { return deferred_; }
BasicBlock::Id id() const { return id_; }
+BasicBlock::RpoNumber ao_number() const { return ao_number_; }
BasicBlock::RpoNumber rpo_number() const { return rpo_number_; }
BasicBlock::RpoNumber loop_header() const { return loop_header_; }
BasicBlock::RpoNumber loop_end() const {
@@ -822,12 +825,14 @@
Predecessors predecessors_;
PhiInstructions phis_;
BasicBlock::Id id_;
+BasicBlock::RpoNumber ao_number_; // Assembly order number.
// TODO(dcarney): probably dont't need this.
BasicBlock::RpoNumber rpo_number_;
BasicBlock::RpoNumber loop_header_;
BasicBlock::RpoNumber loop_end_;
-int32_t code_start_; // start index of arch-specific code.
-int32_t code_end_; // end index of arch-specific code.
+int32_t code_start_; // start index of arch-specific code.
+int32_t code_end_; // end index of arch-specific code.
+const bool deferred_; // Block contains deferred code.
};
typedef ZoneDeque<Constant> ConstantDeque;
@@ -854,6 +859,10 @@ class InstructionSequence FINAL {
int node_count() const { return static_cast<int>(node_map_.size()); }
const InstructionBlocks& instruction_blocks() const {
return instruction_blocks_;
}
int InstructionBlockCount() const {
return static_cast<int>(instruction_blocks_.size());
}
......
@@ -123,6 +123,8 @@ inline int OperatorProperties::GetControlInputCount(const Operator* op) {
#define OPCODE_CASE(x) case IrOpcode::k##x:
CONTROL_OP_LIST(OPCODE_CASE)
#undef OPCODE_CASE
+// Branch operator is special
+if (op->opcode() == IrOpcode::kBranch) return 1;
// Control operators are Operator1<int>.
return OpParameter<int>(op);
default:
......
@@ -13,7 +13,9 @@ namespace internal {
namespace compiler {
BasicBlock::BasicBlock(Zone* zone, Id id)
-: rpo_number_(-1),
+: ao_number_(-1),
+rpo_number_(-1),
+deferred_(false),
dominator_(NULL),
loop_header_(NULL),
loop_depth_(0),
@@ -240,6 +242,7 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
BasicBlock* block = *i;
os << "--- BLOCK B" << block->id();
+if (block->deferred()) os << " (deferred)";
if (block->PredecessorCount() != 0) os << " <- ";
bool comma = false;
for (BasicBlock::Predecessors::iterator j = block->predecessors_begin();
......
@@ -140,6 +140,9 @@ class BasicBlock FINAL : public ZoneObject {
Node* control_input() const { return control_input_; }
void set_control_input(Node* control_input);
+bool deferred() const { return deferred_; }
+void set_deferred(bool deferred) { deferred_ = deferred; }
BasicBlock* dominator() const { return dominator_; }
void set_dominator(BasicBlock* dominator);
@@ -152,6 +155,10 @@
int32_t loop_end() const { return loop_end_; }
void set_loop_end(int32_t loop_end);
+RpoNumber GetAoNumber() const { return RpoNumber::FromInt(ao_number_); }
+int32_t ao_number() const { return ao_number_; }
+void set_ao_number(int32_t ao_number) { ao_number_ = ao_number; }
RpoNumber GetRpoNumber() const { return RpoNumber::FromInt(rpo_number_); }
int32_t rpo_number() const { return rpo_number_; }
void set_rpo_number(int32_t rpo_number);
@@ -161,7 +168,9 @@
bool LoopContains(BasicBlock* block) const;
private:
+int32_t ao_number_; // assembly order number of the block.
int32_t rpo_number_; // special RPO number of the block.
+bool deferred_; // true if the block contains deferred code.
BasicBlock* dominator_; // Immediate dominator of the block.
BasicBlock* loop_header_; // Pointer to dominating loop header basic block,
// NULL if none. For loop headers, this points to
......
@@ -278,6 +278,20 @@ class CFGBuilder {
TraceConnect(branch, branch_block, successor_blocks[0]);
TraceConnect(branch, branch_block, successor_blocks[1]);
+// Consider branch hints.
+// TODO(turbofan): Propagate the deferred flag to all blocks dominated by
+// this IfTrue/IfFalse later.
+switch (BranchHintOf(branch->op())) {
+case BranchHint::kNone:
+break;
+case BranchHint::kTrue:
+successor_blocks[1]->set_deferred(true);
+break;
+case BranchHint::kFalse:
+successor_blocks[0]->set_deferred(true);
+break;
+}
schedule_->AddBranch(branch_block, branch, successor_blocks[0],
successor_blocks[1]);
}
@@ -1195,6 +1209,18 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool,
}
}
+// Compute the assembly order (non-deferred code first, deferred code
+// afterwards).
+int32_t number = 0;
+for (auto block : *final_order) {
+if (block->deferred()) continue;
+block->set_ao_number(number++);
+}
+for (auto block : *final_order) {
+if (!block->deferred()) continue;
+block->set_ao_number(number++);
+}
#if DEBUG
if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
VerifySpecialRPO(num_loops, loops, final_order);
......
@@ -1027,7 +1027,8 @@ void SimplifiedLowering::DoLoadElement(Node* node, MachineType output_type) {
Node* control = node->InputAt(4);
Node* check = graph()->NewNode(machine()->Uint32LessThan(), key, length);
-Node* branch = graph()->NewNode(common()->Branch(), check, control);
+Node* branch =
+graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* load = graph()->NewNode(op, base, index, effect, if_true);
@@ -1098,7 +1099,8 @@ void SimplifiedLowering::DoStoreElement(Node* node) {
Node* control = node->InputAt(5);
Node* check = graph()->NewNode(machine()->Uint32LessThan(), key, length);
-Node* branch = graph()->NewNode(common()->Branch(), check, control);
+Node* branch =
+graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* store = graph()->NewNode(op, base, index, value, effect, if_true);
......
@@ -1832,4 +1832,60 @@ TEST(PhisPushedDownToDifferentBranches) {
ComputeAndVerifySchedule(24, &graph);
}
+TEST(BranchHintTrue) {
+HandleAndZoneScope scope;
+Graph graph(scope.main_zone());
+CommonOperatorBuilder common(scope.main_zone());
+Node* start = graph.NewNode(common.Start(1));
+graph.SetStart(start);
+Node* p0 = graph.NewNode(common.Parameter(0), start);
+Node* tv = graph.NewNode(common.Int32Constant(6));
+Node* fv = graph.NewNode(common.Int32Constant(7));
+Node* br = graph.NewNode(common.Branch(BranchHint::kTrue), p0, start);
+Node* t = graph.NewNode(common.IfTrue(), br);
+Node* f = graph.NewNode(common.IfFalse(), br);
+Node* m = graph.NewNode(common.Merge(2), t, f);
+Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), tv, fv, m);
+Node* ret = graph.NewNode(common.Return(), phi, start, start);
+Node* end = graph.NewNode(common.End(), ret, start);
+graph.SetEnd(end);
+Schedule* schedule = ComputeAndVerifySchedule(13, &graph);
+// Make sure the false block is marked as deferred.
+CHECK(!schedule->block(t)->deferred());
+CHECK(schedule->block(f)->deferred());
+}
+TEST(BranchHintFalse) {
+HandleAndZoneScope scope;
+Graph graph(scope.main_zone());
+CommonOperatorBuilder common(scope.main_zone());
+Node* start = graph.NewNode(common.Start(1));
+graph.SetStart(start);
+Node* p0 = graph.NewNode(common.Parameter(0), start);
+Node* tv = graph.NewNode(common.Int32Constant(6));
+Node* fv = graph.NewNode(common.Int32Constant(7));
+Node* br = graph.NewNode(common.Branch(BranchHint::kFalse), p0, start);
+Node* t = graph.NewNode(common.IfTrue(), br);
+Node* f = graph.NewNode(common.IfFalse(), br);
+Node* m = graph.NewNode(common.Merge(2), t, f);
+Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), tv, fv, m);
+Node* ret = graph.NewNode(common.Return(), phi, start, start);
+Node* end = graph.NewNode(common.End(), ret, start);
+graph.SetEnd(end);
+Schedule* schedule = ComputeAndVerifySchedule(13, &graph);
+// Make sure the true block is marked as deferred.
+CHECK(schedule->block(t)->deferred());
+CHECK(!schedule->block(f)->deferred());
+}
#endif
@@ -47,7 +47,6 @@ const SharedOperator kSharedOperators[] = {
}
SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 1),
SHARED(End, Operator::kFoldable, 0, 0, 1, 0, 0),
-SHARED(Branch, Operator::kFoldable, 1, 0, 1, 0, 2),
SHARED(IfTrue, Operator::kFoldable, 0, 0, 1, 0, 1),
SHARED(IfFalse, Operator::kFoldable, 0, 0, 1, 0, 1),
SHARED(Throw, Operator::kFoldable, 1, 0, 1, 0, 1),
@@ -160,6 +159,24 @@ const double kDoubleValues[] = {-std::numeric_limits<double>::infinity(),
} // namespace
+TEST_F(CommonOperatorTest, Branch) {
+static const BranchHint kHints[] = {BranchHint::kNone, BranchHint::kTrue,
+BranchHint::kFalse};
+TRACED_FOREACH(BranchHint, hint, kHints) {
+const Operator* const op = common()->Branch(hint);
+EXPECT_EQ(IrOpcode::kBranch, op->opcode());
+EXPECT_EQ(Operator::kFoldable, op->properties());
+EXPECT_EQ(1, OperatorProperties::GetValueInputCount(op));
+EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
+EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
+EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+EXPECT_EQ(2, OperatorProperties::GetControlOutputCount(op));
+}
+}
TEST_F(CommonOperatorTest, Float32Constant) {
TRACED_FOREACH(float, value, kFloatValues) {
const Operator* op = common()->Float32Constant(value);
......