Commit feb28907 authored by bmeurer, committed by Commit bot

[turbofan] Initial support for Switch.

Adds Switch and Case operators to TurboFan and wires them through
scheduling, instruction selection and code generation, where they are
emitted as jump tables (inline on ARM/ARM64, out of line on IA32/X64).

BUG=v8:3872
LOG=n

Review URL: https://codereview.chromium.org/892513003

Cr-Commit-Position: refs/heads/master@{#26515}
parent 4ee51619
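
A minimal sketch of the new control shape, mirroring the scheduler tests in
this change ({index_value} and {control} stand in for the switch key and the
incoming control edge): a Switch node consumes the key plus one control
input, and each of its N control outputs is claimed by a Case projection
tagged with its table index.

  Node* sw = graph()->NewNode(common()->Switch(2), index_value, control);
  Node* case0 = graph()->NewNode(common()->Case(0), sw);  // taken when key == 0
  Node* case1 = graph()->NewNode(common()->Case(1), sw);  // taken when key == 1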
......@@ -336,6 +336,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchSwitch:
AssembleArchSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchNop:
// don't emit code for nops.
DCHECK_EQ(LeaveCC, i.OutputSBit());
......@@ -737,6 +741,18 @@ void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
}
void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
ArmOperandConverter i(this, instr);
int const kNumLabels = static_cast<int>(instr->InputCount() - 1);
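// Keep the assembler from emitting a constant pool inside the inline jump
// table: the ldr, the nop and the kNumLabels table words must stay
// contiguous.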
__ BlockConstPoolFor(kNumLabels + 2);
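// In ARM state, reading pc yields the address of the current instruction
// plus 8, which is exactly the first table entry (just past the nop below),
// so this loads table[input] straight into pc.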
__ ldr(pc, MemOperand(pc, i.InputRegister(0), LSL, 2));
__ nop();
for (int index = 0; index < kNumLabels; ++index) {
__ dd(GetLabel(i.InputRpo(index + 1)));
}
}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
......@@ -1009,6 +1025,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
// On 32-bit ARM we emit the jump tables inline.
UNREACHABLE();
}
void CodeGenerator::AddNopForSmiCodeInlining() {
// On 32-bit ARM we do not insert nops for inlined Smi code.
}
......
......@@ -357,6 +357,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
case kArchSwitch:
AssembleArchSwitch(instr);
break;
case kArchNop:
// don't emit code for nops.
break;
......@@ -838,6 +841,22 @@ void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
}
void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
UseScratchRegisterScope scope(masm());
Register reg = i.InputRegister(0);
Register tmp = scope.AcquireX();
Label table;
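// Each table entry is a single 4-byte B instruction, so the target entry
// lives at &table + input * 4; compute that address and jump into the table.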
__ Adr(tmp, &table);
__ Add(tmp, tmp, Operand(reg, LSL, 2));
__ Br(tmp);
__ Bind(&table);
for (size_t index = 1; index < instr->InputCount(); ++index) {
__ B(GetLabel(i.InputRpo(index)));
}
}
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
......@@ -1084,6 +1103,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
// On 64-bit ARM we emit the jump tables inline.
UNREACHABLE();
}
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
......
......@@ -144,6 +144,8 @@ class OutOfLineCode : public ZoneObject {
static inline void FinishCode(MacroAssembler* masm) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
masm->CheckConstPool(true, false);
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
masm->ud2();
#endif
}
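// The new ud2 on ia32/x64 acts as a trap barrier, so that control falling
// off the end of the generated code cannot silently run into the jump
// tables that are now emitted after this point.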
......
......@@ -12,6 +12,24 @@ namespace v8 {
namespace internal {
namespace compiler {
class CodeGenerator::JumpTable FINAL : public ZoneObject {
public:
JumpTable(JumpTable* next, Label** targets, size_t target_count)
: next_(next), targets_(targets), target_count_(target_count) {}
Label* label() { return &label_; }
JumpTable* next() const { return next_; }
Label** targets() const { return targets_; }
size_t target_count() const { return target_count_; }
private:
Label label_;
JumpTable* const next_;
Label** const targets_;
size_t const target_count_;
};
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
: frame_(frame),
......@@ -28,6 +46,7 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
deoptimization_literals_(code->zone()),
translations_(code->zone()),
last_lazy_deopt_pc_(0),
jump_tables_(nullptr),
ools_(nullptr),
osr_pc_offset_(-1) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
......@@ -81,7 +100,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
masm()->bind(ool->entry());
ool->Generate();
masm()->jmp(ool->exit());
if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
}
}
......@@ -95,6 +114,15 @@ Handle<Code> CodeGenerator::GenerateCode() {
FinishCode(masm());
// Emit the jump tables.
if (jump_tables_) {
masm()->Align(kPointerSize);
for (JumpTable* table = jump_tables_; table; table = table->next()) {
masm()->bind(table->label());
AssembleJumpTable(table->targets(), table->target_count());
}
}
safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
// TODO(titzer): what are the right code flags here?
......@@ -292,6 +320,12 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
}
Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
jump_tables_ = new (zone()) JumpTable(jump_tables_, targets, target_count);
return jump_tables_->label();
}
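// A backend that wants an out-of-line table uses this as in the ia32 hunk
// below:
//   Label* const table = AddJumpTable(labels, label_count);
//   __ jmp(Operand::JumpTable(i.InputRegister(0), times_4, table));
// The table bodies are assembled once, after FinishCode(), via
// AssembleJumpTable().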
void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
......
......@@ -69,6 +69,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
void AssembleArchInstruction(Instruction* instr);
void AssembleArchJump(BasicBlock::RpoNumber target);
void AssembleArchSwitch(Instruction* instr);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
......@@ -91,6 +92,18 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
void AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) FINAL;
// ===========================================================================
// =================== Jump table construction methods. ======================
// ===========================================================================
class JumpTable;
// Adds a jump table that is emitted after the actual code. Returns a label
// pointing to the beginning of the table. {targets} is assumed to be static
// or zone allocated.
Label* AddJumpTable(Label** targets, size_t target_count);
// Emits a jump table.
void AssembleJumpTable(Label** targets, size_t target_count);
// ===========================================================================
// Deoptimization table construction
void AddSafepointAndDeopt(Instruction* instr);
......@@ -145,6 +158,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
ZoneDeque<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
JumpTable* jump_tables_;
OutOfLineCode* ools_;
int osr_pc_offset_;
};
......
......@@ -36,6 +36,12 @@ BranchHint BranchHintOf(const Operator* const op) {
}
size_t CaseIndexOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kCase, op->opcode());
return OpParameter<size_t>(op);
}
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
return lhs.type() == rhs.type() && lhs.hint() == rhs.hint();
}
......@@ -250,6 +256,24 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
}
const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
DCHECK_GE(control_output_count, 2u); // Disallow trivial switches.
return new (zone()) Operator( // --
IrOpcode::kSwitch, Operator::kFoldable, // opcode
"Switch", // name
1, 0, 1, 0, 0, control_output_count); // counts
}
const Operator* CommonOperatorBuilder::Case(size_t index) {
return new (zone()) Operator1<size_t>( // --
IrOpcode::kCase, Operator::kFoldable, // opcode
"Case", // name
0, 0, 1, 0, 0, 1, // counts
index); // parameter
}
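// The count columns above are value_in, effect_in, control_in, value_out,
// effect_out, control_out (the Operator constructor order shown later in
// this diff): Switch takes one value and one control input and fans out to
// {control_output_count} control outputs, while Case is a pure control
// projection with a single control input and output.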
const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
// Outputs are formal parameters, plus context, receiver, and JSFunction.
const int value_output_count = num_formal_parameters + 3;
......
......@@ -33,6 +33,9 @@ std::ostream& operator<<(std::ostream&, BranchHint);
BranchHint BranchHintOf(const Operator* const);
size_t CaseIndexOf(const Operator* const);
class SelectParameters FINAL {
public:
explicit SelectParameters(MachineType type,
......@@ -172,6 +175,8 @@ class CommonOperatorBuilder FINAL : public ZoneObject {
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* Switch(size_t control_output_count);
const Operator* Case(size_t index);
const Operator* Throw();
const Operator* Return();
......
......@@ -310,6 +310,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
case kArchSwitch:
AssembleArchSwitch(instr);
break;
case kArchNop:
// don't emit code for nops.
break;
......@@ -758,6 +761,18 @@ void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
}
void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
IA32OperandConverter i(this, instr);
size_t const label_count = instr->InputCount() - 1;
Label** labels = zone()->NewArray<Label*>(label_count);
for (size_t index = 0; index < label_count; ++index) {
labels[index] = GetLabel(i.InputRpo(index + 1));
}
Label* const table = AddJumpTable(labels, label_count);
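// Indirect jump through the out-of-line table; each entry is a 4-byte
// absolute label address (emitted with dd in AssembleJumpTable below).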
__ jmp(Operand::JumpTable(i.InputRegister(0), times_4, table));
}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
......@@ -1214,6 +1229,13 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ dd(targets[index]);
}
}
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
......
......@@ -37,6 +37,7 @@ namespace compiler {
V(ArchCallCodeObject) \
V(ArchCallJSFunction) \
V(ArchJmp) \
V(ArchSwitch) \
V(ArchNop) \
V(ArchRet) \
V(ArchStackPointer) \
......
......@@ -483,7 +483,9 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
}
static inline void CheckNoPhis(const BasicBlock* block) {
namespace {
V8_INLINE void CheckNoPhis(const BasicBlock* block) {
#ifdef DEBUG
// Branch targets should not have phis.
for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
......@@ -493,6 +495,8 @@ static inline void CheckNoPhis(const BasicBlock* block) {
#endif
}
} // namespace
void InstructionSelector::VisitControl(BasicBlock* block) {
Node* input = block->control_input();
......@@ -513,6 +517,18 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
if (condition->opcode() == IrOpcode::kAlways) return VisitGoto(tbranch);
return VisitBranch(input, tbranch, fbranch);
}
case BasicBlock::kSwitch: {
DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
BasicBlock** const branches = &block->successors().front();
size_t const branch_count = block->SuccessorCount();
DCHECK_LE(2u, branch_count);
// SSA deconstruction requires targets of branches not to have phis.
// Edge split form guarantees this property, but is more strict.
for (size_t index = 0; index < branch_count; ++index) {
CheckNoPhis(branches[index]);
}
return VisitSwitch(input, branches, branch_count);
}
case BasicBlock::kReturn: {
// If the result itself is a return, return its input.
Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
......@@ -525,7 +541,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
return VisitThrow(input->InputAt(0));
case BasicBlock::kNone: {
// TODO(titzer): exit block doesn't have control.
DCHECK(input == NULL);
DCHECK_NULL(input);
break;
}
default:
......@@ -544,6 +560,8 @@ MachineType InstructionSelector::GetMachineType(Node* node) {
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
case IrOpcode::kSwitch:
case IrOpcode::kCase:
case IrOpcode::kEffectPhi:
case IrOpcode::kEffectSet:
case IrOpcode::kMerge:
......@@ -682,6 +700,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
case IrOpcode::kSwitch:
case IrOpcode::kCase:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
// No code needed for these graph artifacts.
......@@ -1038,6 +1058,22 @@ void InstructionSelector::VisitGoto(BasicBlock* target) {
}
void InstructionSelector::VisitSwitch(Node* node, BasicBlock** branches,
size_t branch_count) {
OperandGenerator g(this);
Node* const value = node->InputAt(0);
size_t const input_count = branch_count + 1;
InstructionOperand* const inputs =
zone()->NewArray<InstructionOperand>(static_cast<int>(input_count));
inputs[0] = g.UseRegister(value);
for (size_t index = 0; index < branch_count; ++index) {
inputs[index + 1] = g.Label(branches[index]);
}
Emit(kArchSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
->MarkAsControl();
}
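// The kArchSwitch instruction emitted above carries the index register as
// input 0 and one label per successor as inputs 1..branch_count; that is
// the layout the per-architecture AssembleArchSwitch implementations decode.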
void InstructionSelector::VisitReturn(Node* value) {
OperandGenerator g(this);
if (value != NULL) {
......
......@@ -202,6 +202,7 @@ class InstructionSelector FINAL {
void VisitCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, BasicBlock** branches, size_t branch_count);
void VisitReturn(Node* value);
void VisitThrow(Node* value);
void VisitDeoptimize(Node* deopt);
......
......@@ -12,6 +12,8 @@
V(Branch) \
V(IfTrue) \
V(IfFalse) \
V(Switch) \
V(Case) \
V(Merge) \
V(Return) \
V(OsrNormalEntry) \
......
......@@ -10,13 +10,16 @@ namespace v8 {
namespace internal {
namespace compiler {
namespace {
template <typename N>
static inline N CheckRange(size_t val) {
CHECK(val <= std::numeric_limits<N>::max());
V8_INLINE N CheckRange(size_t val) {
CHECK_LE(val, std::numeric_limits<N>::max());
return static_cast<N>(val);
}
} // namespace
Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
......
......@@ -93,7 +93,7 @@ class Operator : public ZoneObject {
int EffectOutputCount() const { return effect_out_; }
int ControlOutputCount() const { return control_out_; }
static inline size_t ZeroIfPure(Properties properties) {
static size_t ZeroIfPure(Properties properties) {
return (properties & kPure) == kPure ? 0 : 1;
}
......
......@@ -76,6 +76,20 @@ void RawMachineAssembler::Branch(Node* condition, Label* true_val,
}
void RawMachineAssembler::Switch(Node* index, Label** succ_labels,
size_t succ_count) {
DCHECK_NE(schedule()->end(), current_block_);
Node* sw = NewNode(common()->Switch(succ_count), index);
BasicBlock** succ_blocks =
zone()->NewArray<BasicBlock*>(static_cast<int>(succ_count));
for (size_t index = 0; index < succ_count; ++index) {
succ_blocks[index] = Use(succ_labels[index]);
}
schedule()->AddSwitch(CurrentBlock(), sw, succ_blocks, succ_count);
current_block_ = nullptr;
}
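// Usage, mirroring the new RunSwitch1 test below ({index_node} standing in
// for the switch key):
//   MLabel block0, block1;
//   MLabel* cases[] = {&block0, &block1};
//   m.Switch(index_node, cases, arraysize(cases));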
void RawMachineAssembler::Return(Node* value) {
schedule()->AddReturn(CurrentBlock(), value);
current_block_ = NULL;
......
......@@ -402,6 +402,7 @@ class RawMachineAssembler : public GraphBuilder {
Label* Exit();
void Goto(Label* label);
void Branch(Node* condition, Label* true_val, Label* false_val);
void Switch(Node* index, Label** succ_labels, size_t succ_count);
// Call through CallFunctionStub with lazy deopt and frame-state.
Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
Node* frame_state, CallFunctionFlags flags);
......
......@@ -102,6 +102,8 @@ std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
return os << "goto";
case BasicBlock::kBranch:
return os << "branch";
case BasicBlock::kSwitch:
return os << "switch";
case BasicBlock::kReturn:
return os << "return";
case BasicBlock::kThrow:
......@@ -209,6 +211,18 @@ void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
}
void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
size_t succ_count) {
DCHECK_EQ(BasicBlock::kNone, block->control());
DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
block->set_control(BasicBlock::kSwitch);
for (size_t index = 0; index < succ_count; ++index) {
AddSuccessor(block, succ_blocks[index]);
}
SetControlInput(block, sw);
}
void Schedule::AddReturn(BasicBlock* block, Node* input) {
DCHECK(block->control() == BasicBlock::kNone);
block->set_control(BasicBlock::kReturn);
......@@ -234,13 +248,30 @@ void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
MoveSuccessors(block, end);
AddSuccessor(block, tblock);
AddSuccessor(block, fblock);
if (block->control_input() != NULL) {
if (block->control_input() != nullptr) {
SetControlInput(end, block->control_input());
}
SetControlInput(block, branch);
}
void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
BasicBlock** succ_blocks, size_t succ_count) {
DCHECK_NE(BasicBlock::kNone, block->control());
DCHECK_EQ(BasicBlock::kNone, end->control());
end->set_control(block->control());
block->set_control(BasicBlock::kSwitch);
MoveSuccessors(block, end);
for (size_t index = 0; index < succ_count; ++index) {
AddSuccessor(block, succ_blocks[index]);
}
if (block->control_input() != nullptr) {
SetControlInput(end, block->control_input());
}
SetControlInput(block, sw);
}
void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
block->AddSuccessor(succ);
succ->AddPredecessor(block);
......
......@@ -33,6 +33,7 @@ class BasicBlock FINAL : public ZoneObject {
kNone, // Control not initialized yet.
kGoto, // Goto a single successor block.
kBranch, // Branch if true to first successor, otherwise second.
kSwitch, // Table dispatch to one of the successor blocks.
kReturn, // Return a value from this method.
kThrow // Throw an exception.
};
......@@ -233,6 +234,10 @@ class Schedule FINAL : public ZoneObject {
void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock);
// BasicBlock building: add a switch at the end of {block}.
void AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
size_t succ_count);
// BasicBlock building: add a return at the end of {block}.
void AddReturn(BasicBlock* block, Node* input);
......@@ -243,6 +248,10 @@ class Schedule FINAL : public ZoneObject {
void InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
BasicBlock* tblock, BasicBlock* fblock);
// BasicBlock mutation: insert a switch into the end of {block}.
void InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
BasicBlock** succ_blocks, size_t succ_count);
// Exposed publicly for testing only.
void AddSuccessorForTesting(BasicBlock* block, BasicBlock* succ) {
return AddSuccessor(block, succ);
......
......@@ -218,7 +218,8 @@ void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
class CFGBuilder : public ZoneObject {
public:
CFGBuilder(Zone* zone, Scheduler* scheduler)
: scheduler_(scheduler),
: zone_(zone),
scheduler_(scheduler),
schedule_(scheduler->schedule_),
queued_(scheduler->graph_, 2),
queue_(zone),
......@@ -316,7 +317,8 @@ class CFGBuilder : public ZoneObject {
BuildBlockForNode(node);
break;
case IrOpcode::kBranch:
BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
case IrOpcode::kSwitch:
BuildBlocksForSuccessors(node);
break;
default:
break;
......@@ -333,6 +335,10 @@ class CFGBuilder : public ZoneObject {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectBranch(node);
break;
case IrOpcode::kSwitch:
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectSwitch(node);
break;
case IrOpcode::kReturn:
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectReturn(node);
......@@ -357,49 +363,67 @@ class CFGBuilder : public ZoneObject {
return block;
}
void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
IrOpcode::Value b) {
Node* successors[2];
CollectSuccessorProjections(node, successors, a, b);
BuildBlockForNode(successors[0]);
BuildBlockForNode(successors[1]);
void BuildBlocksForSuccessors(Node* node) {
size_t const successor_count = node->op()->ControlOutputCount();
Node** successors =
zone_->NewArray<Node*>(static_cast<int>(successor_count));
CollectSuccessorProjections(node, successors, successor_count);
for (size_t index = 0; index < successor_count; ++index) {
BuildBlockForNode(successors[index]);
}
}
// Collect the branch-related projections from a node, such as IfTrue,
// IfFalse.
// TODO(titzer): consider moving this to node.h
void CollectSuccessorProjections(Node* node, Node** buffer,
IrOpcode::Value true_opcode,
IrOpcode::Value false_opcode) {
buffer[0] = NULL;
buffer[1] = NULL;
for (Node* use : node->uses()) {
if (use->opcode() == true_opcode) {
DCHECK(!buffer[0]);
buffer[0] = use;
}
if (use->opcode() == false_opcode) {
DCHECK(!buffer[1]);
buffer[1] = use;
// IfFalse, and Case.
void CollectSuccessorProjections(Node* node, Node** successors,
size_t successor_count) {
#ifdef DEBUG
DCHECK_EQ(static_cast<int>(successor_count), node->UseCount());
std::memset(successors, 0, sizeof(*successors) * successor_count);
#endif
for (Node* const use : node->uses()) {
size_t index;
switch (use->opcode()) {
default:
UNREACHABLE();
// Fall through.
case IrOpcode::kIfTrue:
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
index = 0;
break;
case IrOpcode::kIfFalse:
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
index = 1;
break;
case IrOpcode::kCase:
DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
index = CaseIndexOf(use->op());
break;
}
DCHECK_LT(index, successor_count);
DCHECK(successors[index] == nullptr);
successors[index] = use;
}
#ifdef DEBUG
for (size_t index = 0; index < successor_count; ++index) {
DCHECK_NOT_NULL(successors[index]);
}
DCHECK(buffer[0]);
DCHECK(buffer[1]);
#endif
}
void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
IrOpcode::Value true_opcode,
IrOpcode::Value false_opcode) {
Node* successors[2];
CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
buffer[0] = schedule_->block(successors[0]);
buffer[1] = schedule_->block(successors[1]);
void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
size_t successor_count) {
Node** successors = reinterpret_cast<Node**>(successor_blocks);
CollectSuccessorProjections(node, successors, successor_count);
for (size_t index = 0; index < successor_count; ++index) {
successor_blocks[index] = schedule_->block(successors[index]);
}
}
void ConnectBranch(Node* branch) {
BasicBlock* successor_blocks[2];
CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
IrOpcode::kIfFalse);
CollectSuccessorBlocks(branch, successor_blocks,
arraysize(successor_blocks));
// Consider branch hints.
switch (BranchHintOf(branch->op())) {
......@@ -421,7 +445,7 @@ class CFGBuilder : public ZoneObject {
} else {
Node* branch_block_node = NodeProperties::GetControlInput(branch);
BasicBlock* branch_block = schedule_->block(branch_block_node);
DCHECK(branch_block != NULL);
DCHECK_NOT_NULL(branch_block);
TraceConnect(branch, branch_block, successor_blocks[0]);
TraceConnect(branch, branch_block, successor_blocks[1]);
......@@ -430,12 +454,36 @@ class CFGBuilder : public ZoneObject {
}
}
void ConnectSwitch(Node* sw) {
size_t const successor_count = sw->op()->ControlOutputCount();
BasicBlock** successor_blocks =
zone_->NewArray<BasicBlock*>(static_cast<int>(successor_count));
CollectSuccessorBlocks(sw, successor_blocks, successor_count);
if (sw == component_entry_) {
for (size_t index = 0; index < successor_count; ++index) {
TraceConnect(sw, component_start_, successor_blocks[index]);
}
schedule_->InsertSwitch(component_start_, component_end_, sw,
successor_blocks, successor_count);
} else {
Node* sw_block_node = NodeProperties::GetControlInput(sw);
BasicBlock* sw_block = schedule_->block(sw_block_node);
DCHECK_NOT_NULL(sw_block);
for (size_t index = 0; index < successor_count; ++index) {
TraceConnect(sw, sw_block, successor_blocks[index]);
}
schedule_->AddSwitch(sw_block, sw, successor_blocks, successor_count);
}
}
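// When the switch is the entry of the component being scheduled, its
// successors hang off the component's start/end pair (InsertSwitch);
// otherwise they attach to the block that already holds the switch
// (AddSwitch).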
void ConnectMerge(Node* merge) {
// Don't connect the special merge at the end to its predecessors.
if (IsFinalMerge(merge)) return;
BasicBlock* block = schedule_->block(merge);
DCHECK(block != NULL);
DCHECK_NOT_NULL(block);
// For all of the merge's control inputs, add a goto at the end to the
// merge's basic block.
for (Node* const input : merge->inputs()) {
......@@ -460,7 +508,7 @@ class CFGBuilder : public ZoneObject {
}
void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
DCHECK(block);
DCHECK_NOT_NULL(block);
if (succ == NULL) {
Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
block->id().ToInt());
......@@ -487,6 +535,7 @@ class CFGBuilder : public ZoneObject {
DCHECK(control_.empty());
}
Zone* zone_;
Scheduler* scheduler_;
Schedule* schedule_;
NodeMarker<bool> queued_; // Mark indicating whether node is queued.
......
......@@ -228,6 +228,27 @@ void Verifier::Visitor::Check(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
case IrOpcode::kSwitch: {
// All uses of a Switch must be Case projections whose indices exactly
// cover [0, UseCount) with no duplicates.
std::vector<bool> uses;
uses.resize(node->UseCount());
for (auto use : node->uses()) {
CHECK_EQ(IrOpcode::kCase, use->opcode());
size_t const index = CaseIndexOf(use->op());
CHECK_LT(index, uses.size());
CHECK(!uses[index]);
uses[index] = true;
}
// Type is empty.
CheckNotTyped(node);
break;
}
case IrOpcode::kCase:
CHECK_EQ(IrOpcode::kSwitch,
NodeProperties::GetControlInput(node)->opcode());
// Type is empty.
CheckNotTyped(node);
break;
case IrOpcode::kLoop:
case IrOpcode::kMerge:
CHECK_EQ(control_count, input_count);
......
......@@ -532,6 +532,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
case kArchSwitch:
AssembleArchSwitch(instr);
break;
case kArchNop:
// don't emit code for nops.
break;
......@@ -1067,6 +1070,19 @@ void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
}
void CodeGenerator::AssembleArchSwitch(Instruction* instr) {
X64OperandConverter i(this, instr);
size_t const label_count = instr->InputCount() - 1;
Label** labels = zone()->NewArray<Label*>(static_cast<int>(label_count));
for (size_t index = 0; index < label_count; ++index) {
labels[index] = GetLabel(i.InputRpo(static_cast<int>(index + 1)));
}
Label* const table = AddJumpTable(labels, label_count);
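// RIP-relative lea of the table start; entries are 8-byte absolute
// addresses (emitted with dq below), hence the times_8 scaling.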
__ leaq(kScratchRegister, Operand(table));
__ jmp(Operand(kScratchRegister, i.InputRegister(0), times_8, 0));
}
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
......@@ -1380,6 +1396,13 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ dq(targets[index]);
}
}
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
......
......@@ -115,7 +115,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
"%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
reinterpret_cast<intptr_t>(ptr),
ptr - begin);
pc += 4;
pc += sizeof(ptr);
} else {
decode_buffer[0] = '\0';
pc += d.InstructionDecode(decode_buffer, pc);
......
......@@ -447,6 +447,56 @@ TEST(RunLoopIncrementFloat64) {
}
TEST(RunSwitch1) {
RawMachineAssemblerTester<int32_t> m;
int constant = 11223344;
MLabel block0, block1, end;
MLabel* cases[] = {&block0, &block1};
m.Switch(m.IntPtrConstant(0), cases, arraysize(cases));
m.Bind(&block0);
m.Goto(&end);
m.Bind(&block1);
m.Goto(&end);
m.Bind(&end);
m.Return(m.Int32Constant(constant));
CHECK_EQ(constant, m.Call());
}
TEST(RunSwitch2) {
RawMachineAssemblerTester<int32_t> m(kMachInt32);
const size_t kNumCases = 255;
int32_t values[kNumCases];
m.main_isolate()->random_number_generator()->NextBytes(values,
sizeof(values));
MLabel end;
MLabel* cases[kNumCases];
Node* results[kNumCases];
for (size_t i = 0; i < kNumCases; ++i) {
cases[i] = new (m.main_zone()->New(sizeof(MLabel))) MLabel;
}
m.Switch(m.ConvertInt32ToIntPtr(m.Parameter(0)), cases, arraysize(cases));
for (size_t i = 0; i < kNumCases; ++i) {
m.Bind(cases[i]);
results[i] = m.Int32Constant(values[i]);
m.Goto(&end);
}
m.Bind(&end);
const int num_results = static_cast<int>(arraysize(results));
Node* phi =
m.NewNode(m.common()->Phi(kMachInt32, num_results), num_results, results);
m.Return(phi);
for (size_t i = 0; i < kNumCases; ++i) {
CHECK_EQ(values[i], m.Call(static_cast<int>(i)));
}
}
TEST(RunLoadInt32) {
RawMachineAssemblerTester<int32_t> m;
......
......@@ -133,6 +133,9 @@ class CommonOperatorTest : public TestWithZone {
const int kArguments[] = {1, 5, 6, 42, 100, 10000, 65000};
const size_t kCases[] = {2, 3, 4, 100, 255};
const float kFloatValues[] = {-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::min(),
-1.0f,
......@@ -180,6 +183,39 @@ TEST_F(CommonOperatorTest, Branch) {
}
TEST_F(CommonOperatorTest, Switch) {
TRACED_FOREACH(size_t, cases, kCases) {
const Operator* const op = common()->Switch(cases);
EXPECT_EQ(IrOpcode::kSwitch, op->opcode());
EXPECT_EQ(Operator::kFoldable, op->properties());
EXPECT_EQ(1, op->ValueInputCount());
EXPECT_EQ(0, op->EffectInputCount());
EXPECT_EQ(1, op->ControlInputCount());
EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(0, op->ValueOutputCount());
EXPECT_EQ(0, op->EffectOutputCount());
EXPECT_EQ(static_cast<int>(cases), op->ControlOutputCount());
}
}
TEST_F(CommonOperatorTest, Case) {
TRACED_FORRANGE(size_t, index, 0, 1024) {
const Operator* const op = common()->Case(index);
EXPECT_EQ(IrOpcode::kCase, op->opcode());
EXPECT_EQ(Operator::kFoldable, op->properties());
EXPECT_EQ(index, CaseIndexOf(op));
EXPECT_EQ(0, op->ValueInputCount());
EXPECT_EQ(0, op->EffectInputCount());
EXPECT_EQ(1, op->ControlInputCount());
EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(0, op->ValueOutputCount());
EXPECT_EQ(0, op->EffectOutputCount());
EXPECT_EQ(1, op->ControlOutputCount());
}
}
TEST_F(CommonOperatorTest, Select) {
static const MachineType kTypes[] = {
kMachInt8, kMachUint8, kMachInt16, kMachUint16,
......
......@@ -1967,6 +1967,48 @@ TARGET_TEST_F(SchedulerTest, BranchHintFalse) {
CHECK(!schedule->block(f)->deferred());
}
TARGET_TEST_F(SchedulerTest, Switch) {
Node* start = graph()->NewNode(common()->Start(1));
graph()->SetStart(start);
Node* p0 = graph()->NewNode(common()->Parameter(0), start);
Node* sw = graph()->NewNode(common()->Switch(2), p0, start);
Node* c0 = graph()->NewNode(common()->Case(0), sw);
Node* v0 = graph()->NewNode(common()->Int32Constant(11));
Node* c1 = graph()->NewNode(common()->Case(1), sw);
Node* v1 = graph()->NewNode(common()->Int32Constant(22));
Node* m = graph()->NewNode(common()->Merge(2), c0, c1);
Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 2), v0, v1, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
Node* end = graph()->NewNode(common()->End(), ret);
graph()->SetEnd(end);
ComputeAndVerifySchedule(13, graph());
}
TARGET_TEST_F(SchedulerTest, FloatingSwitch) {
Node* start = graph()->NewNode(common()->Start(1));
graph()->SetStart(start);
Node* p0 = graph()->NewNode(common()->Parameter(0), start);
Node* sw = graph()->NewNode(common()->Switch(2), p0, start);
Node* c0 = graph()->NewNode(common()->Case(0), sw);
Node* v0 = graph()->NewNode(common()->Int32Constant(11));
Node* c1 = graph()->NewNode(common()->Case(1), sw);
Node* v1 = graph()->NewNode(common()->Int32Constant(22));
Node* m = graph()->NewNode(common()->Merge(2), c0, c1);
Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 2), v0, v1, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
Node* end = graph()->NewNode(common()->End(), ret);
graph()->SetEnd(end);
ComputeAndVerifySchedule(13, graph());
}
} // namespace compiler
} // namespace internal
} // namespace v8