Commit 339000bf authored by epertoso, committed by Commit bot

[turbofan] Add the Verifier to the pipeline for code stubs.

Removes some control edges that the RawMachineAssembler added to the end of the graph.
Adds a parameter that tells the Verifier to ignore effect and control inputs.

Review URL: https://codereview.chromium.org/1912853003

Cr-Commit-Position: refs/heads/master@{#35731}
parent b86ec743
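
For orientation, a minimal sketch of the new hook (the call mirrors the one added to Pipeline::GenerateCodeForCodeStub in the pipeline.cc hunk below; parameter names are taken from VerifyGraphPhase and this is illustrative rather than a literal excerpt):

// Verify the code-stub graph, checking value inputs only.
// The arguments map to VerifyGraphPhase::Run's 'untyped' and 'values_only'
// parameters; values_only == true forwards Verifier::kValuesOnly.
pipeline.Run<VerifyGraphPhase>(false, true);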
......@@ -556,6 +556,12 @@ void Pipeline::Run(Arg0 arg_0) {
phase.Run(this->data_, scope.zone(), arg_0);
}
template <typename Phase, typename Arg0, typename Arg1>
void Pipeline::Run(Arg0 arg_0, Arg1 arg_1) {
PipelineRunScope scope(this->data_, Phase::phase_name());
Phase phase;
phase.Run(this->data_, scope.zone(), arg_0, arg_1);
}
struct LoopAssignmentAnalysisPhase {
static const char* phase_name() { return "loop assignment analysis"; }
......@@ -1142,9 +1148,10 @@ struct PrintGraphPhase {
struct VerifyGraphPhase {
static const char* phase_name() { return nullptr; }
void Run(PipelineData* data, Zone* temp_zone, const bool untyped) {
Verifier::Run(data->graph(),
!untyped ? Verifier::TYPED : Verifier::UNTYPED);
void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
bool values_only = false) {
Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
values_only ? Verifier::kValuesOnly : Verifier::kAll);
}
};
......@@ -1344,6 +1351,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
pipeline.Run<PrintGraphPhase>("Machine");
}
pipeline.Run<VerifyGraphPhase>(false, true);
return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
......
......@@ -115,7 +115,6 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
void RawMachineAssembler::Return(Node* value) {
Node* ret = MakeNode(common()->Return(), 1, &value);
NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
......@@ -124,7 +123,6 @@ void RawMachineAssembler::Return(Node* value) {
void RawMachineAssembler::Return(Node* v1, Node* v2) {
Node* values[] = {v1, v2};
Node* ret = MakeNode(common()->Return(2), 2, values);
NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
......@@ -133,7 +131,6 @@ void RawMachineAssembler::Return(Node* v1, Node* v2) {
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
Node* values[] = {v1, v2, v3};
Node* ret = MakeNode(common()->Return(3), 3, values);
NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
......@@ -260,7 +257,6 @@ Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
buffer[index++] = args[i];
}
Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
......@@ -282,7 +278,6 @@ Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
Node* nodes[] = {centry, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
......@@ -304,7 +299,6 @@ Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
Node* nodes[] = {centry, arg1, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
......@@ -328,7 +322,6 @@ Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
......@@ -351,7 +344,6 @@ Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
......@@ -374,7 +366,6 @@ Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
......
......@@ -42,12 +42,14 @@ static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
class Verifier::Visitor {
public:
Visitor(Zone* z, Typing typed) : zone(z), typing(typed) {}
Visitor(Zone* z, Typing typed, CheckInputs check_inputs)
: zone(z), typing(typed), check_inputs(check_inputs) {}
void Check(Node* node);
Zone* zone;
Typing typing;
CheckInputs check_inputs;
private:
void CheckNotTyped(Node* node) {
......@@ -114,8 +116,10 @@ void Verifier::Visitor::Check(Node* node) {
int control_count = node->op()->ControlInputCount();
// Verify number of inputs matches up.
int input_count = value_count + context_count + frame_state_count +
effect_count + control_count;
int input_count = value_count + context_count + frame_state_count;
if (check_inputs == kAll) {
input_count += effect_count + control_count;
}
CHECK_EQ(input_count, node->InputCount());
// Verify that frame state has been inserted for the nodes that need it.
......@@ -150,20 +154,23 @@ void Verifier::Visitor::Check(Node* node) {
CHECK(IsUseDefChainLinkPresent(context, node));
}
// Verify all effect inputs actually have an effect.
for (int i = 0; i < effect_count; ++i) {
Node* effect = NodeProperties::GetEffectInput(node);
CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
CHECK(IsDefUseChainLinkPresent(effect, node));
CHECK(IsUseDefChainLinkPresent(effect, node));
}
if (check_inputs == kAll) {
// Verify all effect inputs actually have an effect.
for (int i = 0; i < effect_count; ++i) {
Node* effect = NodeProperties::GetEffectInput(node);
CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
CHECK(IsDefUseChainLinkPresent(effect, node));
CHECK(IsUseDefChainLinkPresent(effect, node));
}
// Verify all control inputs are control nodes.
for (int i = 0; i < control_count; ++i) {
Node* control = NodeProperties::GetControlInput(node, i);
CheckOutput(control, node, control->op()->ControlOutputCount(), "control");
CHECK(IsDefUseChainLinkPresent(control, node));
CHECK(IsUseDefChainLinkPresent(control, node));
// Verify all control inputs are control nodes.
for (int i = 0; i < control_count; ++i) {
Node* control = NodeProperties::GetControlInput(node, i);
CheckOutput(control, node, control->op()->ControlOutputCount(),
"control");
CHECK(IsDefUseChainLinkPresent(control, node));
CHECK(IsUseDefChainLinkPresent(control, node));
}
}
switch (node->opcode()) {
......@@ -1020,12 +1027,11 @@ void Verifier::Visitor::Check(Node* node) {
}
} // NOLINT(readability/fn_size)
void Verifier::Run(Graph* graph, Typing typing) {
void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
CHECK_NOT_NULL(graph->start());
CHECK_NOT_NULL(graph->end());
Zone zone(graph->zone()->allocator());
Visitor visitor(&zone, typing);
Visitor visitor(&zone, typing, check_inputs);
AllNodes all(&zone, graph);
for (Node* node : all.live) visitor.Check(node);
......
......@@ -21,8 +21,10 @@ class Schedule;
class Verifier {
public:
enum Typing { TYPED, UNTYPED };
enum CheckInputs { kValuesOnly, kAll };
static void Run(Graph* graph, Typing typing = TYPED);
static void Run(Graph* graph, Typing typing = TYPED,
CheckInputs check_inputs = kAll);
#ifdef DEBUG
// Verifies consistency of node inputs and uses:
......
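
A hedged usage sketch of the extended Verifier interface, assuming a compiler::Graph* named graph is in scope (the default arguments keep existing call sites unchanged):

// New third argument: with kValuesOnly, effect and control inputs are
// not checked (see the verifier.cc hunk above).
Verifier::Run(graph, Verifier::UNTYPED, Verifier::kValuesOnly);
// Existing callers are unaffected; this is equivalent to
// Verifier::Run(graph, Verifier::TYPED, Verifier::kAll).
Verifier::Run(graph);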
......@@ -497,9 +497,9 @@ Node* InterpreterAssembler::Advance(Node* delta) {
return IntPtrAdd(BytecodeOffset(), delta);
}
void InterpreterAssembler::Jump(Node* delta) {
Node* InterpreterAssembler::Jump(Node* delta) {
UpdateInterruptBudget(delta);
DispatchTo(Advance(delta));
return DispatchTo(Advance(delta));
}
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
......@@ -522,11 +522,11 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
JumpConditional(WordNotEqual(lhs, rhs), delta);
}
void InterpreterAssembler::Dispatch() {
DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
Node* InterpreterAssembler::Dispatch() {
return DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
}
void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Node* InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Node* target_bytecode = Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
if (kPointerSize == 8) {
......@@ -541,17 +541,17 @@ void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Load(MachineType::Pointer(), DispatchTableRawPointer(),
WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}
void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
Node* bytecode_offset) {
Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
Node* bytecode_offset) {
Node* handler_entry =
IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
}
void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
Node* handler_entry, Node* bytecode_offset) {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
......@@ -560,7 +560,7 @@ void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
InterpreterDispatchDescriptor descriptor(isolate());
Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
TailCallBytecodeDispatch(descriptor, handler_entry, args);
return TailCallBytecodeDispatch(descriptor, handler_entry, args);
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
......@@ -602,7 +602,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
void InterpreterAssembler::InterpreterReturn() {
compiler::Node* InterpreterAssembler::InterpreterReturn() {
// TODO(rmcilroy): Investigate whether it is worth supporting self
// optimization of primitive functions like FullCodegen.
......@@ -615,7 +615,7 @@ void InterpreterAssembler::InterpreterReturn() {
Node* exit_trampoline_code_object =
HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
DispatchToBytecodeHandler(exit_trampoline_code_object);
return DispatchToBytecodeHandler(exit_trampoline_code_object);
}
void InterpreterAssembler::StackCheck() {
......
......@@ -112,7 +112,7 @@ class InterpreterAssembler : public CodeStubAssembler {
compiler::Node* arg_count, int return_size = 1);
// Jump relative to the current bytecode by |jump_offset|.
void Jump(compiler::Node* jump_offset);
compiler::Node* Jump(compiler::Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// |condition| is true. Helper function for JumpIfWordEqual and
......@@ -133,14 +133,14 @@ class InterpreterAssembler : public CodeStubAssembler {
void StackCheck();
// Returns from the function.
void InterpreterReturn();
compiler::Node* InterpreterReturn();
// Dispatch to the bytecode.
void Dispatch();
compiler::Node* Dispatch();
// Dispatch to bytecode handler.
void DispatchToBytecodeHandler(compiler::Node* handler) {
DispatchToBytecodeHandler(handler, BytecodeOffset());
compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
return DispatchToBytecodeHandler(handler, BytecodeOffset());
}
// Dispatch bytecode as wide operand variant.
......@@ -213,15 +213,15 @@ class InterpreterAssembler : public CodeStubAssembler {
compiler::Node* Advance(compiler::Node* delta);
// Starts next instruction dispatch at |new_bytecode_offset|.
void DispatchTo(compiler::Node* new_bytecode_offset);
compiler::Node* DispatchTo(compiler::Node* new_bytecode_offset);
// Dispatch to the bytecode handler with code offset |handler|.
void DispatchToBytecodeHandler(compiler::Node* handler,
compiler::Node* bytecode_offset);
compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
compiler::Node* bytecode_offset);
// Dispatch to the bytecode handler with code entry point |handler_entry|.
void DispatchToBytecodeHandlerEntry(compiler::Node* handler_entry,
compiler::Node* bytecode_offset);
compiler::Node* DispatchToBytecodeHandlerEntry(
compiler::Node* handler_entry, compiler::Node* bytecode_offset);
// Abort operations for debug code.
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
......
......@@ -313,12 +313,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Dispatch();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
Node* tail_call_node = m.Dispatch();
OperandScale operand_scale = OperandScale::kSingle;
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
......@@ -357,11 +352,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
TRACED_FOREACH(int, jump_offset, jump_offsets) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Jump(m.IntPtrConstant(jump_offset));
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
Node* tail_call_node = m.Jump(m.IntPtrConstant(jump_offset));
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
......@@ -391,68 +382,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
static const int kJumpIfTrueOffset = 73;
// If debug code is enabled we emit extra code in Jump.
if (FLAG_debug_code) return;
MachineOperatorBuilder machine(zone());
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* lhs = m.IntPtrConstant(0);
Node* rhs = m.IntPtrConstant(1);
m.JumpIfWordEqual(lhs, rhs, m.IntPtrConstant(kJumpIfTrueOffset));
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(2, end->InputCount());
OperandScale operand_scale = OperandScale::kSingle;
int jump_offsets[] = {kJumpIfTrueOffset, interpreter::Bytecodes::Size(
bytecode, operand_scale)};
for (int i = 0; i < static_cast<int>(arraysize(jump_offsets)); i++) {
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsIntPtrConstant(jump_offsets[i]));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
if (kPointerSize == 8) {
target_bytecode_matcher =
IsChangeUint32ToUint64(target_bytecode_matcher);
}
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
IsWordShl(target_bytecode_matcher,
IsIntPtrConstant(kPointerSizeLog2)));
EXPECT_THAT(
end->InputAt(i),
IsTailCall(
_, code_target_matcher,
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
next_bytecode_offset_matcher, _,
IsParameter(
InterpreterDispatchDescriptor::kDispatchTableParameter),
_, _));
}
// TODO(oth): test control flow paths.
}
}
TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
// If debug code is enabled we emit extra code in InterpreterReturn.
if (FLAG_debug_code) return;
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.InterpreterReturn();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
Node* tail_call_node = m.InterpreterReturn();
Handle<HeapObject> exit_trampoline =
isolate()->builtins()->InterpreterExitTrampoline();
......@@ -548,12 +484,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
EXPECT_THAT(m.GetAccumulator(), accumulator_value_2);
// Should be passed to next bytecode handler on dispatch.
m.Dispatch();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
Node* tail_call_node = m.Dispatch();
EXPECT_THAT(tail_call_node,
IsTailCall(_, _, accumulator_value_2, _, _, _, _));
......