Commit 33142c12 authored by mstarzinger, committed by Commit bot

[turbofan] Make RawMachineAssembler handle the end node.

This moves the proper handling of the end node within the constructed
graph into the RawMachineAssembler. This simplifies all assemblers and
makes the handling of {Start} and {End} symmetrical.

R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/1493963003

Cr-Commit-Position: refs/heads/master@{#32563}
parent 75f11021
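For context on the diff below: every graph now receives an empty {End} node at
construction time (graph->SetEnd(graph->NewNode(common_.End(0)))), and the
tail-call helpers in RawMachineAssembler merge each tail call into that node
via NodeProperties::MergeControlToEnd, which is why the per-assembler
end_nodes_ vectors and explicit End() calls can be deleted. The stand-alone toy
model below (not V8 code; Graph, Node, and Operator are simplified stand-ins)
sketches that bookkeeping:

// Toy model of the centralized end-node handling introduced by this commit.
// A graph starts with End(0); every tail call is appended to End as one more
// control input, and End's operator is widened to match the new input count.
#include <cstdio>
#include <string>
#include <vector>

struct Operator {
  std::string mnemonic;
  int control_input_count;
};

struct Node {
  Operator op;
  std::vector<Node*> control_inputs;
};

struct Graph {
  std::vector<Node*> nodes;
  Node* end = nullptr;
  Node* NewNode(const Operator& op) {
    nodes.push_back(new Node{op, {}});
    return nodes.back();
  }
  ~Graph() {
    for (Node* n : nodes) delete n;
  }
};

// Mirrors the idea behind NodeProperties::MergeControlToEnd: append the node
// to End and update End's operator to reflect the new input count.
void MergeControlToEnd(Graph* graph, Node* node) {
  graph->end->control_inputs.push_back(node);
  graph->end->op.control_input_count =
      static_cast<int>(graph->end->control_inputs.size());
}

int main() {
  Graph graph;
  graph.end = graph.NewNode({"End", 0});  // created up front, like End(0)

  // Each TailCall (e.g. from InterpreterAssembler::Return or Dispatch) ends
  // its block and is merged into End right away; no AddEndInput()/End() pair
  // is needed anymore.
  MergeControlToEnd(&graph, graph.NewNode({"TailCall", 0}));
  MergeControlToEnd(&graph, graph.NewNode({"TailCall", 0}));

  std::printf("End has %d control inputs\n",
              graph.end->op.control_input_count);  // prints 2
  return 0;
}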
@@ -32,7 +32,6 @@ CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
isolate, new (zone) Graph(zone),
Linkage::GetStubCallDescriptor(isolate, zone, descriptor, 0,
CallDescriptor::kNoFlags))),
end_nodes_(zone),
kind_(kind),
name_(name),
code_generated_(false) {}
@@ -44,8 +43,6 @@ CodeStubAssembler::~CodeStubAssembler() {}
Handle<Code> CodeStubAssembler::GenerateCode() {
DCHECK(!code_generated_);
End();
Schedule* schedule = raw_assembler_->Export();
Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
isolate(), raw_assembler_->call_descriptor(), graph(), schedule, kind_,
@@ -164,23 +161,6 @@ Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
}
void CodeStubAssembler::AddEndInput(Node* input) {
DCHECK_NOT_NULL(input);
end_nodes_.push_back(input);
}
void CodeStubAssembler::End() {
if (end_nodes_.size() == 0) {
end_nodes_.push_back(graph()->start());
}
int end_count = static_cast<int>(end_nodes_.size());
Node* end = graph()->NewNode(raw_assembler_->common()->End(end_count),
end_count, &end_nodes_[0]);
graph()->SetEnd(end);
}
// RawMachineAssembler delegate helpers:
Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
......
@@ -10,7 +10,6 @@
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/runtime/runtime.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -71,24 +70,18 @@ class CodeStubAssembler {
private:
friend class CodeStubAssemblerTester;
// Close the graph.
void End();
Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* SmiShiftBitsConstant();
// Adds an end node of the graph.
void AddEndInput(Node* input);
// Private helpers which delegate to RawMachineAssembler.
Graph* graph();
Isolate* isolate();
Zone* zone();
base::SmartPointer<RawMachineAssembler> raw_assembler_;
ZoneVector<Node*> end_nodes_;
Code::Kind kind_;
const char* name_;
bool code_generated_;
......
@@ -399,7 +399,6 @@ CACHED_OP_LIST(CACHED)
const Operator* CommonOperatorBuilder::End(size_t control_input_count) {
DCHECK_NE(0u, control_input_count); // Disallow empty ends.
switch (control_input_count) {
#define CACHED_END(input_count) \
case input_count: \
......
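The CACHED_END cases above come from TurboFan's usual operator-caching scheme:
End operators for small, common input counts are served from a prebuilt cache,
while other arities are built on demand (the hunk presumably also relaxes the
old non-zero-input check so that the End(0) placeholder created at graph
construction is allowed). A rough stand-alone sketch of that caching idea,
using a hypothetical EndOperator() helper rather than V8's macro-generated,
zone-allocated implementation:

#include <cstdio>
#include <map>

struct Operator {
  const char* mnemonic;
  int control_input_count;
};

// Hypothetical End() builder: counts 1..4 come from a static cache, larger
// counts are built lazily and kept in a map so repeated requests reuse them.
const Operator* EndOperator(int control_input_count) {
  static const Operator kCachedEnds[] = {
      {"End", 1}, {"End", 2}, {"End", 3}, {"End", 4}};
  if (control_input_count >= 1 && control_input_count <= 4) {
    return &kCachedEnds[control_input_count - 1];
  }
  static std::map<int, Operator> uncached;
  auto it = uncached.emplace(control_input_count,
                             Operator{"End", control_input_count}).first;
  return &it->second;
}

int main() {
  std::printf("%s with %d inputs (cached)\n", EndOperator(2)->mnemonic,
              EndOperator(2)->control_input_count);
  std::printf("%s with %d inputs (on demand)\n", EndOperator(9)->mnemonic,
              EndOperator(9)->control_input_count);
  return 0;
}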
@@ -32,7 +32,6 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
isolate, new (zone) Graph(zone),
Linkage::GetInterpreterDispatchDescriptor(zone), kMachPtr,
InstructionSelector::SupportedMachineOperatorFlags())),
end_nodes_(zone),
accumulator_(
raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
context_(
@@ -46,7 +45,8 @@ InterpreterAssembler::~InterpreterAssembler() {}
Handle<Code> InterpreterAssembler::GenerateCode() {
DCHECK(!code_generated_);
End();
// Disallow empty handlers that never return.
DCHECK_NE(0, graph()->end()->InputCount());
const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
Schedule* schedule = raw_assembler_->Export();
@@ -527,10 +527,8 @@ void InterpreterAssembler::Return() {
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
GetContext() };
Node* tail_call = raw_assembler_->TailCallN(
call_descriptor(), exit_trampoline_code_object, args);
// This should always be the end node.
AddEndInput(tail_call);
raw_assembler_->TailCallN(call_descriptor(), exit_trampoline_code_object,
args);
}
@@ -587,10 +585,7 @@ void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
GetContext() };
Node* tail_call =
raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
// This should always be the end node.
AddEndInput(tail_call);
raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
}
@@ -612,21 +607,6 @@ void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
}
void InterpreterAssembler::AddEndInput(Node* input) {
DCHECK_NOT_NULL(input);
end_nodes_.push_back(input);
}
void InterpreterAssembler::End() {
DCHECK(!end_nodes_.empty());
int end_count = static_cast<int>(end_nodes_.size());
Node* end = graph()->NewNode(raw_assembler_->common()->End(end_count),
end_count, &end_nodes_[0]);
graph()->SetEnd(end);
}
// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
......
@@ -13,7 +13,6 @@
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -148,9 +147,6 @@ class InterpreterAssembler {
void Abort(BailoutReason bailout_reason);
protected:
// Close the graph.
void End();
static bool TargetSupportsUnalignedAccess();
// Protected helpers (for testing) which delegate to RawMachineAssembler.
@@ -191,16 +187,12 @@
// Abort operations for debug code.
void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
// Adds an end node of the graph.
void AddEndInput(Node* input);
// Private helpers which delegate to RawMachineAssembler.
Isolate* isolate();
Zone* zone();
interpreter::Bytecode bytecode_;
base::SmartPointer<RawMachineAssembler> raw_assembler_;
ZoneVector<Node*> end_nodes_;
Node* accumulator_;
Node* context_;
bool code_generated_;
......
@@ -5,6 +5,7 @@
#include "src/compiler/raw-machine-assembler.h"
#include "src/code-factory.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
@@ -31,6 +32,7 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
parameters_[i] =
AddNode(common()->Parameter(static_cast<int>(i)), graph->start());
}
graph->SetEnd(graph->NewNode(common_.End(0)));
}
@@ -212,6 +214,7 @@ Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
buffer[index++] = graph()->start();
buffer[index++] = graph()->start();
Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -234,6 +237,7 @@ Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
graph()->start()};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -258,6 +262,7 @@ Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
graph()->start()};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
......
@@ -145,18 +145,11 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
}
Graph*
InterpreterAssemblerTest::InterpreterAssemblerForTest::GetCompletedGraph() {
End();
return graph();
}
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Dispatch();
Graph* graph = m.GetCompletedGraph();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
@@ -195,7 +188,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Jump(m.Int32Constant(jump_offset));
Graph* graph = m.GetCompletedGraph();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
@@ -238,7 +231,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
Node* lhs = m.IntPtrConstant(0);
Node* rhs = m.IntPtrConstant(1);
m.JumpIfWordEqual(lhs, rhs, m.Int32Constant(kJumpIfTrueOffset));
Graph* graph = m.GetCompletedGraph();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(2, end->InputCount());
@@ -276,7 +269,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Return) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Return();
Graph* graph = m.GetCompletedGraph();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
@@ -356,7 +349,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
// Should be passed to next bytecode handler on dispatch.
m.Dispatch();
Graph* graph = m.GetCompletedGraph();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
......
@@ -29,8 +29,6 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
: InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
~InterpreterAssemblerForTest() override {}
Graph* GetCompletedGraph();
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher);
......