Commit c7c5d50d authored by Seth Brenith, committed by Commit Bot

[torque] Add C++ backend for Torque compiler

This change adds a new code generator that supports a subset of the
instructions handled by the existing CSAGenerator and, instead of
emitting CSA, emits runtime C++ code. The new generator is used to
produce a set of Torque macros that return slices to indexed fields.
These new macros should be sufficient to eventually support
Torque-generated field accessors, BodyDescriptors, verifier functions,
and postmortem field inspection in debug_helper.

Bug: v8:7793
Change-Id: Ife2d25cfd55a08238c625a8b04aca3ff2a0f4c63
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2429566
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Cr-Commit-Position: refs/heads/master@{#70313}
parent b90717df
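
Before the diff, a brief orientation: the new backend emits ordinary runtime C++ rather than CSA. Below is a hedged sketch of the rough shape of one generated macro, not actual compiler output; ExampleClass, its length() accessor, kExampleEntriesOffset, and the tuple element types are illustrative assumptions, while the TqRuntime prefix, the leading Isolate* parameter, the DisallowHeapAllocation guard, the pre-declared temporaries, the goto-based block structure, and the TorqueRuntimeMacroShims calls mirror what the generator code in this change produces.

// Hedged sketch only; names and types marked below are assumptions.
std::tuple<HeapObject, intptr_t, intptr_t>
TqRuntimeFieldRefExampleClassEntries(Isolate* isolate, ExampleClass o) {
  // Generated runtime code must not allocate.
  DisallowHeapAllocation no_gc;
  // Temporaries are pre-declared so that goto-based blocks (the C++
  // stand-in for CSA labels) can assign to them from any block.
  intptr_t tmp0{};
  USE(tmp0);
  intptr_t tmp1{};
  USE(tmp1);
  goto block0;

block0:
  // CSA primitives are routed through trivial runtime shims.
  tmp0 = TorqueRuntimeMacroShims::CodeStubAssembler::ChangeInt32ToIntPtr(
      isolate, kExampleEntriesOffset);  // kExampleEntriesOffset: assumed
  tmp1 = TorqueRuntimeMacroShims::CodeStubAssembler::ChangeInt32ToIntPtr(
      isolate, o.length());  // length(): assumed accessor
  // A Torque Slice lowers to an (object, offset, length) tuple.
  return std::tuple<HeapObject, intptr_t, intptr_t>{o, tmp0, tmp1};
}

Callers unpack the returned slice with std::tie, as the SmallOrderedHashSet verifier changes below do.
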
@@ -1343,6 +1343,8 @@ template("run_torque") {
"$target_gen_dir/torque-generated/internal-class-definitions-inl.h",
"$target_gen_dir/torque-generated/exported-class-definitions.h",
"$target_gen_dir/torque-generated/exported-class-definitions-inl.h",
"$target_gen_dir/torque-generated/runtime-macros.cc",
"$target_gen_dir/torque-generated/runtime-macros.h",
]
outputs = []
@@ -1459,6 +1461,7 @@ v8_source_set("torque_generated_definitions") {
"$target_gen_dir/torque-generated/class-verifiers.h",
"$target_gen_dir/torque-generated/factory.cc",
"$target_gen_dir/torque-generated/objects-printer.cc",
"$target_gen_dir/torque-generated/runtime-macros.cc",
]
configs = [ ":internal_config" ]
@@ -3858,6 +3861,8 @@ v8_source_set("torque_base") {
sources = [
"src/torque/ast.h",
"src/torque/cc-generator.cc",
"src/torque/cc-generator.h",
"src/torque/cfg.cc",
"src/torque/cfg.h",
"src/torque/class-debug-reader-generator.cc",
@@ -3885,6 +3890,8 @@ v8_source_set("torque_base") {
"src/torque/server-data.h",
"src/torque/source-positions.cc",
"src/torque/source-positions.h",
"src/torque/torque-code-generator.cc",
"src/torque/torque-code-generator.h",
"src/torque/torque-compiler.cc",
"src/torque/torque-compiler.h",
"src/torque/torque-parser.cc",
@@ -75,6 +75,7 @@
#include "torque-generated/class-verifiers.h"
#include "torque-generated/exported-class-definitions-inl.h"
#include "torque-generated/internal-class-definitions-inl.h"
#include "torque-generated/runtime-macros.h"
namespace v8 {
namespace internal {
@@ -1203,6 +1204,24 @@ void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
CHECK(val.IsTheHole(isolate));
}
}
// Eventually Torque-generated offset computations could replace the ones
// implemented in C++. For now, just make sure they match. This could help
// ensure that the class definitions in C++ and Torque don't diverge.
intptr_t offset;
intptr_t length;
std::tie(std::ignore, offset, length) =
TqRuntimeFieldRefSmallOrderedHashSetDataTable(isolate, *this);
CHECK_EQ(offset, DataTableStartOffset());
CHECK_EQ(length, Capacity());
std::tie(std::ignore, offset, length) =
TqRuntimeFieldRefSmallOrderedHashSetHashTable(isolate, *this);
CHECK_EQ(offset, GetBucketsStartOffset());
CHECK_EQ(length, NumberOfBuckets());
std::tie(std::ignore, offset, length) =
TqRuntimeFieldRefSmallOrderedHashSetChainTable(isolate, *this);
CHECK_EQ(offset, GetChainTableOffset());
CHECK_EQ(length, Capacity());
}
void SmallOrderedNameDictionary::SmallOrderedNameDictionaryVerify(
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/torque/cc-generator.h"
#include "src/common/globals.h"
#include "src/torque/global-context.h"
#include "src/torque/type-oracle.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
namespace v8 {
namespace internal {
namespace torque {
base::Optional<Stack<std::string>> CCGenerator::EmitGraph(
Stack<std::string> parameters) {
for (BottomOffset i = {0}; i < parameters.AboveTop(); ++i) {
SetDefinitionVariable(DefinitionLocation::Parameter(i.offset),
parameters.Peek(i));
}
// C++ doesn't have parameterized labels like CSA, so we must pre-declare all
// phi values so they're in scope for both the blocks that define them and the
// blocks that read them.
for (Block* block : cfg_.blocks()) {
if (block->IsDead()) continue;
DCHECK_EQ(block->InputTypes().Size(), block->InputDefinitions().Size());
for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
DefinitionLocation input_def = block->InputDefinitions().Peek(i);
if (block->InputDefinitions().Peek(i).IsPhiFromBlock(block)) {
out() << " " << block->InputTypes().Peek(i)->GetRuntimeType() << " "
<< DefinitionToVariable(input_def) << ";\n";
}
}
}
// Redirect the output of non-declarations into a buffer and only output
// declarations right away.
std::stringstream out_buffer;
std::ostream* old_out = out_;
out_ = &out_buffer;
EmitInstruction(GotoInstruction{cfg_.start()}, &parameters);
for (Block* block : cfg_.blocks()) {
if (cfg_.end() && *cfg_.end() == block) continue;
if (block->IsDead()) continue;
EmitBlock(block);
}
base::Optional<Stack<std::string>> result;
if (cfg_.end()) {
result = EmitBlock(*cfg_.end());
}
// All declarations have been printed now, so we can append the buffered
// output and redirect back to the original output stream.
out_ = old_out;
out() << out_buffer.str();
return result;
}
Stack<std::string> CCGenerator::EmitBlock(const Block* block) {
out() << "\n";
out() << " " << BlockName(block) << ":\n";
Stack<std::string> stack;
for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
const auto& def = block->InputDefinitions().Peek(i);
stack.Push(DefinitionToVariable(def));
if (def.IsPhiFromBlock(block)) {
decls() << " " << block->InputTypes().Peek(i)->GetRuntimeType() << " "
<< stack.Top() << "{}; USE(" << stack.Top() << ");\n";
}
}
for (const Instruction& instruction : block->instructions()) {
TorqueCodeGenerator::EmitInstruction(instruction, &stack);
}
return stack;
}
void CCGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
const std::string& file = SourceFileMap::AbsolutePath(pos.source);
if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
// Lines in Torque SourcePositions are zero-based, while the
// CodeStubAssembler and downwind systems are one-based.
out() << " // " << file << ":" << (pos.start.line + 1) << "\n";
previous_position_ = pos;
}
}
void CCGenerator::EmitInstruction(
const PushUninitializedInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: PushUninitialized");
}
void CCGenerator::EmitInstruction(
const PushBuiltinPointerInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: PushBuiltinPointer");
}
void CCGenerator::EmitInstruction(
const NamespaceConstantInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: NamespaceConstantInstruction");
}
std::vector<std::string> CCGenerator::ProcessArgumentsCommon(
const TypeVector& parameter_types,
std::vector<std::string> constexpr_arguments, Stack<std::string>* stack) {
std::vector<std::string> args;
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
VisitResult arg;
if (type->IsConstexpr()) {
args.push_back(std::move(constexpr_arguments.back()));
constexpr_arguments.pop_back();
} else {
std::stringstream s;
size_t slot_count = LoweredSlotCount(type);
VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
EmitCCValue(arg, *stack, s);
args.push_back(s.str());
stack->PopMany(slot_count);
}
}
std::reverse(args.begin(), args.end());
return args;
}
void CCGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
Stack<std::string>* stack) {
TypeVector parameter_types =
instruction.intrinsic->signature().parameter_types.types;
std::vector<std::string> args = ProcessArgumentsCommon(
parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.intrinsic->signature().return_type;
std::vector<std::string> results;
const auto lowered = LowerType(return_type);
for (std::size_t i = 0; i < lowered.size(); ++i) {
results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
stack->Push(results.back());
decls() << " " << lowered[i]->GetRuntimeType() << " " << stack->Top()
<< "{}; USE(" << stack->Top() << ");\n";
}
out() << " ";
if (return_type->StructSupertype()) {
out() << "std::tie(";
PrintCommaSeparatedList(out(), results);
out() << ") = ";
} else {
if (results.size() == 1) {
out() << results[0] << " = ";
}
}
if (instruction.intrinsic->ExternalName() == "%RawDownCast") {
if (parameter_types.size() != 1) {
ReportError("%RawDownCast must take a single parameter");
}
const Type* original_type = parameter_types[0];
bool is_subtype =
return_type->IsSubtypeOf(original_type) ||
(original_type == TypeOracle::GetUninitializedHeapObjectType() &&
return_type->IsSubtypeOf(TypeOracle::GetHeapObjectType()));
if (!is_subtype) {
ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ",
*original_type);
}
if (!original_type->StructSupertype() &&
return_type->GetRuntimeType() != original_type->GetRuntimeType()) {
out() << "static_cast<" << return_type->GetRuntimeType() << ">";
}
} else if (instruction.intrinsic->ExternalName() == "%GetClassMapConstant") {
ReportError("C++ generator doesn't yet support %GetClassMapConstant");
} else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
if (parameter_types.size() != 1 || !parameter_types[0]->IsConstexpr()) {
ReportError(
"%FromConstexpr must take a single parameter with constexpr "
"type");
}
if (return_type->IsConstexpr()) {
ReportError("%FromConstexpr must return a non-constexpr type");
}
// Nothing to do here; constexpr expressions are already valid C++.
} else {
ReportError("no built in intrinsic with name " +
instruction.intrinsic->ExternalName());
}
out() << "(";
PrintCommaSeparatedList(out(), args);
out() << ");\n";
}
void CCGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
Stack<std::string>* stack) {
TypeVector parameter_types =
instruction.macro->signature().parameter_types.types;
std::vector<std::string> args = ProcessArgumentsCommon(
parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.macro->signature().return_type;
std::vector<std::string> results;
const auto lowered = LowerType(return_type);
for (std::size_t i = 0; i < lowered.size(); ++i) {
results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
stack->Push(results.back());
decls() << " " << lowered[i]->GetRuntimeType() << " " << stack->Top()
<< "{}; USE(" << stack->Top() << ");\n";
}
// We should have inlined any calls requiring complex control flow.
CHECK(!instruction.catch_block);
out() << " ";
if (return_type->StructSupertype().has_value()) {
out() << "std::tie(";
PrintCommaSeparatedList(out(), results);
out() << ") = ";
} else {
if (results.size() == 1) {
out() << results[0] << " = ";
} else {
DCHECK_EQ(0, results.size());
}
}
if (ExternMacro* extern_macro = ExternMacro::DynamicCast(instruction.macro)) {
out() << "TorqueRuntimeMacroShims::"
<< extern_macro->external_assembler_name() << "::";
} else {
out() << "TqRuntime";
}
out() << instruction.macro->CCName() << "(isolate";
if (!args.empty()) out() << ", ";
PrintCommaSeparatedList(out(), args);
out() << ");\n";
}
void CCGenerator::EmitInstruction(
const CallCsaMacroAndBranchInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: CallCsaMacroAndBranch");
}
void CCGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: CallBuiltin");
}
void CCGenerator::EmitInstruction(
const CallBuiltinPointerInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: CallBuiltinPointer");
}
void CCGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: CallRuntime");
}
void CCGenerator::EmitInstruction(const BranchInstruction& instruction,
Stack<std::string>* stack) {
out() << " if (" << stack->Pop() << ") {\n";
EmitGoto(instruction.if_true, stack, " ");
out() << " } else {\n";
EmitGoto(instruction.if_false, stack, " ");
out() << " }\n";
}
void CCGenerator::EmitInstruction(const ConstexprBranchInstruction& instruction,
Stack<std::string>* stack) {
out() << " if ((" << instruction.condition << ")) {\n";
EmitGoto(instruction.if_true, stack, " ");
out() << " } else {\n";
EmitGoto(instruction.if_false, stack, " ");
out() << " }\n";
}
void CCGenerator::EmitGoto(const Block* destination, Stack<std::string>* stack,
std::string indentation) {
const auto& destination_definitions = destination->InputDefinitions();
DCHECK_EQ(stack->Size(), destination_definitions.Size());
for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
DefinitionLocation def = destination_definitions.Peek(i);
if (def.IsPhiFromBlock(destination)) {
out() << indentation << DefinitionToVariable(def) << " = "
<< stack->Peek(i) << ";\n";
}
}
out() << indentation << "goto " << BlockName(destination) << ";\n";
}
void CCGenerator::EmitInstruction(const GotoInstruction& instruction,
Stack<std::string>* stack) {
EmitGoto(instruction.destination, stack, " ");
}
void CCGenerator::EmitInstruction(const GotoExternalInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: GotoExternal");
}
void CCGenerator::EmitInstruction(const ReturnInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: Return");
}
void CCGenerator::EmitInstruction(
const PrintConstantStringInstruction& instruction,
Stack<std::string>* stack) {
out() << " std::cout << " << StringLiteralQuote(instruction.message)
<< ";\n";
}
void CCGenerator::EmitInstruction(const AbortInstruction& instruction,
Stack<std::string>* stack) {
switch (instruction.kind) {
case AbortInstruction::Kind::kUnreachable:
DCHECK(instruction.message.empty());
out() << " UNREACHABLE();\n";
break;
case AbortInstruction::Kind::kDebugBreak:
DCHECK(instruction.message.empty());
out() << " base::OS::DebugBreak();\n";
break;
case AbortInstruction::Kind::kAssertionFailure: {
std::string file = StringLiteralQuote(
SourceFileMap::PathFromV8Root(instruction.pos.source));
out() << " CHECK(false, \"Failed Torque assertion: '\""
<< StringLiteralQuote(instruction.message) << "\"' at \"" << file
<< "\":\""
<< StringLiteralQuote(
std::to_string(instruction.pos.start.line + 1))
<< ");\n";
break;
}
}
}
void CCGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
Stack<std::string>* stack) {
const std::string str = "static_cast<" +
instruction.destination_type->GetRuntimeType() +
">(" + stack->Top() + ")";
stack->Poke(stack->AboveTop() - 1, str);
SetDefinitionVariable(instruction.GetValueDefinition(), str);
}
void CCGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
Stack<std::string>* stack) {
std::string result_name =
DefinitionToVariable(instruction.GetValueDefinition());
std::string offset = stack->Pop();
std::string object = stack->Pop();
stack->Push(result_name);
std::string result_type = instruction.type->GetRuntimeType();
decls() << " " << result_type << " " << result_name << "{}; USE("
<< result_name << ");\n";
out() << " " << result_name << " = ";
if (instruction.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
out() << "TaggedField<" << result_type << ">::load(isolate, " << object
<< ", static_cast<int>(" << offset << "));\n";
} else {
out() << "(" << object << ").ReadField<" << result_type << ">(" << offset
<< ");\n";
}
}
void CCGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: StoreReference");
}
namespace {
std::string GetBitFieldSpecialization(const Type* container,
const BitField& field) {
std::stringstream stream;
stream << "base::BitField<"
<< field.name_and_type.type->GetConstexprGeneratedTypeName() << ", "
<< field.offset << ", " << field.num_bits << ", "
<< container->GetConstexprGeneratedTypeName() << ">";
return stream.str();
}
} // namespace
void CCGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
Stack<std::string>* stack) {
std::string result_name =
DefinitionToVariable(instruction.GetValueDefinition());
std::string bit_field_struct = stack->Pop();
stack->Push(result_name);
const Type* struct_type = instruction.bit_field_struct_type;
decls() << " " << instruction.bit_field.name_and_type.type->GetRuntimeType()
<< " " << result_name << "{}; USE(" << result_name << ");\n";
base::Optional<const Type*> smi_tagged_type =
Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
if (smi_tagged_type) {
// Get the untagged value and its type.
bit_field_struct = bit_field_struct + ".value()";
struct_type = *smi_tagged_type;
}
out() << " " << result_name << " = "
<< GetBitFieldSpecialization(struct_type, instruction.bit_field)
<< "::decode(" << bit_field_struct << ");\n";
}
void CCGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
Stack<std::string>* stack) {
ReportError("Not supported in C++ output: StoreBitField");
}
// static
void CCGenerator::EmitCCValue(VisitResult result,
const Stack<std::string>& values,
std::ostream& out) {
if (!result.IsOnStack()) {
out << result.constexpr_value();
} else if (auto struct_type = result.type()->StructSupertype()) {
out << "std::tuple_cat(";
bool first = true;
for (auto& field : (*struct_type)->fields()) {
if (!first) {
out << ", ";
}
first = false;
if (!field.name_and_type.type->IsStructType()) {
out << "std::make_tuple(";
}
EmitCCValue(ProjectStructField(result, field.name_and_type.name), values,
out);
if (!field.name_and_type.type->IsStructType()) {
out << ")";
}
}
out << ")";
} else {
DCHECK_EQ(1, result.stack_range().Size());
out << values.Peek(result.stack_range().begin());
}
}
} // namespace torque
} // namespace internal
} // namespace v8
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_TORQUE_CC_GENERATOR_H_
#define V8_TORQUE_CC_GENERATOR_H_
#include "src/torque/torque-code-generator.h"
namespace v8 {
namespace internal {
namespace torque {
class CCGenerator : public TorqueCodeGenerator {
public:
CCGenerator(const ControlFlowGraph& cfg, std::ostream& out)
: TorqueCodeGenerator(cfg, out) {}
base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
static void EmitCCValue(VisitResult result, const Stack<std::string>& values,
std::ostream& out);
private:
void EmitSourcePosition(SourcePosition pos,
bool always_emit = false) override;
void EmitGoto(const Block* destination, Stack<std::string>* stack,
std::string indentation);
std::vector<std::string> ProcessArgumentsCommon(
const TypeVector& parameter_types,
std::vector<std::string> constexpr_arguments, Stack<std::string>* stack);
Stack<std::string> EmitBlock(const Block* block);
#define EMIT_INSTRUCTION_DECLARATION(T) \
void EmitInstruction(const T& instruction, Stack<std::string>* stack) \
override;
TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
#undef EMIT_INSTRUCTION_DECLARATION
};
} // namespace torque
} // namespace internal
} // namespace v8
#endif // V8_TORQUE_CC_GENERATOR_H_
@@ -83,7 +83,7 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
out() << " ca_.Bind(&" << BlockName(block) << phi_names.str() << ");\n";
for (const Instruction& instruction : block->instructions()) {
EmitInstruction(instruction, &stack);
TorqueCodeGenerator::EmitInstruction(instruction, &stack);
}
return stack;
}
@@ -99,53 +99,6 @@ void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
}
}
bool CSAGenerator::IsEmptyInstruction(const Instruction& instruction) {
switch (instruction.kind()) {
case InstructionKind::kPeekInstruction:
case InstructionKind::kPokeInstruction:
case InstructionKind::kDeleteRangeInstruction:
case InstructionKind::kPushUninitializedInstruction:
case InstructionKind::kPushBuiltinPointerInstruction:
case InstructionKind::kUnsafeCastInstruction:
return true;
default:
return false;
}
}
void CSAGenerator::EmitInstruction(const Instruction& instruction,
Stack<std::string>* stack) {
#ifdef DEBUG
if (!IsEmptyInstruction(instruction)) {
EmitSourcePosition(instruction->pos);
}
#endif
switch (instruction.kind()) {
#define ENUM_ITEM(T) \
case InstructionKind::k##T: \
return EmitInstruction(instruction.Cast<T>(), stack);
TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
#undef ENUM_ITEM
}
}
void CSAGenerator::EmitInstruction(const PeekInstruction& instruction,
Stack<std::string>* stack) {
stack->Push(stack->Peek(instruction.slot));
}
void CSAGenerator::EmitInstruction(const PokeInstruction& instruction,
Stack<std::string>* stack) {
stack->Poke(instruction.slot, stack->Top());
stack->Pop();
}
void CSAGenerator::EmitInstruction(const DeleteRangeInstruction& instruction,
Stack<std::string>* stack) {
stack->DeleteRange(instruction.range);
}
void CSAGenerator::EmitInstruction(
const PushUninitializedInstruction& instruction,
Stack<std::string>* stack) {
@@ -198,35 +151,35 @@ void CSAGenerator::EmitInstruction(
}
}
void CSAGenerator::ProcessArgumentsCommon(
const TypeVector& parameter_types, std::vector<std::string>* args,
std::vector<std::string>* constexpr_arguments, Stack<std::string>* stack) {
std::vector<std::string> CSAGenerator::ProcessArgumentsCommon(
const TypeVector& parameter_types,
std::vector<std::string> constexpr_arguments, Stack<std::string>* stack) {
std::vector<std::string> args;
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
VisitResult arg;
if (type->IsConstexpr()) {
args->push_back(std::move(constexpr_arguments->back()));
constexpr_arguments->pop_back();
args.push_back(std::move(constexpr_arguments.back()));
constexpr_arguments.pop_back();
} else {
std::stringstream s;
size_t slot_count = LoweredSlotCount(type);
VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
EmitCSAValue(arg, *stack, s);
args->push_back(s.str());
args.push_back(s.str());
stack->PopMany(slot_count);
}
}
std::reverse(args->begin(), args->end());
std::reverse(args.begin(), args.end());
return args;
}
void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
Stack<std::string>* stack) {
std::vector<std::string> constexpr_arguments =
instruction.constexpr_arguments;
std::vector<std::string> args;
TypeVector parameter_types =
instruction.intrinsic->signature().parameter_types.types;
ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
std::vector<std::string> args = ProcessArgumentsCommon(
parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.intrinsic->signature().return_type;
@@ -355,12 +308,10 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
Stack<std::string>* stack) {
std::vector<std::string> constexpr_arguments =
instruction.constexpr_arguments;
std::vector<std::string> args;
TypeVector parameter_types =
instruction.macro->signature().parameter_types.types;
ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
std::vector<std::string> args = ProcessArgumentsCommon(
parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.macro->signature().return_type;
@@ -409,12 +360,10 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
void CSAGenerator::EmitInstruction(
const CallCsaMacroAndBranchInstruction& instruction,
Stack<std::string>* stack) {
std::vector<std::string> constexpr_arguments =
instruction.constexpr_arguments;
std::vector<std::string> args;
TypeVector parameter_types =
instruction.macro->signature().parameter_types.types;
ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
std::vector<std::string> args = ProcessArgumentsCommon(
parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
std::vector<std::string> results;
@@ -5,24 +5,17 @@
#ifndef V8_TORQUE_CSA_GENERATOR_H_
#define V8_TORQUE_CSA_GENERATOR_H_
#include <iostream>
#include "src/torque/cfg.h"
#include "src/torque/declarable.h"
#include "src/torque/torque-code-generator.h"
namespace v8 {
namespace internal {
namespace torque {
class CSAGenerator {
class CSAGenerator : public TorqueCodeGenerator {
public:
CSAGenerator(const ControlFlowGraph& cfg, std::ostream& out,
base::Optional<Builtin::Kind> linkage = base::nullopt)
: cfg_(cfg),
out_(&out),
out_decls_(&out),
linkage_(linkage),
previous_position_(SourcePosition::Invalid()) {}
: TorqueCodeGenerator(cfg, out), linkage_(linkage) {}
base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
static constexpr const char* ARGUMENTS_VARIABLE_STRING = "arguments";
@@ -31,46 +24,10 @@ class CSAGenerator {
std::ostream& out);
private:
const ControlFlowGraph& cfg_;
std::ostream* out_;
std::ostream* out_decls_;
size_t fresh_id_ = 0;
base::Optional<Builtin::Kind> linkage_;
SourcePosition previous_position_;
std::map<DefinitionLocation, std::string> location_map_;
std::string DefinitionToVariable(const DefinitionLocation& location) {
if (location.IsPhi()) {
std::stringstream stream;
stream << "phi_bb" << location.GetPhiBlock()->id() << "_"
<< location.GetPhiIndex();
return stream.str();
} else if (location.IsParameter()) {
auto it = location_map_.find(location);
DCHECK_NE(it, location_map_.end());
return it->second;
} else {
DCHECK(location.IsInstruction());
auto it = location_map_.find(location);
if (it == location_map_.end()) {
it = location_map_.insert(std::make_pair(location, FreshNodeName()))
.first;
}
return it->second;
}
}
void SetDefinitionVariable(const DefinitionLocation& definition,
const std::string& str) {
DCHECK_EQ(location_map_.find(definition), location_map_.end());
location_map_.insert(std::make_pair(definition, str));
}
std::ostream& out() { return *out_; }
std::ostream& decls() { return *out_decls_; }
bool IsEmptyInstruction(const Instruction& instruction);
void EmitSourcePosition(SourcePosition pos, bool always_emit = false);
void EmitSourcePosition(SourcePosition pos,
bool always_emit = false) override;
std::string PreCallableExceptionPreparation(
base::Optional<Block*> catch_block);
@@ -79,24 +36,15 @@ class CSAGenerator {
base::Optional<Block*> catch_block, Stack<std::string>* stack,
const base::Optional<DefinitionLocation>& exception_object_definition);
std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
std::string FreshCatchName() { return "catch" + std::to_string(fresh_id_++); }
std::string FreshLabelName() { return "label" + std::to_string(fresh_id_++); }
std::string BlockName(const Block* block) {
return "block" + std::to_string(block->id());
}
void ProcessArgumentsCommon(const TypeVector& parameter_types,
std::vector<std::string>* args,
std::vector<std::string>* constexpr_arguments,
Stack<std::string>* stack);
std::vector<std::string> ProcessArgumentsCommon(
const TypeVector& parameter_types,
std::vector<std::string> constexpr_arguments, Stack<std::string>* stack);
Stack<std::string> EmitBlock(const Block* block);
void EmitInstruction(const Instruction& instruction,
Stack<std::string>* stack);
#define EMIT_INSTRUCTION_DECLARATION(T) \
void EmitInstruction(const T& instruction, Stack<std::string>* stack);
TORQUE_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
#define EMIT_INSTRUCTION_DECLARATION(T) \
void EmitInstruction(const T& instruction, Stack<std::string>* stack) \
override;
TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
#undef EMIT_INSTRUCTION_DECLARATION
};
@@ -291,6 +291,11 @@ class ExternConstant : public Value {
}
};
enum class OutputType {
kCSA,
kCC,
};
class Callable : public Scope {
public:
DECLARE_DECLARABLE_BOILERPLATE(Callable, callable)
@@ -308,8 +313,17 @@ class Callable : public Scope {
bool HasReturns() const { return returns_; }
base::Optional<Statement*> body() const { return body_; }
bool IsExternal() const { return !body_.has_value(); }
virtual bool ShouldBeInlined() const { return false; }
virtual bool ShouldGenerateExternalCode() const { return !ShouldBeInlined(); }
virtual bool ShouldBeInlined(OutputType output_type) const {
// C++ output doesn't support exiting to labels, so functions with labels in
// the signature must be inlined.
return output_type == OutputType::kCC && !signature().labels.empty();
}
bool ShouldGenerateExternalCode(OutputType output_type) const {
return !ShouldBeInlined(output_type);
}
// Name to use in runtime C++ code.
virtual const std::string& CCName() const { return ExternalName(); }
protected:
Callable(Declarable::Kind kind, std::string external_name,
@@ -336,7 +350,7 @@ class Callable : public Scope {
class Macro : public Callable {
public:
DECLARE_DECLARABLE_BOILERPLATE(Macro, macro)
bool ShouldBeInlined() const override {
bool ShouldBeInlined(OutputType output_type) const override {
for (const LabelDeclaration& label : signature().labels) {
for (const Type* type : label.types) {
if (type->StructSupertype()) return true;
@@ -345,7 +359,7 @@ class Macro : public Callable {
// Intrinsics that are used internally in Torque and implemented as torque
// code should be inlined and not generate C++ definitions.
if (ReadableName()[0] == '%') return true;
return Callable::ShouldBeInlined();
return Callable::ShouldBeInlined(output_type);
}
void SetUsed() { used_ = true; }
@@ -390,6 +404,11 @@ class TorqueMacro : public Macro {
public:
DECLARE_DECLARABLE_BOILERPLATE(TorqueMacro, TorqueMacro)
bool IsExportedToCSA() const { return exported_to_csa_; }
const std::string& CCName() const override {
// Exported functions must have unique and C++-friendly readable names, so
// prefer those wherever possible.
return IsExportedToCSA() ? ReadableName() : ExternalName();
}
protected:
TorqueMacro(Declarable::Kind kind, std::string external_name,
@@ -417,8 +436,8 @@ class TorqueMacro : public Macro {
class Method : public TorqueMacro {
public:
DECLARE_DECLARABLE_BOILERPLATE(Method, Method)
bool ShouldBeInlined() const override {
return Macro::ShouldBeInlined() ||
bool ShouldBeInlined(OutputType output_type) const override {
return Macro::ShouldBeInlined(output_type) ||
signature()
.parameter_types.types[signature().implicit_count]
->IsStructType();
@@ -214,6 +214,7 @@ Macro* Declarations::DeclareMacro(
macro = CreateTorqueMacro(name, name, accessible_from_csa, signature, body,
is_user_defined);
}
Declare(name, macro);
if (op) {
if (TryLookupMacro(*op, signature.GetExplicitTypes())) {
@@ -74,6 +74,15 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static bool IsInstanceTypesInitialized() {
return Get().instance_types_initialized_;
}
static void EnsureInCCOutputList(TorqueMacro* macro) {
GlobalContext& c = Get();
if (c.macros_for_cc_output_set_.insert(macro).second) {
c.macros_for_cc_output_.push_back(macro);
}
}
static const std::vector<TorqueMacro*>& AllMacrosForCCOutput() {
return Get().macros_for_cc_output_;
}
private:
bool collect_language_server_data_;
@@ -84,6 +93,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
std::set<std::string> cpp_includes_;
std::map<SourceId, PerFileStreams> generated_per_file_;
std::map<std::string, size_t> fresh_ids_;
std::vector<TorqueMacro*> macros_for_cc_output_;
std::unordered_set<TorqueMacro*> macros_for_cc_output_set_;
bool instance_types_initialized_ = false;
friend class LanguageServerData;
@@ -10,6 +10,7 @@
#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/torque/cc-generator.h"
#include "src/torque/constants.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
@@ -110,6 +111,46 @@ void ImplementationVisitor::EndCSAFiles() {
}
}
void ImplementationVisitor::BeginRuntimeMacrosFile() {
std::ostream& source = runtime_macros_cc_;
std::ostream& header = runtime_macros_h_;
source << "#include \"torque-generated/runtime-macros.h\"\n\n";
source << "#include \"src/torque/runtime-macro-shims.h\"\n";
for (const std::string& include_path : GlobalContext::CppIncludes()) {
source << "#include " << StringLiteralQuote(include_path) << "\n";
}
source << "\n";
source << "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n";
const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_";
header << "#ifndef " << kHeaderDefine << "\n";
header << "#define " << kHeaderDefine << "\n\n";
header << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
header << "\n";
header << "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n";
}
void ImplementationVisitor::EndRuntimeMacrosFile() {
std::ostream& source = runtime_macros_cc_;
std::ostream& header = runtime_macros_h_;
source << "} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
header << "\n} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
header << "#endif // V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_\n";
}
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(),
{}, false};
@@ -273,15 +314,23 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
bool can_return = return_type != TypeOracle::GetNeverType();
bool has_return_value =
can_return && return_type != TypeOracle::GetVoidType();
const char* prefix = output_type_ == OutputType::kCC ? "TqRuntime" : "";
GenerateMacroFunctionDeclaration(header_out(), "", macro);
GenerateMacroFunctionDeclaration(header_out(), prefix, macro);
header_out() << ";\n";
GenerateMacroFunctionDeclaration(source_out(), "", macro);
GenerateMacroFunctionDeclaration(source_out(), prefix, macro);
source_out() << " {\n";
source_out() << " compiler::CodeAssembler ca_(state_);\n";
source_out()
<< " compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
if (output_type_ == OutputType::kCC) {
// For now, generated C++ is only for field offset computations. If we ever
// generate C++ code that can allocate, then it should be handlified.
source_out() << " DisallowHeapAllocation no_gc;\n";
} else {
source_out() << " compiler::CodeAssembler ca_(state_);\n";
source_out()
<< " compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
}
Stack<std::string> lowered_parameters;
Stack<const Type*> lowered_parameter_types;
@@ -363,15 +412,24 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
assembler().Bind(end);
}
CSAGenerator csa_generator{assembler().Result(), source_out()};
base::Optional<Stack<std::string>> values =
csa_generator.EmitGraph(lowered_parameters);
base::Optional<Stack<std::string>> values;
if (output_type_ == OutputType::kCC) {
CCGenerator cc_generator{assembler().Result(), source_out()};
values = cc_generator.EmitGraph(lowered_parameters);
} else {
CSAGenerator csa_generator{assembler().Result(), source_out()};
values = csa_generator.EmitGraph(lowered_parameters);
}
assembler_ = base::nullopt;
if (has_return_value) {
source_out() << " return ";
CSAGenerator::EmitCSAValue(return_value, *values, source_out());
if (output_type_ == OutputType::kCC) {
CCGenerator::EmitCCValue(return_value, *values, source_out());
} else {
CSAGenerator::EmitCSAValue(return_value, *values, source_out());
}
source_out() << ";\n";
}
source_out() << "}\n\n";
@@ -1638,12 +1696,17 @@ void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
std::string header_file_name = dir + "/" + path_from_root + "-tq-csa.h";
WriteFile(header_file_name, new_header);
}
WriteFile(dir + "/runtime-macros.h", runtime_macros_h_.str());
WriteFile(dir + "/runtime-macros.cc", runtime_macros_cc_.str());
}
void ImplementationVisitor::GenerateMacroFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, Macro* macro) {
GenerateFunctionDeclaration(o, macro_prefix, macro->ExternalName(),
macro->signature(), macro->parameter_names());
GenerateFunctionDeclaration(
o, macro_prefix,
output_type_ == OutputType::kCC ? macro->CCName() : macro->ExternalName(),
macro->signature(), macro->parameter_names());
}
std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
@@ -1654,12 +1717,17 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
if (signature.return_type->IsVoidOrNever()) {
o << "void";
} else {
o << signature.return_type->GetGeneratedTypeName();
o << (output_type_ == OutputType::kCC
? signature.return_type->GetRuntimeType()
: signature.return_type->GetGeneratedTypeName());
}
o << " " << macro_prefix << name << "(";
bool first = true;
if (pass_code_assembler_state) {
if (output_type_ == OutputType::kCC) {
first = false;
o << "Isolate* isolate";
} else if (pass_code_assembler_state) {
first = false;
o << "compiler::CodeAssemblerState* state_";
}
@@ -1670,7 +1738,9 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
first = false;
const Type* parameter_type = signature.types()[i];
const std::string& generated_type_name =
parameter_type->GetGeneratedTypeName();
output_type_ == OutputType::kCC
? parameter_type->GetRuntimeType()
: parameter_type->GetGeneratedTypeName();
generated_parameter_names.push_back(ExternalParameterName(
i < parameter_names.size() ? parameter_names[i]->value
@@ -1679,6 +1749,9 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
}
for (const LabelDeclaration& label_info : signature.labels) {
if (output_type_ == OutputType::kCC) {
ReportError("Macros that generate runtime code can't have label exits");
}
if (!first) o << ", ";
first = false;
generated_parameter_names.push_back(
@@ -2487,7 +2560,7 @@ VisitResult ImplementationVisitor::GenerateCall(
}
}
bool inline_macro = callable->ShouldBeInlined();
bool inline_macro = callable->ShouldBeInlined(output_type_);
std::vector<VisitResult> implicit_arguments;
for (size_t i = 0; i < callable->signature().implicit_count; ++i) {
std::string implicit_name = callable->signature().parameter_names[i]->value;
@@ -2594,7 +2667,18 @@ VisitResult ImplementationVisitor::GenerateCall(
if (is_tailcall) {
ReportError("can't tail call a macro");
}
macro->SetUsed();
// If we're currently generating a C++ macro and it's calling another macro,
// then we need to make sure that we also generate C++ code for the called
// macro.
if (output_type_ == OutputType::kCC && !inline_macro) {
if (auto* torque_macro = TorqueMacro::DynamicCast(macro)) {
GlobalContext::EnsureInCCOutputList(torque_macro);
}
}
if (return_type->IsConstexpr()) {
DCHECK_EQ(0, arguments.labels.size());
std::stringstream result;
@@ -3065,6 +3149,7 @@ void ImplementationVisitor::VisitAllDeclarables() {
CurrentCallable::Scope current_callable(nullptr);
const std::vector<std::unique_ptr<Declarable>>& all_declarables =
GlobalContext::AllDeclarables();
// This has to be an index-based loop because all_declarables can be extended
// during the loop.
for (size_t i = 0; i < all_declarables.size(); ++i) {
@@ -3074,6 +3159,19 @@ void ImplementationVisitor::VisitAllDeclarables() {
// Recover from compile errors here. The error is recorded already.
}
}
// Do the same for macros which generate C++ code.
output_type_ = OutputType::kCC;
const std::vector<TorqueMacro*>& cc_macros =
GlobalContext::AllMacrosForCCOutput();
for (size_t i = 0; i < cc_macros.size(); ++i) {
try {
Visit(static_cast<Declarable*>(cc_macros[i]));
} catch (TorqueAbortCompilation&) {
// Recover from compile errors here. The error is recorded already.
}
}
output_type_ = OutputType::kCSA;
}
void ImplementationVisitor::Visit(Declarable* declarable) {
@@ -3082,7 +3180,7 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
CurrentFileStreams::Scope current_file_streams(
&GlobalContext::GeneratedPerFile(declarable->Position().source));
if (Callable* callable = Callable::DynamicCast(declarable)) {
if (!callable->ShouldGenerateExternalCode())
if (!callable->ShouldGenerateExternalCode(output_type_))
CurrentFileStreams::Get() = nullptr;
}
switch (declarable->kind()) {
@@ -554,6 +554,8 @@ class ImplementationVisitor {
void BeginCSAFiles();
void EndCSAFiles();
void BeginRuntimeMacrosFile();
void EndRuntimeMacrosFile();
void GenerateImplementation(const std::string& dir);
@@ -762,13 +764,15 @@ class ImplementationVisitor {
std::ostream& source_out() {
if (auto* streams = CurrentFileStreams::Get()) {
return streams->csa_ccfile;
return output_type_ == OutputType::kCSA ? streams->csa_ccfile
: runtime_macros_cc_;
}
return null_stream_;
}
std::ostream& header_out() {
if (auto* streams = CurrentFileStreams::Get()) {
return streams->csa_headerfile;
return output_type_ == OutputType::kCSA ? streams->csa_headerfile
: runtime_macros_h_;
}
return null_stream_;
}
@@ -818,6 +822,16 @@ class ImplementationVisitor {
// the value to load.
std::unordered_map<const Expression*, const Identifier*>
bitfield_expressions_;
// The contents of the runtime macros output files. These contain all Torque
// macros that have been generated using the C++ backend. They're not yet
// split per source file like CSA macros, but eventually we should change them
// to generate -inl.inc files so that callers can easily inline their
// contents.
std::stringstream runtime_macros_cc_;
std::stringstream runtime_macros_h_;
OutputType output_type_ = OutputType::kCSA;
};
void ReportAllUnusedMacros();
@@ -24,32 +24,40 @@ class Macro;
class NamespaceConstant;
class RuntimeFunction;
#define TORQUE_INSTRUCTION_LIST(V) \
V(PeekInstruction) \
V(PokeInstruction) \
V(DeleteRangeInstruction) \
V(PushUninitializedInstruction) \
V(PushBuiltinPointerInstruction) \
V(LoadReferenceInstruction) \
V(StoreReferenceInstruction) \
V(LoadBitFieldInstruction) \
V(StoreBitFieldInstruction) \
V(CallCsaMacroInstruction) \
V(CallIntrinsicInstruction) \
V(NamespaceConstantInstruction) \
V(CallCsaMacroAndBranchInstruction) \
V(CallBuiltinInstruction) \
V(CallRuntimeInstruction) \
V(CallBuiltinPointerInstruction) \
V(BranchInstruction) \
V(ConstexprBranchInstruction) \
V(GotoInstruction) \
V(GotoExternalInstruction) \
V(ReturnInstruction) \
V(PrintConstantStringInstruction) \
V(AbortInstruction) \
// Instructions where all backends generate code the same way.
#define TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(V) \
V(PeekInstruction) \
V(PokeInstruction) \
V(DeleteRangeInstruction)
// Instructions where different backends may generate different code.
#define TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(V) \
V(PushUninitializedInstruction) \
V(PushBuiltinPointerInstruction) \
V(LoadReferenceInstruction) \
V(StoreReferenceInstruction) \
V(LoadBitFieldInstruction) \
V(StoreBitFieldInstruction) \
V(CallCsaMacroInstruction) \
V(CallIntrinsicInstruction) \
V(NamespaceConstantInstruction) \
V(CallCsaMacroAndBranchInstruction) \
V(CallBuiltinInstruction) \
V(CallRuntimeInstruction) \
V(CallBuiltinPointerInstruction) \
V(BranchInstruction) \
V(ConstexprBranchInstruction) \
V(GotoInstruction) \
V(GotoExternalInstruction) \
V(ReturnInstruction) \
V(PrintConstantStringInstruction) \
V(AbortInstruction) \
V(UnsafeCastInstruction)
#define TORQUE_INSTRUCTION_LIST(V) \
TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(V) \
TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(V)
#define TORQUE_INSTRUCTION_BOILERPLATE() \
static const InstructionKind kKind; \
std::unique_ptr<InstructionBase> Clone() const override; \
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains runtime implementations of a few macros that are defined
// as external in Torque, so that generated runtime code can work.
#ifndef V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
#define V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
#include "src/objects/smi.h"
namespace v8 {
namespace internal {
namespace TorqueRuntimeMacroShims {
namespace CodeStubAssembler {
inline intptr_t ChangeInt32ToIntPtr(Isolate* isolate, int32_t i) { return i; }
inline uintptr_t ChangeUint32ToWord(Isolate* isolate, uint32_t u) { return u; }
inline intptr_t IntPtrAdd(Isolate* isolate, intptr_t a, intptr_t b) {
return a + b;
}
inline intptr_t IntPtrMul(Isolate* isolate, intptr_t a, intptr_t b) {
return a * b;
}
inline intptr_t Signed(Isolate* isolate, uintptr_t u) {
return static_cast<intptr_t>(u);
}
inline int32_t SmiUntag(Isolate* isolate, Smi s) { return s.value(); }
} // namespace CodeStubAssembler
} // namespace TorqueRuntimeMacroShims
} // namespace internal
} // namespace v8
#endif // V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
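
The shims above deliberately mirror a handful of CodeStubAssembler primitives with an extra leading Isolate* argument, so code emitted by the C++ backend can call them the same way CSA-generated code calls the assembler. A minimal hedged usage sketch follows; the index variable, the constant 16, and the surrounding isolate are illustrative assumptions, while kTaggedSize is the usual V8 constant.

// Illustrative only: how generated runtime code would call the shims.
intptr_t index = 3;  // assumed loop variable
intptr_t offset = TorqueRuntimeMacroShims::CodeStubAssembler::IntPtrAdd(
    isolate,
    TorqueRuntimeMacroShims::CodeStubAssembler::ChangeInt32ToIntPtr(isolate,
                                                                    16),
    TorqueRuntimeMacroShims::CodeStubAssembler::IntPtrMul(isolate, index,
                                                          kTaggedSize));
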
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/torque/torque-code-generator.h"
namespace v8 {
namespace internal {
namespace torque {
bool TorqueCodeGenerator::IsEmptyInstruction(const Instruction& instruction) {
switch (instruction.kind()) {
case InstructionKind::kPeekInstruction:
case InstructionKind::kPokeInstruction:
case InstructionKind::kDeleteRangeInstruction:
case InstructionKind::kPushUninitializedInstruction:
case InstructionKind::kPushBuiltinPointerInstruction:
case InstructionKind::kUnsafeCastInstruction:
return true;
default:
return false;
}
}
void TorqueCodeGenerator::EmitInstruction(const Instruction& instruction,
Stack<std::string>* stack) {
#ifdef DEBUG
if (!IsEmptyInstruction(instruction)) {
EmitSourcePosition(instruction->pos);
}
#endif
switch (instruction.kind()) {
#define ENUM_ITEM(T) \
case InstructionKind::k##T: \
return EmitInstruction(instruction.Cast<T>(), stack);
TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
#undef ENUM_ITEM
}
}
void TorqueCodeGenerator::EmitInstruction(const PeekInstruction& instruction,
Stack<std::string>* stack) {
stack->Push(stack->Peek(instruction.slot));
}
void TorqueCodeGenerator::EmitInstruction(const PokeInstruction& instruction,
Stack<std::string>* stack) {
stack->Poke(instruction.slot, stack->Top());
stack->Pop();
}
void TorqueCodeGenerator::EmitInstruction(
const DeleteRangeInstruction& instruction, Stack<std::string>* stack) {
stack->DeleteRange(instruction.range);
}
} // namespace torque
} // namespace internal
} // namespace v8
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_TORQUE_TORQUE_CODE_GENERATOR_H_
#define V8_TORQUE_TORQUE_CODE_GENERATOR_H_
#include <iostream>
#include "src/torque/cfg.h"
#include "src/torque/declarable.h"
namespace v8 {
namespace internal {
namespace torque {
class TorqueCodeGenerator {
public:
TorqueCodeGenerator(const ControlFlowGraph& cfg, std::ostream& out)
: cfg_(cfg),
out_(&out),
out_decls_(&out),
previous_position_(SourcePosition::Invalid()) {}
protected:
const ControlFlowGraph& cfg_;
std::ostream* out_;
std::ostream* out_decls_;
size_t fresh_id_ = 0;
SourcePosition previous_position_;
std::map<DefinitionLocation, std::string> location_map_;
std::string DefinitionToVariable(const DefinitionLocation& location) {
if (location.IsPhi()) {
std::stringstream stream;
stream << "phi_bb" << location.GetPhiBlock()->id() << "_"
<< location.GetPhiIndex();
return stream.str();
} else if (location.IsParameter()) {
auto it = location_map_.find(location);
DCHECK_NE(it, location_map_.end());
return it->second;
} else {
DCHECK(location.IsInstruction());
auto it = location_map_.find(location);
if (it == location_map_.end()) {
it = location_map_.insert(std::make_pair(location, FreshNodeName()))
.first;
}
return it->second;
}
}
void SetDefinitionVariable(const DefinitionLocation& definition,
const std::string& str) {
DCHECK_EQ(location_map_.find(definition), location_map_.end());
location_map_.insert(std::make_pair(definition, str));
}
std::ostream& out() { return *out_; }
std::ostream& decls() { return *out_decls_; }
static bool IsEmptyInstruction(const Instruction& instruction);
virtual void EmitSourcePosition(SourcePosition pos,
bool always_emit = false) = 0;
std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
std::string FreshCatchName() { return "catch" + std::to_string(fresh_id_++); }
std::string FreshLabelName() { return "label" + std::to_string(fresh_id_++); }
std::string BlockName(const Block* block) {
return "block" + std::to_string(block->id());
}
void EmitInstruction(const Instruction& instruction,
Stack<std::string>* stack);
#define EMIT_INSTRUCTION_DECLARATION(T) \
void EmitInstruction(const T& instruction, Stack<std::string>* stack);
TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
#undef EMIT_INSTRUCTION_DECLARATION
#define EMIT_INSTRUCTION_DECLARATION(T) \
virtual void EmitInstruction(const T& instruction, \
Stack<std::string>* stack) = 0;
TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
#undef EMIT_INSTRUCTION_DECLARATION
};
} // namespace torque
} // namespace internal
} // namespace v8
#endif // V8_TORQUE_TORQUE_CODE_GENERATOR_H_
@@ -76,6 +76,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateInstanceTypes(output_directory);
implementation_visitor.BeginCSAFiles();
implementation_visitor.BeginRuntimeMacrosFile();
implementation_visitor.VisitAllDeclarables();
@@ -95,6 +96,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateCSATypes(output_directory);
implementation_visitor.EndCSAFiles();
implementation_visitor.EndRuntimeMacrosFile();
implementation_visitor.GenerateImplementation(output_directory);
if (GlobalContext::collect_language_server_data()) {
......
@@ -643,13 +643,29 @@ bool ClassType::HasNoPointerSlots() const {
return true;
}
bool ClassType::HasIndexedFieldsIncludingInParents() const {
for (const auto& field : fields_) {
if (field.index.has_value()) return true;
}
if (const ClassType* parent = GetSuperClass()) {
return parent->HasIndexedFieldsIncludingInParents();
}
return false;
}
void ClassType::GenerateAccessors() {
bool at_or_after_indexed_field = false;
if (const ClassType* parent = GetSuperClass()) {
at_or_after_indexed_field = parent->HasIndexedFieldsIncludingInParents();
}
// For each field, construct AST snippets that implement a CSA accessor
// function. The implementation iterator will turn the snippets into code.
for (auto& field : fields_) {
if (field.name_and_type.type == TypeOracle::GetVoidType()) {
continue;
}
at_or_after_indexed_field =
at_or_after_indexed_field || field.index.has_value();
CurrentSourcePosition::Scope position_activator(field.pos);
IdentifierExpression* parameter =
@@ -657,15 +673,46 @@ void ClassType::GenerateAccessors() {
IdentifierExpression* index =
MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"i"}));
// Load accessor
std::string camel_field_name = CamelifyString(field.name_and_type.name);
std::string load_macro_name = "Load" + this->name() + camel_field_name;
if (at_or_after_indexed_field) {
// Generate a C++ function for getting a slice or reference to this field.
// In Torque, this function would be written as
// FieldRefClassNameFieldName(o: ClassName) {
// return &o.field_name;
// }
std::string ref_macro_name = "FieldRef" + this->name() + camel_field_name;
Signature ref_signature;
ref_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
ref_signature.parameter_types.types.push_back(this);
ref_signature.parameter_types.var_args = false;
// It doesn't really matter whether we say this reference is mutable or
// const, because that information is not exposed to the calling C++ code.
ref_signature.return_type =
field.index
? TypeOracle::GetSliceType(field.name_and_type.type)
: TypeOracle::GetConstReferenceType(field.name_and_type.type);
Expression* ref_expression = MakeNode<FieldAccessExpression>(
parameter, MakeNode<Identifier>(field.name_and_type.name));
ref_expression = MakeNode<CallExpression>(
MakeNode<IdentifierExpression>(
std::vector<std::string>{},
MakeNode<Identifier>(std::string{"&"})),
std::vector<Expression*>{ref_expression}, std::vector<Identifier*>{});
Statement* ref_body = MakeNode<ReturnStatement>(ref_expression);
Macro* ref_macro =
Declarations::DeclareMacro(ref_macro_name, true, base::nullopt,
ref_signature, ref_body, base::nullopt);
GlobalContext::EnsureInCCOutputList(TorqueMacro::cast(ref_macro));
}
// For now, only generate indexed accessors for simple types
if (field.index.has_value() && field.name_and_type.type->IsStructType()) {
continue;
}
// Load accessor
std::string load_macro_name = "Load" + this->name() + camel_field_name;
Signature load_signature;
load_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
load_signature.parameter_types.types.push_back(this);
@@ -1096,10 +1143,23 @@ base::Optional<NameAndType> ExtractSimpleFieldArraySize(
}
std::string Type::GetRuntimeType() const {
// TODO(tebbi): Other types are currently unsupported, since there the TNode
// types and the C++ runtime types disagree.
DCHECK(this->IsSubtypeOf(TypeOracle::GetTaggedType()));
return GetGeneratedTNodeTypeName();
if (IsSubtypeOf(TypeOracle::GetSmiType())) return "Smi";
if (IsSubtypeOf(TypeOracle::GetTaggedType())) {
return GetGeneratedTNodeTypeName();
}
if (base::Optional<const StructType*> struct_type = StructSupertype()) {
std::stringstream result;
result << "std::tuple<";
bool first = true;
for (const Type* field_type : LowerType(*struct_type)) {
if (!first) result << ", ";
first = false;
result << field_type->GetRuntimeType();
}
result << ">";
return result.str();
}
return ConstexprVersion()->GetGeneratedTypeName();
}
} // namespace torque
@@ -703,6 +703,7 @@ class ClassType final : public AggregateType {
std::vector<ObjectSlotKind> ComputeHeaderSlotKinds() const;
base::Optional<ObjectSlotKind> ComputeArraySlotKind() const;
bool HasNoPointerSlots() const;
bool HasIndexedFieldsIncludingInParents() const;
const InstanceTypeConstraints& GetInstanceTypeConstraints() const {
return decl_->instance_type_constraints;