implementation-visitor.cc 213 KB
Newer Older
1 2 3 4
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 6
#include "src/torque/implementation-visitor.h"

7
#include <algorithm>
8
#include <iomanip>
9
#include <string>
10

11
#include "src/base/optional.h"
12
#include "src/common/globals.h"
13
#include "src/numbers/integer-literal-inl.h"
14
#include "src/torque/cc-generator.h"
15
#include "src/torque/cfg.h"
16
#include "src/torque/constants.h"
17
#include "src/torque/cpp-builder.h"
18
#include "src/torque/csa-generator.h"
19
#include "src/torque/declaration-visitor.h"
20
#include "src/torque/global-context.h"
21
#include "src/torque/kythe-data.h"
22
#include "src/torque/parameter-difference.h"
23
#include "src/torque/server-data.h"
24
#include "src/torque/source-positions.h"
25
#include "src/torque/type-inference.h"
26
#include "src/torque/type-visitor.h"
27
#include "src/torque/types.h"
28
#include "src/torque/utils.h"
29 30 31 32 33

namespace v8 {
namespace internal {
namespace torque {

34 35
// File-global, monotonically increasing counter used to hand out a unique
// index per binding. NOTE(review): its consumers are not visible in this
// chunk; presumably read/incremented by the Binding machinery — confirm
// against the header.
uint64_t next_unique_binding_index = 0;

36 37 38 39 40 41
// Sadly, 'using std::string_literals::operator""s;' is bugged in MSVC (see
// https://developercommunity.visualstudio.com/t/Incorrect-warning-when-using-standard-st/673948).
// TODO(nicohartmann@): Change to 'using std::string_literals::operator""s;'
// once this is fixed.
using namespace std::string_literals;  // NOLINT(build/namespaces)

42 43 44 45
namespace {
// Placeholder emitted into each generated CSA .cc file; it is textually
// replaced with the final list of required builtin includes once the whole
// file has been generated (see the marker comment in BeginGeneratedFiles()).
const char* BuiltinIncludesMarker = "// __BUILTIN_INCLUDES_MARKER__\n";
}  // namespace

46
// Dispatches an expression AST node to the matching typed Visit overload.
// The macro-generated switch covers every expression node kind; any other
// kind indicates a malformed AST and is unreachable.
VisitResult ImplementationVisitor::Visit(Expression* expr) {
  CurrentSourcePosition::Scope position_activator(expr->pos);
  switch (expr->kind) {
#define ENUM_ITEM(name)        \
  case AstNode::Kind::k##name: \
    return Visit(name::cast(expr));
    AST_EXPRESSION_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
    default:
      UNREACHABLE();
  }
}

59
// Dispatches a statement AST node to its typed Visit overload. The returned
// type is "never" exactly when control cannot fall through the statement; the
// DCHECK ties that to the current CFG block being complete.
const Type* ImplementationVisitor::Visit(Statement* stmt) {
  CurrentSourcePosition::Scope position_activator(stmt->pos);
  StackScope stack_scope(this);
  const Type* result;
  switch (stmt->kind) {
#define ENUM_ITEM(name)               \
  case AstNode::Kind::k##name:        \
    result = Visit(name::cast(stmt)); \
    break;
    AST_STATEMENT_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
    default:
      UNREACHABLE();
  }
  DCHECK_EQ(result == TypeOracle::GetNeverType(),
            assembler().CurrentBlockIsComplete());
  return result;
}

78 79 80
void ImplementationVisitor::BeginGeneratedFiles() {
  std::set<SourceId> contains_class_definitions;
  for (const ClassType* type : TypeOracle::GetClasses()) {
81
    if (type->ShouldGenerateCppClassDefinitions()) {
82
      contains_class_definitions.insert(type->AttributedToFile());
83
    }
84
  }
85

86 87
  for (SourceId source : SourceFileMap::AllSources()) {
    auto& streams = GlobalContext::GeneratedPerFile(source);
88 89
    // Output beginning of CSA .cc file.
    {
90
      cpp::File& file = streams.csa_cc;
91

92
      for (const std::string& include_path : GlobalContext::CppIncludes()) {
93
        file << "#include " << StringLiteralQuote(include_path) << "\n";
94
      }
95

96 97 98 99
      file << "// Required Builtins:\n";
      file << "#include \"torque-generated/" +
                  SourceFileMap::PathFromV8RootWithoutExtension(source) +
                  "-tq-csa.h\"\n";
100 101 102 103 104 105
      // Now that required include files are collected while generting the file,
      // we only know the full set at the end. Insert a marker here that is
      // replaced with the list of includes at the very end.
      // TODO(nicohartmann@): This is not the most beautiful way to do this,
      // replace once the cpp file builder is available, where this can be
      // handled easily.
106 107
      file << BuiltinIncludesMarker;
      file << "\n";
108

109
      streams.csa_cc.BeginNamespace("v8", "internal");
110
      streams.csa_ccfile << "\n";
111 112 113
    }
    // Output beginning of CSA .h file.
    {
114
      cpp::File& file = streams.csa_header;
115
      std::string header_define =
116
          "V8_GEN_TORQUE_GENERATED_" +
117 118 119 120
          UnderlinifyPath(SourceFileMap::PathFromV8Root(source)) + "_CSA_H_";
      streams.csa_header.BeginIncludeGuard(header_define);
      file << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
      file << "\n";
121

122
      streams.csa_header.BeginNamespace("v8", "internal");
123
      streams.csa_headerfile << "\n";
124 125 126
    }
    // Output beginning of class definition .cc file.
    {
127 128 129 130 131 132 133
      cpp::File& file = streams.class_definition_cc;
      if (contains_class_definitions.count(source) != 0) {
        file << "#include \""
             << SourceFileMap::PathFromV8RootWithoutExtension(source)
             << "-inl.h\"\n\n";
        file << "#include \"torque-generated/class-verifiers.h\"\n";
        file << "#include \"src/objects/instance-type-inl.h\"\n\n";
134 135
      }

136
      streams.class_definition_cc.BeginNamespace("v8", "internal");
137
      streams.class_definition_ccfile << "\n";
138
    }
139
  }
140
}
141

142
void ImplementationVisitor::EndGeneratedFiles() {
143
  for (SourceId file : SourceFileMap::AllSources()) {
144
    auto& streams = GlobalContext::GeneratedPerFile(file);
145

146 147
    // Output ending of CSA .cc file.
    streams.csa_cc.EndNamespace("v8", "internal");
148

149 150
    // Output ending of CSA .h file.
    {
151
      std::string header_define =
152
          "V8_GEN_TORQUE_GENERATED_" +
153
          UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_CSA_H_";
154

155
      streams.csa_header.EndNamespace("v8", "internal");
156
      streams.csa_headerfile << "\n";
157
      streams.csa_header.EndIncludeGuard(header_define);
158 159
    }

160 161
    // Output ending of class definition .cc file.
    streams.class_definition_cc.EndNamespace("v8", "internal");
162
  }
163 164
}

165
void ImplementationVisitor::BeginDebugMacrosFile() {
166
  // TODO(torque-builer): Can use builder for debug_macros_*_
167 168 169 170
  std::ostream& source = debug_macros_cc_;
  std::ostream& header = debug_macros_h_;

  source << "#include \"torque-generated/debug-macros.h\"\n\n";
171 172
  source << "#include \"src/objects/swiss-name-dictionary.h\"\n";
  source << "#include \"src/objects/ordered-hash-table.h\"\n";
173 174 175 176 177 178 179 180 181 182 183 184 185
  source << "#include \"tools/debug_helper/debug-macro-shims.h\"\n";
  source << "#include \"include/v8-internal.h\"\n";
  source << "\n";

  source << "namespace v8 {\n"
         << "namespace internal {\n"
         << "namespace debug_helper_internal {\n"
         << "\n";

  const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_";
  header << "#ifndef " << kHeaderDefine << "\n";
  header << "#define " << kHeaderDefine << "\n\n";
  header << "#include \"tools/debug_helper/debug-helper-internal.h\"\n";
186
  header << "#include \"src/numbers/integer-literal.h\"\n";
187 188 189 190
  header << "\n";

  header << "namespace v8 {\n"
         << "namespace internal {\n"
191
         << "namespace debug_helper_internal {\n"
192 193 194 195
         << "\n";
}

void ImplementationVisitor::EndDebugMacrosFile() {
196
  // TODO(torque-builder): Can use builder for debug_macros_*_
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211
  std::ostream& source = debug_macros_cc_;
  std::ostream& header = debug_macros_h_;

  source << "}  // namespace internal\n"
         << "}  // namespace v8\n"
         << "}  // namespace debug_helper_internal\n"
         << "\n";

  header << "\n}  // namespace internal\n"
         << "}  // namespace v8\n"
         << "}  // namespace debug_helper_internal\n"
         << "\n";
  header << "#endif  // V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_\n";
}

212
// Generates the C++ accessor for a namespace-level Torque constant: a
// declaration in the CSA header, and a definition whose body evaluates the
// constant's initializer through the CFG assembler and returns the CSA value.
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
  Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(),
                      {}, false};

  BindingsManagersScope bindings_managers_scope;

  cpp::Function getter =
      GenerateFunction(nullptr, decl->external_name(), signature, {});

  getter.PrintDeclaration(csa_headerfile());

  getter.PrintDefinition(csa_ccfile(), [&](std::ostream& body_out) {
    body_out << "  compiler::CodeAssembler ca_(state_);\n";

    // A constant always yields a value; void/never return types are ruled out.
    DCHECK(!signature.return_type->IsVoidOrNever());

    // Evaluate the initializer expression on a fresh, empty stack.
    assembler_ = CfgAssembler(Stack<const Type*>{});
    VisitResult body_value = Visit(decl->body());
    VisitResult converted =
        GenerateImplicitConvert(signature.return_type, body_value);

    CSAGenerator generator{assembler().Result(), body_out};
    Stack<std::string> csa_values =
        *generator.EmitGraph(Stack<std::string>{});
    assembler_ = base::nullopt;

    body_out << "  return ";
    CSAGenerator::EmitCSAValue(converted, csa_values, body_out);
    body_out << ";";
  });
}

245 246
// Type aliases produce no generated code. The only check performed here is
// that an extern class is declared in the default namespace, the sole place
// where extern classes are currently supported.
void ImplementationVisitor::Visit(TypeAlias* alias) {
  if (alias->IsRedeclaration()) return;
  const ClassType* class_type = ClassType::DynamicCast(alias->type());
  if (class_type == nullptr) return;
  if (class_type->IsExtern() && !class_type->nspace()->IsDefaultNamespace()) {
    Error(
        "extern classes are currently only supported in the default "
        "namespace");
  }
}

256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272
// RAII guard tracking the set of macros currently being inlined, so that
// (mutually) recursive macro calls are rejected up front instead of expanding
// until the compiler overflows its stack.
class ImplementationVisitor::MacroInliningScope {
 public:
  MacroInliningScope(ImplementationVisitor* visitor, const Macro* macro)
      : visitor_(visitor), macro_(macro) {
    bool newly_inserted = visitor_->inlining_macros_.insert(macro).second;
    if (!newly_inserted) {
      // The macro is already on the inlining stack: this expansion would
      // recurse forever, so report the error immediately.
      ReportError("Recursive macro call to ", *macro);
    }
  }
  ~MacroInliningScope() { visitor_->inlining_macros_.erase(macro_); }

 private:
  ImplementationVisitor* visitor_;
  const Macro* macro_;
};

273
// Inlines a macro body at the current CFG position.
//
// Binds 'this' (for methods), the macro's parameters, and its labels, then
// visits the body. Return statements inside the body jump to a synthetic
// _macro_end block whose stack layout carries the lowered return value; this
// function binds that block and yields the return value to the caller.
//
// Fixes vs. the previous version:
//  - corrected the user-facing error message typo "declartion".
//  - macro_end is initialized to nullptr so the (error-reporting) paths that
//    never create the block cannot read an indeterminate pointer.
//  - arguments are iterated by const reference instead of by value.
VisitResult ImplementationVisitor::InlineMacro(
    Macro* macro, base::Optional<LocationReference> this_reference,
    const std::vector<VisitResult>& arguments,
    const std::vector<Block*> label_blocks) {
  MacroInliningScope macro_inlining_scope(this, macro);
  CurrentScope::Scope current_scope(macro);
  BindingsManagersScope bindings_managers_scope;
  CurrentCallable::Scope current_callable(macro);
  CurrentReturnValue::Scope current_return_value;
  const Signature& signature = macro->signature();
  const Type* return_type = macro->signature().return_type;
  bool can_return = return_type != TypeOracle::GetNeverType();

  BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
  BlockBindings<LocalLabel> label_bindings(&LabelBindingsManager::Get());
  DCHECK_EQ(macro->signature().parameter_names.size(),
            arguments.size() + (this_reference ? 1 : 0));
  DCHECK_EQ(this_reference.has_value(), macro->IsMethod());

  // Bind the this for methods. Methods that modify a struct-type "this" must
  // only be called if the this is in a variable, in which case the
  // LocalValue is non-const. Otherwise, the LocalValue used for the parameter
  // binding is const, and thus read-only, which will cause errors if
  // modified, e.g. when called by a struct method that sets the structs
  // fields. This prevents using temporary struct values for anything other
  // than read operations.
  if (this_reference) {
    DCHECK(macro->IsMethod());
    parameter_bindings.Add(kThisParameterName, LocalValue{*this_reference},
                           true);
    // TODO(v8:12261): Tracking 'this'-binding for kythe led to a few weird
    // issues. Review to fully support 'this' in methods.
  }

  // Bind the remaining parameters; the 'this' slot (at implicit_count) is
  // skipped, since it was bound above.
  size_t count = 0;
  for (const VisitResult& arg : arguments) {
    if (this_reference && count == signature.implicit_count) count++;
    const bool mark_as_used = signature.implicit_count > count;
    const Identifier* name = macro->parameter_names()[count++];
    Binding<LocalValue>* binding =
        parameter_bindings.Add(name,
                               LocalValue{LocationReference::Temporary(
                                   arg, "parameter " + name->value)},
                               mark_as_used);
    if (GlobalContext::collect_kythe_data()) {
      KytheData::AddBindingDefinition(binding);
    }
  }

  // Bind each declared label to the caller-provided block.
  DCHECK_EQ(label_blocks.size(), signature.labels.size());
  for (size_t i = 0; i < signature.labels.size(); ++i) {
    const LabelDeclaration& label_info = signature.labels[i];
    Binding<LocalLabel>* binding = label_bindings.Add(
        label_info.name, LocalLabel{label_blocks[i], label_info.types});
    if (GlobalContext::collect_kythe_data()) {
      KytheData::AddBindingDefinition(binding);
    }
  }

  Block* macro_end = nullptr;
  base::Optional<Binding<LocalLabel>> macro_end_binding;
  if (can_return) {
    Stack<const Type*> stack = assembler().CurrentStack();
    std::vector<const Type*> lowered_return_types = LowerType(return_type);
    stack.PushMany(lowered_return_types);
    if (!return_type->IsConstexpr()) {
      SetReturnValue(VisitResult(return_type,
                                 stack.TopRange(lowered_return_types.size())));
    }
    // The stack copy used to initialize the _macro_end block is only used
    // as a template for the actual gotos generated by return statements. It
    // doesn't correspond to any real return values, and thus shouldn't contain
    // top types, because these would pollute actual return value types that get
    // unioned with them for return statements, erroneously forcing them to top.
    for (auto i = stack.begin(); i != stack.end(); ++i) {
      if ((*i)->IsTopType()) {
        *i = TopType::cast(*i)->source_type();
      }
    }
    macro_end = assembler().NewBlock(std::move(stack));
    macro_end_binding.emplace(&LabelBindingsManager::Get(), kMacroEndLabelName,
                              LocalLabel{macro_end, {return_type}});
  } else {
    SetReturnValue(VisitResult::NeverResult());
  }

  const Type* result = Visit(*macro->body());

  // Validate that the body's fall-through behavior matches the declared
  // return type; each violation aborts compilation via ReportError.
  if (result->IsNever()) {
    if (!return_type->IsNever() && !macro->HasReturns()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " that never returns must have return type never";
      ReportError(s.str());
    }
  } else {
    if (return_type->IsNever()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " has implicit return at end of its declaration but return type "
           "never";
      ReportError(s.str());
    } else if (!macro->signature().return_type->IsVoid()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " expects to return a value but doesn't on all paths";
      ReportError(s.str());
    }
  }
  if (!result->IsNever()) {
    assembler().Goto(macro_end);
  }

  if (macro->HasReturns() || !result->IsNever()) {
    assembler().Bind(macro_end);
  }

  return GetAndClearReturnValue();
}

393 394
// Generates the standalone C++ function for a Torque macro (or method):
// a declaration in the CSA header, and a definition that lowers the
// parameters, inlines the macro body via InlineMacro(), forwards labels to
// external labels, and emits the final graph through the generator matching
// the current output type (CSA, CC, or CC-debug).
void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
  CurrentCallable::Scope current_callable(macro);
  const Signature& signature = macro->signature();
  const Type* return_type = macro->signature().return_type;
  bool can_return = return_type != TypeOracle::GetNeverType();
  bool has_return_value =
      can_return && return_type != TypeOracle::GetVoidType();

  cpp::Function fn = GenerateMacroFunctionDeclaration(macro);
  fn.PrintDeclaration(csa_headerfile());
  csa_headerfile() << "\n";

  cpp::File cc_out(csa_ccfile());

  // Avoid multiple-definition errors since it is possible for multiple
  // generated -inl.inc files to all contain function definitions for the same
  // Torque macro.
  base::Optional<cpp::IncludeGuardScope> include_guard;
  if (output_type_ == OutputType::kCC) {
    include_guard.emplace(&cc_out, "V8_INTERNAL_DEFINED_"s + macro->CCName());
  } else if (output_type_ == OutputType::kCCDebug) {
    include_guard.emplace(&cc_out,
                          "V8_INTERNAL_DEFINED_"s + macro->CCDebugName());
  }

  fn.PrintBeginDefinition(csa_ccfile());

  if (output_type_ == OutputType::kCC) {
    // For now, generated C++ is only for field offset computations. If we ever
    // generate C++ code that can allocate, then it should be handlified.
    csa_ccfile() << "  DisallowGarbageCollection no_gc;\n";
  } else if (output_type_ == OutputType::kCSA) {
    csa_ccfile() << "  compiler::CodeAssembler ca_(state_);\n";
    csa_ccfile()
        << "  compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
  }

  Stack<std::string> lowered_parameters;
  Stack<const Type*> lowered_parameter_types;

  std::vector<VisitResult> arguments;

  // Lower 'this' first when generating a method.
  base::Optional<LocationReference> this_reference;
  if (Method* method = Method::DynamicCast(macro)) {
    const Type* this_type = method->aggregate_type();
    LowerParameter(this_type, ExternalParameterName(kThisParameterName),
                   &lowered_parameters);
    StackRange range = lowered_parameter_types.PushMany(LowerType(this_type));
    VisitResult this_result = VisitResult(this_type, range);
    // For classes, mark 'this' as a temporary to prevent assignment to it.
    // Note that using a VariableAccess for non-class types is technically
    // incorrect because changes to the 'this' variable do not get reflected
    // to the caller. Therefore struct methods should always be inlined and a
    // C++ version should never be generated, since it would be incorrect.
    // However, in order to be able to type- and semantics-check even unused
    // struct methods, set the this_reference to be the local variable copy of
    // the passed-in this, which allows the visitor to at least find and report
    // errors.
    this_reference =
        (this_type->IsClassType())
            ? LocationReference::Temporary(this_result, "this parameter")
            : LocationReference::VariableAccess(this_result);
  }

  // Lower the remaining parameters; constexpr parameters stay symbolic.
  for (size_t i = 0; i < macro->signature().parameter_names.size(); ++i) {
    if (this_reference && i == macro->signature().implicit_count) continue;
    const std::string& name = macro->parameter_names()[i]->value;
    std::string external_name = ExternalParameterName(name);
    const Type* type = macro->signature().types()[i];

    if (type->IsConstexpr()) {
      arguments.push_back(VisitResult(type, external_name));
    } else {
      LowerParameter(type, external_name, &lowered_parameters);
      StackRange range = lowered_parameter_types.PushMany(LowerType(type));
      arguments.push_back(VisitResult(type, range));
    }
  }

  DCHECK_EQ(lowered_parameters.Size(), lowered_parameter_types.Size());
  assembler_ = CfgAssembler(lowered_parameter_types);

  // One CFG block per declared label, typed by the label's lowered parameters.
  std::vector<Block*> label_blocks;
  for (const LabelDeclaration& label_info : signature.labels) {
    Stack<const Type*> label_input_stack;
    for (const Type* type : label_info.types) {
      label_input_stack.PushMany(LowerType(type));
    }
    Block* block = assembler().NewBlock(std::move(label_input_stack));
    label_blocks.push_back(block);
  }

  VisitResult return_value =
      InlineMacro(macro, this_reference, arguments, label_blocks);
  Block* done_block = assembler().NewBlock();
  if (return_type != TypeOracle::GetNeverType()) {
    assembler().Goto(done_block);
  }

  // Each label block forwards to the corresponding external label.
  for (size_t i = 0; i < label_blocks.size(); ++i) {
    Block* label_block = label_blocks[i];
    const LabelDeclaration& label_info = signature.labels[i];
    assembler().Bind(label_block);
    std::vector<std::string> label_parameter_variables;
    for (size_t j = 0; j < label_info.types.size(); ++j) {
      LowerLabelParameter(label_info.types[j],
                          ExternalLabelParameterName(label_info.name->value, j),
                          &label_parameter_variables);
    }
    assembler().Emit(GotoExternalInstruction{
        ExternalLabelName(label_info.name->value), label_parameter_variables});
  }

  if (return_type != TypeOracle::GetNeverType()) {
    assembler().Bind(done_block);
  }

  // Emit the graph through the generator matching the output flavor.
  base::Optional<Stack<std::string>> values;
  if (output_type_ == OutputType::kCC) {
    CCGenerator cc_generator{assembler().Result(), csa_ccfile()};
    values = cc_generator.EmitGraph(lowered_parameters);
  } else if (output_type_ == OutputType::kCCDebug) {
    CCGenerator cc_generator{assembler().Result(), csa_ccfile(), true};
    values = cc_generator.EmitGraph(lowered_parameters);
  } else {
    CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
    values = csa_generator.EmitGraph(lowered_parameters);
  }

  assembler_ = base::nullopt;

  if (has_return_value) {
    csa_ccfile() << "  return ";
    if (output_type_ == OutputType::kCCDebug) {
      // Debug macros return a value wrapped in a memory-access status.
      csa_ccfile() << "{d::MemoryAccessResult::kOk, ";
      CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
      csa_ccfile() << "}";
    } else if (output_type_ == OutputType::kCC) {
      CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
    } else {
      CSAGenerator::EmitCSAValue(return_value, *values, csa_ccfile());
    }
    csa_ccfile() << ";\n";
  }
  fn.PrintEndDefinition(csa_ccfile());

  include_guard.reset();
}

542
// Torque macros share all of their code generation with methods; see
// VisitMacroCommon().
void ImplementationVisitor::Visit(TorqueMacro* macro) {
  VisitMacroCommon(macro);
}

// External methods have no Torque body to generate, so only internal ones may
// reach this point; generation itself is shared with plain macros.
void ImplementationVisitor::Visit(Method* method) {
  DCHECK(!method->IsExternal());
  VisitMacroCommon(method);
}

551
namespace {
552

553
std::string AddParameter(size_t i, Builtin* builtin,
554
                         Stack<std::string>* parameters,
555
                         Stack<const Type*>* parameter_types,
556 557
                         BlockBindings<LocalValue>* parameter_bindings,
                         bool mark_as_used) {
558
  const Identifier* name = builtin->signature().parameter_names[i];
559
  const Type* type = builtin->signature().types()[i];
560 561 562
  std::string external_name = "parameter" + std::to_string(i);
  parameters->Push(external_name);
  StackRange range = parameter_types->PushMany(LowerType(type));
563
  Binding<LocalValue>* binding = parameter_bindings->Add(
564 565 566 567
      name,
      LocalValue{LocationReference::Temporary(VisitResult(type, range),
                                              "parameter " + name->value)},
      mark_as_used);
568 569 570
  if (GlobalContext::collect_kythe_data()) {
    KytheData::AddBindingDefinition(binding);
  }
571
  return external_name;
572
}
573

574 575
}  // namespace

576 577 578
// Generates the TF_BUILTIN body for a non-external Torque builtin. The two
// calling conventions diverge in how parameters arrive: JavaScript builtins
// receive implicit parameters (context/receiver/target/newTarget) and possibly
// varargs through CodeStubArguments, while stub builtins read every parameter
// straight from the descriptor.
void ImplementationVisitor::Visit(Builtin* builtin) {
  if (builtin->IsExternal()) return;
  CurrentScope::Scope current_scope(builtin);
  CurrentCallable::Scope current_callable(builtin);
  CurrentReturnValue::Scope current_return_value;

  const std::string& name = builtin->ExternalName();
  const Signature& signature = builtin->signature();
  csa_ccfile() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
               << "  compiler::CodeAssemblerState* state_ = state();"
               << "  compiler::CodeAssembler ca_(state());\n";

  Stack<const Type*> param_types;
  Stack<std::string> lowered_params;

  BindingsManagersScope bindings_managers_scope;

  BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());

  if (builtin->IsVarArgsJavaScript() || builtin->IsFixedArgsJavaScript()) {
    if (builtin->IsVarArgsJavaScript()) {
      DCHECK(signature.parameter_types.var_args);
      if (signature.ExplicitCount() > 0) {
        Error("Cannot mix explicit parameters with varargs.")
            .Position(signature.parameter_names[signature.implicit_count]->pos);
      }

      // Materialize the CodeStubArguments wrapper around the JS frame.
      csa_ccfile() << "  TNode<Word32T> argc = UncheckedParameter<Word32T>("
                   << "Descriptor::kJSActualArgumentsCount);\n";
      csa_ccfile() << "  TNode<IntPtrT> "
                      "arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
                      "Int32T>(argc)));\n";
      csa_ccfile() << "  TNode<RawPtrT> arguments_frame = "
                      "UncheckedCast<RawPtrT>(LoadFramePointer());\n";
      csa_ccfile()
          << "  TorqueStructArguments "
             "torque_arguments(GetFrameArguments(arguments_frame, "
             "arguments_length, FrameArgumentsArgcType::kCountIncludesReceiver"
          << "));\n";
      csa_ccfile()
          << "  CodeStubArguments arguments(this, torque_arguments);\n";

      lowered_params.Push("torque_arguments.frame");
      lowered_params.Push("torque_arguments.base");
      lowered_params.Push("torque_arguments.length");
      lowered_params.Push("torque_arguments.actual_count");
      const Type* arguments_type = TypeOracle::GetArgumentsType();
      StackRange range = param_types.PushMany(LowerType(arguments_type));
      parameter_bindings.Add(*signature.arguments_variable,
                             LocalValue{LocationReference::Temporary(
                                 VisitResult(arguments_type, range),
                                 "parameter " + *signature.arguments_variable)},
                             true);
    }

    // Implicit JS parameters come from fixed descriptor slots; each one must
    // use one of the known names and a matching type.
    for (size_t i = 0; i < signature.implicit_count; ++i) {
      const std::string& param_name = signature.parameter_names[i]->value;
      SourcePosition param_pos = signature.parameter_names[i]->pos;
      std::string gen_name = AddParameter(
          i, builtin, &lowered_params, &param_types, &parameter_bindings, true);
      const Type* actual_type = signature.parameter_types.types[i];
      std::vector<const Type*> expected_types;
      if (param_name == "context") {
        csa_ccfile() << "  TNode<NativeContext> " << gen_name
                     << " = UncheckedParameter<NativeContext>("
                     << "Descriptor::kContext);\n";
        csa_ccfile() << "  USE(" << gen_name << ");\n";
        expected_types = {TypeOracle::GetNativeContextType(),
                          TypeOracle::GetContextType()};
      } else if (param_name == "receiver") {
        csa_ccfile()
            << "  TNode<Object> " << gen_name << " = "
            << (builtin->IsVarArgsJavaScript()
                    ? "arguments.GetReceiver()"
                    : "UncheckedParameter<Object>(Descriptor::kReceiver)")
            << ";\n";
        csa_ccfile() << "  USE(" << gen_name << ");\n";
        expected_types = {TypeOracle::GetJSAnyType()};
      } else if (param_name == "newTarget") {
        csa_ccfile() << "  TNode<Object> " << gen_name
                     << " = UncheckedParameter<Object>("
                     << "Descriptor::kJSNewTarget);\n";
        csa_ccfile() << "USE(" << gen_name << ");\n";
        expected_types = {TypeOracle::GetJSAnyType()};
      } else if (param_name == "target") {
        csa_ccfile() << "  TNode<JSFunction> " << gen_name
                     << " = UncheckedParameter<JSFunction>("
                     << "Descriptor::kJSTarget);\n";
        csa_ccfile() << "USE(" << gen_name << ");\n";
        expected_types = {TypeOracle::GetJSFunctionType()};
      } else {
        Error(
            "Unexpected implicit parameter \"", param_name,
            "\" for JavaScript calling convention, "
            "expected \"context\", \"receiver\", \"target\", or \"newTarget\"")
            .Position(param_pos);
        expected_types = {actual_type};
      }
      if (std::find(expected_types.begin(), expected_types.end(),
                    actual_type) == expected_types.end()) {
        Error("According to JavaScript calling convention, expected parameter ",
              param_name, " to have type ", PrintList(expected_types, " or "),
              " but found type ", *actual_type)
            .Position(param_pos);
      }
    }

    // Explicit parameters are read from their descriptor slots.
    for (size_t i = signature.implicit_count;
         i < signature.parameter_names.size(); ++i) {
      const std::string& parameter_name = signature.parameter_names[i]->value;
      const Type* type = signature.types()[i];
      const bool mark_as_used = signature.implicit_count > i;
      std::string param_var =
          AddParameter(i, builtin, &lowered_params, &param_types,
                       &parameter_bindings, mark_as_used);
      csa_ccfile() << "  " << type->GetGeneratedTypeName() << " " << param_var
                   << " = "
                   << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
                   << ">(Descriptor::k" << CamelifyString(parameter_name)
                   << ");\n";
      csa_ccfile() << "  USE(" << param_var << ");\n";
    }

  } else {
    DCHECK(builtin->IsStub());

    // Stub builtins: every parameter comes straight from the descriptor.
    for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
      const std::string& parameter_name = signature.parameter_names[i]->value;
      const Type* type = signature.types()[i];
      const bool mark_as_used = signature.implicit_count > i;
      std::string param_var =
          AddParameter(i, builtin, &lowered_params, &param_types,
                       &parameter_bindings, mark_as_used);
      csa_ccfile() << "  " << type->GetGeneratedTypeName() << " " << param_var
                   << " = "
                   << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
                   << ">(Descriptor::k" << CamelifyString(parameter_name)
                   << ");\n";
      csa_ccfile() << "  USE(" << param_var << ");\n";
    }
  }
  assembler_ = CfgAssembler(param_types);
  const Type* body_result = Visit(*builtin->body());
  if (body_result != TypeOracle::GetNeverType()) {
    ReportError("control reaches end of builtin, expected return of a value");
  }
  CSAGenerator csa_generator{assembler().Result(), csa_ccfile(),
                             builtin->kind()};
  csa_generator.EmitGraph(lowered_params);
  assembler_ = base::nullopt;
  csa_ccfile() << "}\n\n";
}

727
// Convenience overload: visits a variable declaration with a fresh binding
// scope when the caller does not supply one of its own.
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
  BlockBindings<LocalValue> fresh_bindings(&ValueBindingsManager::Get());
  return Visit(stmt, &fresh_bindings);
}

// Declares a local variable or constant inside `block_bindings`.
// Computes/infers the variable's type, evaluates the initializer (if any) or
// pushes uninitialized slots, and registers the binding under stmt->name.
// Always yields void.
const Type* ImplementationVisitor::Visit(
    VarDeclarationStatement* stmt, BlockBindings<LocalValue>* block_bindings) {
  // const qualified variables are required to be initialized properly.
  if (stmt->const_qualified && !stmt->initializer) {
    ReportError("local constant \"", stmt->name, "\" is not initialized.");
  }

  // The declared type is optional; if absent it is inferred from the
  // initializer below.
  base::Optional<const Type*> type;
  if (stmt->type) {
    type = TypeVisitor::ComputeType(*stmt->type);
  }
  base::Optional<VisitResult> init_result;
  if (stmt->initializer) {
    StackScope scope(this);
    init_result = Visit(*stmt->initializer);
    if (type) {
      // An explicit type acts as a conversion target for the initializer.
      init_result = GenerateImplicitConvert(*type, *init_result);
    }
    type = init_result->type();
    if ((*type)->IsConstexpr() && !stmt->const_qualified) {
      Error("Use 'const' instead of 'let' for variable '", stmt->name->value,
            "' of constexpr type '", (*type)->ToString(), "'.")
          .Position(stmt->name->pos)
          .Throw();
    }
    // Yield so the initializer's stack slots survive the scope.
    init_result = scope.Yield(*init_result);
  } else {
    DCHECK(type.has_value());
    if ((*type)->IsConstexpr()) {
      ReportError("constexpr variables need an initializer");
    }
    // No initializer: push one "top" slot per lowered component, tagged with
    // a diagnostic message describing the uninitialized variable.
    TypeVector lowered_types = LowerType(*type);
    for (const Type* t : lowered_types) {
      assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
          "uninitialized variable '" + stmt->name->value + "' of type " +
              t->ToString() + " originally defined at " +
              PositionAsString(stmt->pos),
          t)});
    }
    init_result =
        VisitResult(*type, assembler().TopRange(lowered_types.size()));
  }
  // Constants are bound as read-only temporaries; `let` variables as
  // assignable variable accesses.
  LocationReference ref = stmt->const_qualified
                              ? LocationReference::Temporary(
                                    *init_result, "const " + stmt->name->value)
                              : LocationReference::VariableAccess(*init_result);
  block_bindings->Add(stmt->name, LocalValue{std::move(ref)});
  return TypeOracle::GetVoidType();
}

782
const Type* ImplementationVisitor::Visit(TailCallStatement* stmt) {
  // Delegate to call handling with is_tail_call == true; the call's result
  // type (never) becomes the statement's type.
  VisitResult call_result = Visit(stmt->call, true);
  return call_result.type();
}

// Lowers `cond ? a : b` into a diamond CFG. Both arms are visited so their
// common type can be computed, then each arm is converted to that common type
// before merging in done_block.
VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
  Block* true_block = assembler().NewBlock(assembler().CurrentStack());
  Block* false_block = assembler().NewBlock(assembler().CurrentStack());
  Block* done_block = assembler().NewBlock();
  // The true arm's conversion is deferred to this extra block because the
  // common type is only known after the false arm has been visited.
  Block* true_conversion_block = assembler().NewBlock();
  GenerateExpressionBranch(expr->condition, true_block, false_block);

  VisitResult left;
  VisitResult right;

  {
    // The code for both paths of the conditional need to be generated first
    // before evaluating the conditional expression because the common type of
    // the result of both the true and false of the condition needs to be known
    // to convert both branches to a common type.
    assembler().Bind(true_block);
    StackScope left_scope(this);
    left = Visit(expr->if_true);
    assembler().Goto(true_conversion_block);

    const Type* common_type;
    {
      assembler().Bind(false_block);
      StackScope right_scope(this);
      right = Visit(expr->if_false);
      common_type = GetCommonType(left.type(), right.type());
      right = right_scope.Yield(GenerateImplicitConvert(common_type, right));
      assembler().Goto(done_block);
    }

    assembler().Bind(true_conversion_block);
    left = left_scope.Yield(GenerateImplicitConvert(common_type, left));
    assembler().Goto(done_block);
  }

  assembler().Bind(done_block);
  // Both arms must now agree on type and stack layout.
  CHECK_EQ(left, right);
  return left;
}

// Lowers `a || b`. For constexpr bool operands this folds to a C++ `||`
// expression string; otherwise it emits short-circuiting CFG: the right-hand
// side is only evaluated on the false path.
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
  StackScope outer_scope(this);
  VisitResult left_result = Visit(expr->left);

  if (left_result.type()->IsConstexprBool()) {
    // Constexpr case: both sides must be constexpr bool; produce the textual
    // C++ expression instead of CFG code.
    VisitResult right_result = Visit(expr->right);
    if (!right_result.type()->IsConstexprBool()) {
      ReportError(
          "expected type constexpr bool on right-hand side of operator "
          "||");
    }
    return VisitResult(TypeOracle::GetConstexprBoolType(),
                       std::string("(") + left_result.constexpr_value() +
                           " || " + right_result.constexpr_value() + ")");
  }

  Block* true_block = assembler().NewBlock();
  Block* false_block = assembler().NewBlock();
  Block* done_block = assembler().NewBlock();

  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
  GenerateBranch(left_result, true_block, false_block);

  // Left was true: result is the constant true, right side is skipped.
  assembler().Bind(true_block);
  VisitResult true_result = GenerateBoolConstant(true);
  assembler().Goto(done_block);

  // Left was false: result is the (bool-converted) right-hand side.
  assembler().Bind(false_block);
  VisitResult false_result;
  {
    StackScope false_block_scope(this);
    false_result = false_block_scope.Yield(
        GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
  }
  assembler().Goto(done_block);

  assembler().Bind(done_block);
  DCHECK_EQ(true_result, false_result);
  return outer_scope.Yield(true_result);
}

// Lowers `a && b`, mirroring LogicalOrExpression: constexpr bool operands
// fold to a C++ `&&` string; otherwise short-circuiting CFG is emitted where
// the right-hand side only runs on the true path.
VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
  StackScope outer_scope(this);
  VisitResult left_result = Visit(expr->left);

  if (left_result.type()->IsConstexprBool()) {
    VisitResult right_result = Visit(expr->right);
    if (!right_result.type()->IsConstexprBool()) {
      ReportError(
          "expected type constexpr bool on right-hand side of operator "
          "&&");
    }
    return VisitResult(TypeOracle::GetConstexprBoolType(),
                       std::string("(") + left_result.constexpr_value() +
                           " && " + right_result.constexpr_value() + ")");
  }

  Block* true_block = assembler().NewBlock();
  Block* false_block = assembler().NewBlock();
  Block* done_block = assembler().NewBlock();

  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
  GenerateBranch(left_result, true_block, false_block);

  assembler().Bind(true_block);
  VisitResult true_result;
  {
    StackScope true_block_scope(this);
    VisitResult right_result = Visit(expr->right);
    // Lint: `bitfield.a && bitfield.b` on the same source word generates two
    // loads and branches; `&` would test both bits in one operation.
    if (TryGetSourceForBitfieldExpression(expr->left) != nullptr &&
        TryGetSourceForBitfieldExpression(expr->right) != nullptr &&
        TryGetSourceForBitfieldExpression(expr->left)->value ==
            TryGetSourceForBitfieldExpression(expr->right)->value) {
      Lint(
          "Please use & rather than && when checking multiple bitfield "
          "values, to avoid complexity in generated code.");
    }
    true_result = true_block_scope.Yield(
        GenerateImplicitConvert(TypeOracle::GetBoolType(), right_result));
  }
  assembler().Goto(done_block);

  // Left was false: short-circuit with the constant false.
  assembler().Bind(false_block);
  VisitResult false_result = GenerateBoolConstant(false);
  assembler().Goto(done_block);

  assembler().Bind(done_block);
  DCHECK_EQ(true_result, false_result);
  return outer_scope.Yield(true_result);
}

VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
  // Lower ++/-- as: fetch current value, call the "+"/"-" operator with the
  // constant 1, store the result back, then yield either the old value
  // (postfix) or the new one (prefix).
  StackScope scope(this);
  LocationReference target = GetLocationReference(expr->location);
  VisitResult original_value = GenerateFetchFromLocation(target);
  VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
  Arguments args;
  args.parameters = {original_value, one};
  const bool is_increment =
      expr->op == IncrementDecrementOperator::kIncrement;
  VisitResult updated_value = GenerateCall(is_increment ? "+" : "-", args);
  GenerateAssignToLocation(target, updated_value);
  if (expr->postfix) {
    return scope.Yield(original_value);
  }
  return scope.Yield(updated_value);
}

VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
  // Handles both plain assignment (`x = v`) and compound assignment
  // (`x op= v`). The assigned value is also the expression's value.
  StackScope scope(this);
  LocationReference target = GetLocationReference(expr->location);
  VisitResult new_value;
  if (expr->op) {
    // Compound form: read the old value first, then evaluate the right-hand
    // side, then apply the operator to the pair.
    VisitResult old_value = GenerateFetchFromLocation(target);
    new_value = Visit(expr->value);
    Arguments args;
    args.parameters = {old_value, new_value};
    new_value = GenerateCall(*expr->op, args);
  } else {
    new_value = Visit(expr->value);
  }
  GenerateAssignToLocation(target, new_value);
  return scope.Yield(new_value);
}

948
VisitResult ImplementationVisitor::Visit(FloatingPointLiteralExpression* expr) {
  // Render the double with enough digits to round-trip exactly; the textual
  // form becomes a constexpr float64 constant.
  std::stringstream constant;
  constant << std::setprecision(std::numeric_limits<double>::digits10 + 1)
           << expr->value;
  return VisitResult{TypeOracle::GetConstFloat64Type(), constant.str()};
}

VisitResult ImplementationVisitor::Visit(IntegerLiteralExpression* expr) {
  // Emit a C++ `IntegerLiteral(negative, 0x<abs>ull)` constructor expression
  // carrying the sign and hex magnitude of the literal.
  std::stringstream literal;
  literal << "IntegerLiteral("
          << (expr->value.is_negative() ? "true" : "false") << ", 0x"
          << std::hex << expr->value.absolute_value() << std::dec << "ull)";
  return VisitResult{TypeOracle::GetIntegerLiteralType(), literal.str()};
}

965 966
VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
  // Narrow the expression's type by subtracting the excluded type, then
  // reinterpret the value as the narrowed type via an unsafe cast.
  VisitResult result = Visit(expr->expression);
  const Type* narrowed = SubtractType(
      result.type(), TypeVisitor::ComputeType(expr->excluded_type));
  if (narrowed->IsNever()) {
    ReportError("unreachable code");
  }
  // The narrowed type must lower to a single stack slot for the cast to be a
  // pure reinterpretation.
  CHECK_EQ(LowerType(narrowed), TypeVector{narrowed});
  assembler().Emit(UnsafeCastInstruction{narrowed});
  result.SetType(narrowed);
  return result;
}

978
VisitResult ImplementationVisitor::Visit(StringLiteralExpression* expr) {
  // Strip the source-level quotes and re-wrap in C++ double quotes to form a
  // constexpr string constant.
  std::string contents = expr->literal.substr(1, expr->literal.size() - 2);
  return VisitResult{TypeOracle::GetConstStringType(),
                     "\"" + contents + "\""};
}

984
VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
  // Only non-external stub-linkage builtins may be turned into function
  // pointers.
  if (builtin->IsExternal() || builtin->kind() != Builtin::kStub) {
    ReportError(
        "creating function pointers is only allowed for internal builtins with "
        "stub linkage");
  }
  const Type* pointer_type = TypeOracle::GetBuiltinPointerType(
      builtin->signature().parameter_types.types,
      builtin->signature().return_type);
  // Push the builtin's code object onto the stack as a single slot.
  assembler().Emit(
      PushBuiltinPointerInstruction{builtin->ExternalName(), pointer_type});
  return VisitResult(pointer_type, assembler().TopRange(1));
}

998
VisitResult ImplementationVisitor::Visit(LocationExpression* expr) {
  // A location used as an rvalue: resolve it, then load its current value.
  StackScope scope(this);
  LocationReference location = GetLocationReference(expr);
  return scope.Yield(GenerateFetchFromLocation(location));
}

1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013
VisitResult ImplementationVisitor::Visit(FieldAccessExpression* expr) {
  StackScope scope(this);
  LocationReference location = GetLocationReference(expr);
  if (location.IsBitFieldAccess()) {
    // Remember which identifier a bitfield read came from, so later checks
    // can recognize two accesses into the same bitfield word.
    auto* identifier = IdentifierExpression::DynamicCast(expr->object);
    if (identifier != nullptr) {
      bitfield_expressions_[expr] = identifier->name;
    }
  }
  return scope.Yield(GenerateFetchFromLocation(location));
}

1014
// Lowers `goto Label(args...)`: checks arity, converts each argument to the
// label's parameter type, pushes them contiguously on the stack, and emits an
// unconditional jump. A goto never falls through, so the type is never.
const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
  Binding<LocalLabel>* label = LookupLabel(stmt->label->value);
  size_t parameter_count = label->parameter_types.size();
  if (stmt->arguments.size() != parameter_count) {
    ReportError("goto to label has incorrect number of parameters (expected ",
                parameter_count, " found ", stmt->arguments.size(), ")");
  }

  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(stmt->label->pos,
                                      label->declaration_position());
  }
  if (GlobalContext::collect_kythe_data()) {
    KytheData::AddBindingUse(stmt->label->pos, label);
  }

  size_t i = 0;
  StackRange arguments = assembler().TopRange(0);
  for (Expression* e : stmt->arguments) {
    // Each argument is evaluated in its own scope and only the converted
    // result is kept, extending the contiguous argument range.
    StackScope scope(this);
    VisitResult result = Visit(e);
    const Type* parameter_type = label->parameter_types[i++];
    result = GenerateImplicitConvert(parameter_type, result);
    arguments.Extend(scope.Yield(result).stack_range());
  }

  assembler().Goto(label->block, arguments.Size());
  return TypeOracle::GetNeverType();
}

1044
// Lowers `if` statements. Constexpr conditions become a ConstexprBranch (the
// untaken arm is emitted but unreachable); runtime conditions become a normal
// two-way branch. Returns void if control can fall through, never otherwise.
const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
  bool has_else = stmt->if_false.has_value();

  if (stmt->is_constexpr) {
    VisitResult expression_result = Visit(stmt->condition);

    if (!(expression_result.type() == TypeOracle::GetConstexprBoolType())) {
      std::stringstream stream;
      stream << "expression should return type constexpr bool "
             << "but returns type " << *expression_result.type();
      ReportError(stream.str());
    }

    Block* true_block = assembler().NewBlock();
    Block* false_block = assembler().NewBlock();
    Block* done_block = assembler().NewBlock();

    assembler().Emit(ConstexprBranchInstruction{
        expression_result.constexpr_value(), true_block, false_block});

    assembler().Bind(true_block);
    const Type* left_result = Visit(stmt->if_true);
    if (left_result == TypeOracle::GetVoidType()) {
      assembler().Goto(done_block);
    }

    // A missing else-arm trivially falls through (void).
    assembler().Bind(false_block);
    const Type* right_result = TypeOracle::GetVoidType();
    if (has_else) {
      right_result = Visit(*stmt->if_false);
    }
    if (right_result == TypeOracle::GetVoidType()) {
      assembler().Goto(done_block);
    }

    // Both arms must agree on whether control continues, since only one is
    // selected at compile time but the statement has a single static type.
    if (left_result->IsNever() != right_result->IsNever()) {
      std::stringstream stream;
      stream << "either both or neither branches in a constexpr if statement "
                "must reach their end at"
             << PositionAsString(stmt->pos);
      ReportError(stream.str());
    }

    if (left_result != TypeOracle::GetNeverType()) {
      assembler().Bind(done_block);
    }
    return left_result;
  } else {
    // Runtime branch; deferred hints are propagated to block placement.
    Block* true_block = assembler().NewBlock(assembler().CurrentStack(),
                                             IsDeferred(stmt->if_true));
    Block* false_block =
        assembler().NewBlock(assembler().CurrentStack(),
                             stmt->if_false && IsDeferred(*stmt->if_false));
    GenerateExpressionBranch(stmt->condition, true_block, false_block);

    Block* done_block;
    bool live = false;
    if (has_else) {
      done_block = assembler().NewBlock();
    } else {
      // Without an else, the false edge *is* the fall-through path.
      done_block = false_block;
      live = true;
    }

    assembler().Bind(true_block);
    {
      const Type* result = Visit(stmt->if_true);
      if (result == TypeOracle::GetVoidType()) {
        live = true;
        assembler().Goto(done_block);
      }
    }

    if (has_else) {
      assembler().Bind(false_block);
      const Type* result = Visit(*stmt->if_false);
      if (result == TypeOracle::GetVoidType()) {
        live = true;
        assembler().Goto(done_block);
      }
    }

    // `live` records whether any path reaches done_block.
    if (live) {
      assembler().Bind(done_block);
    }
    return live ? TypeOracle::GetVoidType() : TypeOracle::GetNeverType();
  }
}

1133
// Lowers a while loop as: header (condition test) -> body -> back-edge to
// header, with `break` targeting exit_block and `continue` the header.
// A while statement always has type void (the condition may be false on
// first evaluation).
const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
  Block* body_block = assembler().NewBlock(assembler().CurrentStack());
  Block* exit_block = assembler().NewBlock(assembler().CurrentStack());

  Block* header_block = assembler().NewBlock();
  assembler().Goto(header_block);

  assembler().Bind(header_block);
  GenerateExpressionBranch(stmt->condition, body_block, exit_block);

  assembler().Bind(body_block);
  {
    // Make break/continue inside the body resolve to this loop's blocks.
    BreakContinueActivator activator{exit_block, header_block};
    const Type* body_result = Visit(stmt->body);
    if (body_result != TypeOracle::GetNeverType()) {
      assembler().Goto(header_block);
    }
  }

  assembler().Bind(exit_block);
  return TypeOracle::GetVoidType();
}

1156
const Type* ImplementationVisitor::Visit(BlockStatement* block) {
  // Visit each statement in order inside a fresh binding scope; the block's
  // type is that of its last statement. Code after a non-returning statement
  // is rejected.
  BlockBindings<LocalValue> bindings(&ValueBindingsManager::Get());
  const Type* last_type = TypeOracle::GetVoidType();
  for (Statement* statement : block->statements) {
    CurrentSourcePosition::Scope source_position(statement->pos);
    if (last_type->IsNever()) {
      ReportError("statement after non-returning statement");
    }
    // Variable declarations get the block's binding scope so the binding
    // stays live for the remaining statements.
    auto* var_declaration = VarDeclarationStatement::DynamicCast(statement);
    last_type = var_declaration != nullptr ? Visit(var_declaration, &bindings)
                                           : Visit(statement);
  }
  return last_type;
}

1173
// Lowers debug/unreachable statements. In DEBUG builds a diagnostic message
// is printed before aborting; the abort kind distinguishes `unreachable`
// (never continues) from `debug` (break and continue).
const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
#if defined(DEBUG)
  assembler().Emit(PrintConstantStringInstruction{"halting because of '" +
                                                  stmt->reason + "' at " +
                                                  PositionAsString(stmt->pos)});
#endif
  assembler().Emit(AbortInstruction{stmt->never_continues
                                        ? AbortInstruction::Kind::kUnreachable
                                        : AbortInstruction::Kind::kDebugBreak});
  if (stmt->never_continues) {
    return TypeOracle::GetNeverType();
  } else {
    return TypeOracle::GetVoidType();
  }
}

1189 1190 1191 1192 1193
namespace {

// Normalizes Torque assert source text for failure messages: every
// whitespace character becomes a space, and runs of spaces collapse to one.
std::string FormatAssertSource(const std::string& str) {
  std::string result;
  result.reserve(str.size());
  for (unsigned char c : str) {
    char normalized = isspace(c) ? ' ' : static_cast<char>(c);
    // Skip a space that would directly follow another space.
    if (normalized == ' ' && !result.empty() && result.back() == ' ') continue;
    result.push_back(normalized);
  }
  return result;
}

}  // namespace

1208
// Lowers static_assert/check/dcheck. static_asserts become a call to the
// internal StaticAssert macro. For dchecks that are compiled out, the check
// code is still generated (for type checking) but placed in an unreachable
// block that is bypassed via resume_block.
const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
  if (stmt->kind == AssertStatement::AssertKind::kStaticAssert) {
    std::string message =
        "static_assert(" + stmt->source + ") at " + ToString(stmt->pos);
    GenerateCall(QualifiedName({"", TORQUE_INTERNAL_NAMESPACE_STRING},
                               STATIC_ASSERT_MACRO_STRING),
                 Arguments{{Visit(stmt->expression),
                            VisitResult(TypeOracle::GetConstexprStringType(),
                                        StringLiteralQuote(message))},
                           {}});
    return TypeOracle::GetVoidType();
  }
  // dchecks are only emitted in DEBUG builds (or when forced); checks always.
  bool do_check = stmt->kind != AssertStatement::AssertKind::kDcheck ||
                  GlobalContext::force_assert_statements();
#if defined(DEBUG)
  do_check = true;
#endif
  Block* resume_block;

  if (!do_check) {
    // Skip over the assert code at runtime, but still emit it into an
    // unreachable block so it stays type-checked.
    Block* unreachable_block = assembler().NewBlock(assembler().CurrentStack());
    resume_block = assembler().NewBlock(assembler().CurrentStack());
    assembler().Goto(resume_block);
    assembler().Bind(unreachable_block);
  }

  // CSA_DCHECK & co. are not used here on purpose for two reasons. First,
  // Torque allows and handles two types of expressions in the if protocol
  // automagically, ones that return TNode<BoolT> and those that use the
  // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
  // handle this is embedded in the expression handling and to it's not
  // possible to make the decision to use CSA_DCHECK or CSA_DCHECK_BRANCH
  // isn't trivial up-front. Secondly, on failure, the assert text should be
  // the corresponding Torque code, not the -gen.cc code, which would be the
  // case when using CSA_DCHECK_XXX.
  Block* true_block = assembler().NewBlock(assembler().CurrentStack());
  Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
  GenerateExpressionBranch(stmt->expression, true_block, false_block);

  assembler().Bind(false_block);

  assembler().Emit(AbortInstruction{
      AbortInstruction::Kind::kAssertionFailure,
      "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});

  assembler().Bind(true_block);

  if (!do_check) {
    assembler().Bind(resume_block);
  }

  return TypeOracle::GetVoidType();
}

1262 1263
const Type* ImplementationVisitor::Visit(ExpressionStatement* stmt) {
  // An expression used as a statement discards its value; only "never"
  // (non-returning) propagates to the statement's type.
  const Type* expression_type = Visit(stmt->expression).type();
  if (expression_type->IsNever()) return expression_type;
  return TypeOracle::GetVoidType();
}

1267
// Lowers `return`. Macros jump to the shared macro-end label (stashing the
// return value), builtins emit a ReturnInstruction. Value presence must match
// the callable's return type. A return never falls through.
const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
  Callable* current_callable = CurrentCallable::Get();
  if (current_callable->signature().return_type->IsNever()) {
    std::stringstream s;
    s << "cannot return from a function with return type never";
    ReportError(s.str());
  }
  // Only macros have an end label; builtins return directly.
  LocalLabel* end =
      current_callable->IsMacro() ? LookupLabel(kMacroEndLabelName) : nullptr;
  if (current_callable->HasReturnValue()) {
    if (!stmt->value) {
      std::stringstream s;
      s << "return expression needs to be specified for a return type of "
        << *current_callable->signature().return_type;
      ReportError(s.str());
    }
    VisitResult expression_result = Visit(*stmt->value);
    VisitResult return_result = GenerateImplicitConvert(
        current_callable->signature().return_type, expression_result);
    if (current_callable->IsMacro()) {
      if (return_result.IsOnStack()) {
        // Stack values travel to the end label as label parameters.
        StackRange return_value_range =
            GenerateLabelGoto(end, return_result.stack_range());
        SetReturnValue(VisitResult(return_result.type(), return_value_range));
      } else {
        // Constexpr results are recorded directly; no stack transfer needed.
        GenerateLabelGoto(end);
        SetReturnValue(return_result);
      }
    } else if (current_callable->IsBuiltin()) {
      assembler().Emit(ReturnInstruction{
          LoweredSlotCount(current_callable->signature().return_type)});
    } else {
      UNREACHABLE();
    }
  } else {
    if (stmt->value) {
      std::stringstream s;
      s << "return expression can't be specified for a void or never return "
           "type";
      ReportError(s.str());
    }
    GenerateLabelGoto(end);
  }
  current_callable->IncrementReturns();
  return TypeOracle::GetNeverType();
}

1314
// Lowers `try ... label L(params) { ... }`. The label is bound only while the
// try expression is visited; the label body runs in label_block with the
// label's parameters materialized on the stack, and both paths merge in
// done_block where reachable.
VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
  size_t parameter_count = expr->label_block->parameters.names.size();
  std::vector<VisitResult> parameters;

  Block* label_block = nullptr;
  Block* done_block = assembler().NewBlock();
  VisitResult try_result;

  {
    CurrentSourcePosition::Scope source_position(expr->label_block->pos);
    if (expr->label_block->parameters.has_varargs) {
      ReportError("cannot use ... for label parameters");
    }
    // Build the label's entry stack: current stack plus one lowered slot
    // range per label parameter.
    Stack<const Type*> label_input_stack = assembler().CurrentStack();
    TypeVector parameter_types;
    for (size_t i = 0; i < parameter_count; ++i) {
      const Type* type =
          TypeVisitor::ComputeType(expr->label_block->parameters.types[i]);
      parameter_types.push_back(type);
      if (type->IsConstexpr()) {
        ReportError("no constexpr type allowed for label arguments");
      }
      StackRange range = label_input_stack.PushMany(LowerType(type));
      parameters.push_back(VisitResult(type, range));
    }
    label_block = assembler().NewBlock(label_input_stack,
                                       IsDeferred(expr->label_block->body));

    // The binding's scope limits where gotos to this label may appear.
    Binding<LocalLabel> label_binding{&LabelBindingsManager::Get(),
                                      expr->label_block->label,
                                      LocalLabel{label_block, parameter_types}};

    // Visit try
    StackScope stack_scope(this);
    try_result = Visit(expr->try_expression);
    if (try_result.type() != TypeOracle::GetNeverType()) {
      try_result = stack_scope.Yield(try_result);
      assembler().Goto(done_block);
    }
  }

  // Visit and output the code for the label block. If the label block falls
  // through, then the try must not return a value. Also, if the try doesn't
  // fall through, but the label does, then overall the try-label block
  // returns type void.
  assembler().Bind(label_block);
  const Type* label_result;
  {
    BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
    for (size_t i = 0; i < parameter_count; ++i) {
      Identifier* name = expr->label_block->parameters.names[i];
      parameter_bindings.Add(name,
                             LocalValue{LocationReference::Temporary(
                                 parameters[i], "parameter " + name->value)});
    }

    label_result = Visit(expr->label_block->body);
  }
  if (!try_result.type()->IsVoidOrNever() && label_result->IsVoid()) {
    ReportError(
        "otherwise clauses cannot fall through in a non-void expression");
  }
  if (label_result != TypeOracle::GetNeverType()) {
    assembler().Goto(done_block);
  }
  if (label_result->IsVoid() && try_result.type()->IsNever()) {
    try_result =
        VisitResult(TypeOracle::GetVoidType(), try_result.stack_range());
  }

  if (!try_result.type()->IsNever()) {
    assembler().Bind(done_block);
  }
  return try_result;
}

1390 1391 1392 1393
VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
  // Wrap a statement as an expression: the value is empty (zero stack slots),
  // only the statement's type (void or never) is carried through.
  const Type* statement_type = Visit(expr->statement);
  return VisitResult{statement_type, assembler().TopRange(0)};
}

1394
// Evaluates class-constructor initializers in declaration order, enforcing
// that indexed fields (and only those) use spread syntax, and collects the
// resulting values keyed by field name.
InitializerResults ImplementationVisitor::VisitInitializerResults(
    const ClassType* class_type,
    const std::vector<NameAndExpression>& initializers) {
  InitializerResults result;
  for (const NameAndExpression& initializer : initializers) {
    result.names.push_back(initializer.name);
    Expression* expression = initializer.expression;
    const Field& field = class_type->LookupField(initializer.name->value);
    const bool is_indexed = field.index.has_value();
    if (SpreadExpression* spread =
            SpreadExpression::DynamicCast(expression)) {
      if (!is_indexed) {
        ReportError(
            "spread expressions can only be used to initialize indexed class "
            "fields ('",
            initializer.name->value, "' is not)");
      }
      // Evaluate the spreadee itself, not the spread wrapper.
      expression = spread->spreadee;
    } else if (is_indexed) {
      ReportError("the indexed class field '", initializer.name->value,
                  "' must be initialized with a spread operator");
    }
    result.field_value_map[field.name_and_type.name] = Visit(expression);
  }
  return result;
}

1420
// Produces a LocationReference for a class field access. Indexed fields go
// through the class's slice macro (optional fields implicitly dereference
// element 0); regular fields become a heap reference built from the object
// plus the field's constant offset.
LocationReference ImplementationVisitor::GenerateFieldReference(
    VisitResult object, const Field& field, const ClassType* class_type,
    bool treat_optional_as_indexed) {
  if (field.index.has_value()) {
    LocationReference slice = LocationReference::HeapSlice(
        GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
    if (field.index->optional && !treat_optional_as_indexed) {
      // This field was declared using optional syntax, so any reference to it
      // is implicitly a reference to the first item.
      return GenerateReferenceToItemInHeapSlice(
          slice, {TypeOracle::GetConstInt31Type(), "0"});
    } else {
      return slice;
    }
  }
  DCHECK(field.offset.has_value());
  // Heap reference layout on the stack: copy of the object followed by the
  // intptr field offset.
  StackRange result_range = assembler().TopRange(0);
  result_range.Extend(GenerateCopy(object).stack_range());
  VisitResult offset =
      VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
  offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
  result_range.Extend(offset.stack_range());
  const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
                                                  field.const_qualified);
  return LocationReference::HeapReference(VisitResult(type, result_range));
}

// This is used to generate field references during initialization, where we can
// re-use the offsets used for computing the allocation size.
LocationReference ImplementationVisitor::GenerateFieldReferenceForInit(
    VisitResult object, const Field& field,
    const LayoutForInitialization& layout) {
  // Stack layout: object copy, then the precomputed intptr offset from the
  // allocation layout (rather than the field's static offset).
  StackRange result_range = assembler().TopRange(0);
  result_range.Extend(GenerateCopy(object).stack_range());
  VisitResult offset = GenerateImplicitConvert(
      TypeOracle::GetIntPtrType(), layout.offsets.at(field.name_and_type.name));
  result_range.Extend(offset.stack_range());
  if (field.index) {
    // Indexed field: extend with the precomputed array length and produce a
    // mutable slice covering all elements.
    VisitResult length =
        GenerateCopy(layout.array_lengths.at(field.name_and_type.name));
    result_range.Extend(length.stack_range());
    const Type* slice_type =
        TypeOracle::GetMutableSliceType(field.name_and_type.type);
    return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
  } else {
    // Const fields are writable during initialization.
    VisitResult heap_reference(
        TypeOracle::GetMutableReferenceType(field.name_and_type.type),
        result_range);
    return LocationReference::HeapReference(heap_reference);
  }
}

// Writes all initializer values into a freshly allocated object. Recurses
// into the superclass first so fields are initialized base-to-derived; indexed
// fields are filled from an iterator, scalar fields by a plain assignment.
void ImplementationVisitor::InitializeClass(
    const ClassType* class_type, VisitResult allocate_result,
    const InitializerResults& initializer_results,
    const LayoutForInitialization& layout) {
  // Initialize inherited fields before this class's own fields.
  if (const ClassType* super = class_type->GetSuperClass()) {
    InitializeClass(super, allocate_result, initializer_results, layout);
  }

  for (Field f : class_type->fields()) {
    VisitResult initializer_value =
        initializer_results.field_value_map.at(f.name_and_type.name);
    LocationReference field =
        GenerateFieldReferenceForInit(allocate_result, f, layout);
    if (f.index) {
      // Indexed fields are represented as a slice; copy the elements from the
      // initializer's iterator into the slice.
      DCHECK(field.IsHeapSlice());
      VisitResult slice = field.GetVisitResult();
      GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                 "InitializeFieldsFromIterator"),
                   {{slice, initializer_value}, {}});
    } else {
      GenerateAssignToLocation(field, initializer_value);
    }
  }
}

// Evaluates an array-length expression in the scope of the class's namespace,
// with |bindings| providing the values of the fields the expression may refer
// to, and converts the result to intptr.
VisitResult ImplementationVisitor::GenerateArrayLength(
    Expression* array_length, Namespace* nspace,
    const std::map<std::string, LocalValue>& bindings) {
  StackScope stack_scope(this);
  CurrentSourcePosition::Scope pos_scope(array_length->pos);
  // Switch to the namespace where the class was declared.
  CurrentScope::Scope current_scope_scope(nspace);
  // Reset local bindings and install local binding for the preceding fields.
  BindingsManagersScope bindings_managers_scope;
  BlockBindings<LocalValue> field_bindings(&ValueBindingsManager::Get());
  for (auto& p : bindings) {
    field_bindings.Add(p.first, LocalValue{p.second}, true);
  }
  VisitResult length = Visit(array_length);
  // Normalize whatever integral type the expression produced to intptr.
  VisitResult converted_length =
      GenerateCall("Convert", Arguments{{length}, {}},
                   {TypeOracle::GetIntPtrType(), length.type()}, false);
  return stack_scope.Yield(converted_length);
}

// Computes the length of the indexed field |field| of an already-allocated
// |object| by evaluating the field's length expression, binding each earlier
// const field of the class lazily so it can be read from the object on demand.
VisitResult ImplementationVisitor::GenerateArrayLength(VisitResult object,
                                                       const Field& field) {
  DCHECK(field.index);

  StackScope stack_scope(this);
  const ClassType* class_type = *object.type()->ClassSupertype();
  std::map<std::string, LocalValue> bindings;
  bool before_current = true;
  for (Field f : class_type->ComputeAllFields()) {
    if (field.name_and_type.name == f.name_and_type.name) {
      before_current = false;
    }
    // We can't generate field references eagerly here, because some preceding
    // fields might be optional, and attempting to get a reference to an
    // optional field can crash the program if the field isn't present.
    // Instead, we use the lazy form of LocalValue to only generate field
    // references if they are used in the length expression.
    bindings.insert(
        {f.name_and_type.name,
         f.const_qualified
             ? (before_current
                    ? LocalValue{[=]() {
                        return GenerateFieldReference(object, f, class_type);
                      }}
                    : LocalValue("Array lengths may only refer to fields "
                                 "defined earlier"))
             : LocalValue(
                   "Non-const fields cannot be used for array lengths.")});
  }
  return stack_scope.Yield(
      GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
}

// Computes the length of the indexed field |field| before allocation, using
// the constructor's initializer values (rather than an existing object) for
// the const fields that precede the indexed portion of the class.
VisitResult ImplementationVisitor::GenerateArrayLength(
    const ClassType* class_type, const InitializerResults& initializer_results,
    const Field& field) {
  DCHECK(field.index);

  StackScope stack_scope(this);
  std::map<std::string, LocalValue> bindings;
  for (Field f : class_type->ComputeAllFields()) {
    // Only fields before the first indexed field have usable initializers.
    if (f.index) break;
    const std::string& fieldname = f.name_and_type.name;
    VisitResult value = initializer_results.field_value_map.at(fieldname);
    bindings.insert(
        {fieldname,
         f.const_qualified
             ? LocalValue{LocationReference::Temporary(
                   value, "initial field " + fieldname)}
             : LocalValue(
                   "Non-const fields cannot be used for array lengths.")});
  }
  return stack_scope.Yield(
      GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
}

// Computes per-field offsets, array lengths, and the total (tag-aligned)
// allocation size for a class instance about to be created, based on the
// constructor's initializer values.
LayoutForInitialization ImplementationVisitor::GenerateLayoutForInitialization(
    const ClassType* class_type,
    const InitializerResults& initializer_results) {
  LayoutForInitialization layout;
  VisitResult offset;
  for (Field f : class_type->ComputeAllFields()) {
    if (f.offset.has_value()) {
      // Statically-known offset: use the constant directly.
      offset =
          VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
    }
    layout.offsets[f.name_and_type.name] = offset;
    if (f.index) {
      // Indexed field: offset of the *next* field is computed dynamically
      // from the element count and element size.
      size_t element_size;
      std::string element_size_string;
      std::tie(element_size, element_size_string) =
          *SizeOf(f.name_and_type.type);
      VisitResult array_element_size =
          VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
      VisitResult array_length =
          GenerateArrayLength(class_type, initializer_results, f);
      layout.array_lengths[f.name_and_type.name] = array_length;
      Arguments arguments;
      arguments.parameters = {offset, array_length, array_element_size};
      offset = GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                          "AddIndexedFieldSizeToObjectSize"),
                            arguments);
    } else {
      // Non-indexed fields must always have a statically-known offset.
      DCHECK(f.offset.has_value());
    }
  }
  if (class_type->size().SingleValue()) {
    layout.size = VisitResult(TypeOracle::GetConstInt31Type(),
                              ToString(*class_type->size().SingleValue()));
  } else {
    // Variable-sized object: the running offset is the total size.
    layout.size = offset;
  }
  if ((size_t{1} << class_type->size().AlignmentLog2()) <
      TargetArchitecture::TaggedSize()) {
    // Round the size up to the tagged-pointer alignment required by the heap.
    Arguments arguments;
    arguments.parameters = {layout.size};
    layout.size = GenerateCall(
        QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AlignTagged"),
        arguments);
  }
  return layout;
}

// Lowers a `new ClassName{...}` expression: resolves and validates the class
// type, determines the map (explicit for extern classes, synthesized from the
// instance type otherwise), computes the allocation layout, allocates, and
// initializes all fields. Returns the allocation downcast to the class type.
VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
  StackScope stack_scope(this);
  const Type* type = TypeVisitor::ComputeType(expr->type);
  const ClassType* class_type = ClassType::DynamicCast(type);
  if (class_type == nullptr) {
    ReportError("type for new expression must be a class, \"", *type,
                "\" is not");
  }

  if (!class_type->AllowInstantiation()) {
    // Classes that are only used for testing should never be instantiated.
    ReportError(*class_type,
                " cannot be allocated with new (it's used for testing)");
  }

  InitializerResults initializer_results =
      VisitInitializerResults(class_type, expr->initializers);

  // The map must be the object's first field so allocation can install it.
  const Field& map_field = class_type->LookupField("map");
  if (*map_field.offset != 0) {
    ReportError("class initializers must have a map as first parameter");
  }
  const std::map<std::string, VisitResult>& initializer_fields =
      initializer_results.field_value_map;
  auto it_object_map = initializer_fields.find(map_field.name_and_type.name);
  VisitResult object_map;
  if (class_type->IsExtern()) {
    // Extern classes require the caller to supply the map explicitly.
    if (it_object_map == initializer_fields.end()) {
      ReportError("Constructor for ", class_type->name(),
                  " needs Map argument!");
    }
    object_map = it_object_map->second;
  } else {
    // Non-extern classes derive the map from the class's instance type; a
    // user-supplied map would be redundant and is rejected.
    if (it_object_map != initializer_fields.end()) {
      ReportError(
          "Constructor for ", class_type->name(),
          " must not specify Map argument; it is automatically inserted.");
    }
    Arguments get_struct_map_arguments;
    get_struct_map_arguments.parameters.push_back(
        VisitResult(TypeOracle::GetConstexprInstanceTypeType(),
                    CapifyStringWithUnderscores(class_type->name()) + "_TYPE"));
    object_map = GenerateCall(
        QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "GetInstanceTypeMap"),
        get_struct_map_arguments, {}, false);
    CurrentSourcePosition::Scope current_pos(expr->pos);
    // Record the synthesized map as if it had been written by the user.
    initializer_results.names.insert(initializer_results.names.begin(),
                                     MakeNode<Identifier>("map"));
    initializer_results.field_value_map[map_field.name_and_type.name] =
        object_map;
  }

  CheckInitializersWellformed(class_type->name(),
                              class_type->ComputeAllFields(),
                              expr->initializers, !class_type->IsExtern());

  LayoutForInitialization layout =
      GenerateLayoutForInitialization(class_type, initializer_results);

  Arguments allocate_arguments;
  allocate_arguments.parameters.push_back(layout.size);
  allocate_arguments.parameters.push_back(object_map);
  allocate_arguments.parameters.push_back(
      GenerateBoolConstant(expr->pretenured));
  VisitResult allocate_result = GenerateCall(
      QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AllocateFromNew"),
      allocate_arguments, {class_type}, false);
  DCHECK(allocate_result.IsOnStack());

  InitializeClass(class_type, allocate_result, initializer_results, layout);

  // The allocation helper returns an untyped object; cast it to the class.
  return stack_scope.Yield(GenerateCall(
      "%RawDownCast", Arguments{{allocate_result}, {}}, {class_type}));
}

// Lowers `break`: jumps to the innermost enclosing loop's break label.
// Reports an error when used outside of a loop. Never falls through.
const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
  base::Optional<Binding<LocalLabel>*> break_label =
      TryLookupLabel(kBreakLabelName);
  if (!break_label) {
    ReportError("break used outside of loop");
  }
  assembler().Goto((*break_label)->block);
  return TypeOracle::GetNeverType();
}

// Lowers `continue`: jumps to the innermost enclosing loop's continue label.
// Reports an error when used outside of a loop. Never falls through.
const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
  base::Optional<Binding<LocalLabel>*> continue_label =
      TryLookupLabel(kContinueLabelName);
  if (!continue_label) {
    ReportError("continue used outside of loop");
  }
  assembler().Goto((*continue_label)->block);
  return TypeOracle::GetNeverType();
}

// Lowers a C-style `for (decl; test; action) body` loop into CFG blocks:
// header (test), body, optional action, and exit. `break` targets the exit
// block and `continue` targets the action block (or the header when there is
// no action expression).
const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
  BlockBindings<LocalValue> loop_bindings(&ValueBindingsManager::Get());

  if (stmt->var_declaration) Visit(*stmt->var_declaration, &loop_bindings);

  Block* body_block = assembler().NewBlock(assembler().CurrentStack());
  Block* exit_block = assembler().NewBlock(assembler().CurrentStack());

  Block* header_block = assembler().NewBlock();
  assembler().Goto(header_block);
  assembler().Bind(header_block);

  // The continue label is where "continue" statements jump to. If no action
  // expression is provided, we jump directly to the header.
  Block* continue_block = header_block;

  // The action label is only needed when an action expression was provided.
  Block* action_block = nullptr;
  if (stmt->action) {
    action_block = assembler().NewBlock();

    // The action expression needs to be executed on a continue.
    continue_block = action_block;
  }

  if (stmt->test) {
    GenerateExpressionBranch(*stmt->test, body_block, exit_block);
  } else {
    // No test expression: the loop can only terminate via break/goto.
    assembler().Goto(body_block);
  }

  assembler().Bind(body_block);
  {
    BreakContinueActivator activator(exit_block, continue_block);
    const Type* body_result = Visit(stmt->body);
    // Only emit the back-edge if the body can actually fall through.
    if (body_result != TypeOracle::GetNeverType()) {
      assembler().Goto(continue_block);
    }
  }

  if (stmt->action) {
    assembler().Bind(action_block);
    const Type* action_result = Visit(*stmt->action);
    if (action_result != TypeOracle::GetNeverType()) {
      assembler().Goto(header_block);
    }
  }

  assembler().Bind(exit_block);
  return TypeOracle::GetVoidType();
}

// Spread expressions (`...x`) are handled specially during indexed-field
// initialization; encountering one anywhere else is an error.
VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
  ReportError(
      "spread operators are only currently supported in indexed class field "
      "initialization expressions");
}

1774 1775
void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
  for (SourceId file : SourceFileMap::AllSources()) {
1776 1777 1778 1779
    std::string base_filename =
        dir + "/" + SourceFileMap::PathFromV8RootWithoutExtension(file);
    GlobalContext::PerFileStreams& streams =
        GlobalContext::GeneratedPerFile(file);
1780

1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796
    std::string csa_cc = streams.csa_ccfile.str();
    // Insert missing builtin includes where the marker is.
    {
      auto pos = csa_cc.find(BuiltinIncludesMarker);
      CHECK_NE(pos, std::string::npos);
      std::string includes;
      for (const SourceId& include : streams.required_builtin_includes) {
        std::string include_file =
            SourceFileMap::PathFromV8RootWithoutExtension(include);
        includes += "#include \"torque-generated/";
        includes += include_file;
        includes += "-tq-csa.h\"\n";
      }
      csa_cc.replace(pos, strlen(BuiltinIncludesMarker), std::move(includes));
    }

1797
    // TODO(torque-builder): Pass file directly.
1798
    WriteFile(base_filename + "-tq-csa.cc", std::move(csa_cc));
1799 1800 1801
    WriteFile(base_filename + "-tq-csa.h", streams.csa_headerfile.str());
    WriteFile(base_filename + "-tq.inc",
              streams.class_definition_headerfile.str());
1802 1803 1804 1805 1806
    WriteFile(
        base_filename + "-tq-inl.inc",
        streams.class_definition_inline_headerfile_macro_declarations.str() +
            streams.class_definition_inline_headerfile_macro_definitions.str() +
            streams.class_definition_inline_headerfile.str());
1807
    WriteFile(base_filename + "-tq.cc", streams.class_definition_ccfile.str());
1808
  }
1809 1810 1811

  WriteFile(dir + "/debug-macros.h", debug_macros_h_.str());
  WriteFile(dir + "/debug-macros.cc", debug_macros_cc_.str());
1812 1813
}

// Builds the C++ function declaration for a Torque macro, choosing the
// externally-visible name appropriate for the current output mode (plain C++,
// debug C++, or CSA).
cpp::Function ImplementationVisitor::GenerateMacroFunctionDeclaration(
    Macro* macro) {
  return GenerateFunction(nullptr,
                          output_type_ == OutputType::kCC
                              ? macro->CCName()
                              : output_type_ == OutputType::kCCDebug
                                    ? macro->CCDebugName()
                                    : macro->ExternalName(),
                          macro->signature(), macro->parameter_names());
}

// Constructs a cpp::Function declaration for a Torque callable: maps the
// Torque return/parameter types to the C++ spellings required by the current
// output mode (CSA, plain C++, or debug C++), prepends the mode-specific
// first parameter (CodeAssemblerState / MemoryAccessor), and appends label
// parameters for CSA output. If |generated_parameter_names| is non-null it
// receives the user-visible parameter names (minus any synthetic first one).
cpp::Function ImplementationVisitor::GenerateFunction(
    cpp::Class* owner, const std::string& name, const Signature& signature,
    const NameVector& parameter_names, bool pass_code_assembler_state,
    std::vector<std::string>* generated_parameter_names) {
  cpp::Function f(owner, name);
  f.SetInline(output_type_ == OutputType::kCC);

  // Set return type.
  // TODO(torque-builder): Consider an overload of SetReturnType that handles
  // this.
  if (signature.return_type->IsVoidOrNever()) {
    f.SetReturnType("void");
  } else if (output_type_ == OutputType::kCCDebug) {
    f.SetReturnType(std::string("Value<") +
                    signature.return_type->GetDebugType() + ">");
  } else if (output_type_ == OutputType::kCC) {
    f.SetReturnType(signature.return_type->GetRuntimeType());
  } else {
    DCHECK_EQ(output_type_, OutputType::kCSA);
    f.SetReturnType(signature.return_type->GetGeneratedTypeName());
  }

  bool ignore_first_parameter = true;
  if (output_type_ == OutputType::kCCDebug) {
    f.AddParameter("d::MemoryAccessor", "accessor");
  } else if (output_type_ == OutputType::kCSA && pass_code_assembler_state) {
    f.AddParameter("compiler::CodeAssemblerState*", "state_");
  } else {
    ignore_first_parameter = false;
  }

  // TODO(torque-builder): Consider an overload for AddParameter that handles
  // this.
  DCHECK_GE(signature.types().size(), parameter_names.size());
  for (std::size_t i = 0; i < signature.types().size(); ++i) {
    const Type* parameter_type = signature.types()[i];
    std::string type;
    if (output_type_ == OutputType::kCC) {
      type = parameter_type->GetRuntimeType();
    } else if (output_type_ == OutputType::kCCDebug) {
      type = parameter_type->GetDebugType();
    } else {
      DCHECK_EQ(output_type_, OutputType::kCSA);
      type = parameter_type->GetGeneratedTypeName();
    }
    // Unnamed trailing parameters fall back to their positional index.
    f.AddParameter(std::move(type),
                   ExternalParameterName(i < parameter_names.size()
                                             ? parameter_names[i]->value
                                             : std::to_string(i)));
  }

  for (const LabelDeclaration& label_info : signature.labels) {
    if (output_type_ == OutputType::kCC ||
        output_type_ == OutputType::kCCDebug) {
      ReportError("Macros that generate runtime code can't have label exits");
    }
    f.AddParameter("compiler::CodeAssemblerLabel*",
                   ExternalLabelName(label_info.name->value));
    size_t i = 0;
    for (const Type* type : label_info.types) {
      std::string generated_type_name;
      if (type->StructSupertype()) {
        // Deliberately emit a #error so misuse fails at C++ compile time.
        generated_type_name = "\n#error no structs allowed in labels\n";
      } else {
        generated_type_name = "compiler::TypedCodeAssemblerVariable<";
        generated_type_name += type->GetGeneratedTNodeTypeName();
        generated_type_name += ">*";
      }
      f.AddParameter(generated_type_name,
                     ExternalLabelParameterName(label_info.name->value, i));
      ++i;
    }
  }

  if (generated_parameter_names) {
    *generated_parameter_names = f.GetParameterNames();
    if (ignore_first_parameter) {
      // The synthetic state/accessor parameter is not a user parameter.
      DCHECK(!generated_parameter_names->empty());
      generated_parameter_names->erase(generated_parameter_names->begin());
    }
  }
  return f;
}

namespace {

// Formats and reports a fatal overload-resolution error: the callable that
// was searched for, the labels at the call site, every candidate signature,
// and the reason each inapplicable generic failed to instantiate.
void FailCallableLookup(
    const std::string& reason, const QualifiedName& name,
    const TypeVector& parameter_types,
    const std::vector<Binding<LocalLabel>*>& labels,
    const std::vector<Signature>& candidates,
    // Pass by const reference to avoid copying the vector of pairs.
    const std::vector<std::pair<GenericCallable*, std::string>>&
        inapplicable_generics) {
  std::stringstream stream;
  stream << "\n" << reason << ": \n  " << name << "(" << parameter_types << ")";
  if (labels.size() != 0) {
    stream << " labels ";
    for (size_t i = 0; i < labels.size(); ++i) {
      stream << labels[i]->name() << "(" << labels[i]->parameter_types << ")";
    }
  }
  stream << "\ncandidates are:";
  for (const Signature& signature : candidates) {
    stream << "\n  " << name;
    PrintSignature(stream, signature, false);
  }
  if (inapplicable_generics.size() != 0) {
    stream << "\nfailed to instantiate all of these generic declarations:";
    for (auto& failure : inapplicable_generics) {
      GenericCallable* generic = failure.first;
      const std::string& fail_reason = failure.second;
      stream << "\n  " << generic->name() << " defined at "
             << PositionAsString(generic->Position()) << ":\n    "
             << fail_reason << "\n";
    }
  }
  ReportError(stream.str());
}

// Returns the existing specialization of |key|'s generic for the given type
// arguments, creating it implicitly if it hasn't been instantiated yet.
Callable* GetOrCreateSpecialization(
    const SpecializationKey<GenericCallable>& key) {
  if (base::Optional<Callable*> specialization =
          key.generic->GetSpecialization(key.specialized_types)) {
    return *specialization;
  }
  return DeclarationVisitor::SpecializeImplicit(key);
}

}  // namespace

// Returns the binding for the local value |name| in the current scope, or
// base::nullopt if no such binding exists.
base::Optional<Binding<LocalValue>*> ImplementationVisitor::TryLookupLocalValue(
    const std::string& name) {
  return ValueBindingsManager::Get().TryLookup(name);
}

// Returns the binding for the label |name| in the current scope, or
// base::nullopt if no such label is bound.
base::Optional<Binding<LocalLabel>*> ImplementationVisitor::TryLookupLabel(
    const std::string& name) {
  auto& label_bindings = LabelBindingsManager::Get();
  return label_bindings.TryLookup(name);
}

// Like TryLookupLabel, but reports a fatal error if the label is not found.
Binding<LocalLabel>* ImplementationVisitor::LookupLabel(
    const std::string& name) {
  auto maybe_label = TryLookupLabel(name);
  if (!maybe_label) {
    ReportError("cannot find label ", name);
  }
  return *maybe_label;
}

// Looks up the label |name| and returns its target block, additionally
// requiring that the label carries no parameters.
Block* ImplementationVisitor::LookupSimpleLabel(const std::string& name) {
  LocalLabel* simple_label = LookupLabel(name);
  if (!simple_label->parameter_types.empty()) {
    ReportError("label ", name,
                "was expected to have no parameters, but has parameters (",
                simple_label->parameter_types, ")");
  }
  return simple_label->block;
}

// Try to lookup a callable with the provided argument types. Do not report
// an error if no matching callable was found, but return false instead.
// This is used to test the presence of overloaded field accessors.
bool ImplementationVisitor::TestLookupCallable(
    const QualifiedName& name, const TypeVector& parameter_types) {
  return LookupCallable(name, Declarations::TryLookup(name), parameter_types,
                        {}, {}, true) != nullptr;
}

1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009
TypeArgumentInference ImplementationVisitor::InferSpecializationTypes(
    GenericCallable* generic, const TypeVector& explicit_specialization_types,
    const TypeVector& explicit_arguments) {
  std::vector<base::Optional<const Type*>> all_arguments;
  const ParameterList& parameters = generic->declaration()->parameters;
  for (size_t i = 0; i < parameters.implicit_count; ++i) {
    base::Optional<Binding<LocalValue>*> val =
        TryLookupLocalValue(parameters.names[i]->value);
    all_arguments.push_back(
        val ? (*val)->GetLocationReference(*val).ReferencedType()
            : base::nullopt);
  }
  for (const Type* explicit_argument : explicit_arguments) {
    all_arguments.push_back(explicit_argument);
  }
  return generic->InferSpecializationTypes(explicit_specialization_types,
                                           all_arguments);
}

// Resolves an overloaded callable: collects all non-generic callables and all
// successfully-inferred generic instantiations from |declaration_container|,
// filters them by signature compatibility, picks the unique best match by
// parameter-difference ordering, and materializes a generic specialization if
// needed. Reports detailed errors (or returns nullptr when |silence_errors|)
// on no match, ambiguity, or arity mismatch.
template <class Container>
Callable* ImplementationVisitor::LookupCallable(
    const QualifiedName& name, const Container& declaration_container,
    const TypeVector& parameter_types,
    const std::vector<Binding<LocalLabel>*>& labels,
    const TypeVector& specialization_types, bool silence_errors) {
  Callable* result = nullptr;

  std::vector<Declarable*> overloads;
  std::vector<Signature> overload_signatures;
  std::vector<std::pair<GenericCallable*, std::string>> inapplicable_generics;
  for (auto* declarable : declaration_container) {
    if (GenericCallable* generic = GenericCallable::DynamicCast(declarable)) {
      TypeArgumentInference inference = InferSpecializationTypes(
          generic, specialization_types, parameter_types);
      if (inference.HasFailed()) {
        // Remember the failure so the error message can explain it.
        inapplicable_generics.push_back(
            std::make_pair(generic, inference.GetFailureReason()));
        continue;
      }
      overloads.push_back(generic);
      overload_signatures.push_back(
          DeclarationVisitor::MakeSpecializedSignature(
              SpecializationKey<GenericCallable>{generic,
                                                 inference.GetResult()}));
    } else if (Callable* callable = Callable::DynamicCast(declarable)) {
      overloads.push_back(callable);
      overload_signatures.push_back(callable->signature());
    }
  }
  // Indices of candidates in overloads/overload_signatures.
  std::vector<size_t> candidates;
  for (size_t i = 0; i < overloads.size(); ++i) {
    const Signature& signature = overload_signatures[i];
    if (IsCompatibleSignature(signature, parameter_types, labels.size())) {
      candidates.push_back(i);
    }
  }

  if (overloads.empty() && inapplicable_generics.empty()) {
    if (silence_errors) return nullptr;
    std::stringstream stream;
    stream << "no matching declaration found for " << name;
    ReportError(stream.str());
  } else if (candidates.empty()) {
    if (silence_errors) return nullptr;
    FailCallableLookup("cannot find suitable callable with name", name,
                       parameter_types, labels, overload_signatures,
                       inapplicable_generics);
  }

  auto is_better_candidate = [&](size_t a, size_t b) {
    return ParameterDifference(overload_signatures[a].GetExplicitTypes(),
                               parameter_types)
        .StrictlyBetterThan(ParameterDifference(
            overload_signatures[b].GetExplicitTypes(), parameter_types));
  };

  size_t best = *std::min_element(candidates.begin(), candidates.end(),
                                  is_better_candidate);
  // This check is contained in libstdc++'s std::min_element.
  DCHECK(!is_better_candidate(best, best));
  // The best candidate must be strictly better than every other candidate,
  // otherwise the call is ambiguous.
  for (size_t candidate : candidates) {
    if (candidate != best && !is_better_candidate(best, candidate)) {
      std::vector<Signature> candidate_signatures;
      candidate_signatures.reserve(candidates.size());
      for (size_t i : candidates) {
        candidate_signatures.push_back(overload_signatures[i]);
      }
      FailCallableLookup("ambiguous callable ", name, parameter_types, labels,
                         candidate_signatures, inapplicable_generics);
    }
  }

  if (GenericCallable* generic =
          GenericCallable::DynamicCast(overloads[best])) {
    TypeArgumentInference inference = InferSpecializationTypes(
        generic, specialization_types, parameter_types);
    result = GetOrCreateSpecialization(
        SpecializationKey<GenericCallable>{generic, inference.GetResult()});
  } else {
    result = Callable::cast(overloads[best]);
  }

  size_t caller_size = parameter_types.size();
  size_t callee_size =
      result->signature().types().size() - result->signature().implicit_count;
  if (caller_size != callee_size &&
      !result->signature().parameter_types.var_args) {
    std::stringstream stream;
    stream << "parameter count mismatch calling " << *result << " - expected "
           << std::to_string(callee_size) << ", found "
           << std::to_string(caller_size);
    ReportError(stream.str());
  }

  return result;
}

// Convenience overload: extracts parameter types and labels from |arguments|
// and forwards to the main LookupCallable overload.
template <class Container>
Callable* ImplementationVisitor::LookupCallable(
    const QualifiedName& name, const Container& declaration_container,
    const Arguments& arguments, const TypeVector& specialization_types) {
  return LookupCallable(name, declaration_container,
                        arguments.parameters.ComputeTypeVector(),
                        arguments.labels, specialization_types);
}

// Resolves a method call on |receiver_type|: prepends the receiver type to
// the argument type list and performs ordinary callable lookup among the
// type's methods with that name.
Method* ImplementationVisitor::LookupMethod(
    const std::string& name, const AggregateType* receiver_type,
    const Arguments& arguments, const TypeVector& specialization_types) {
  TypeVector types(arguments.parameters.ComputeTypeVector());
  // The receiver is the implicit first parameter of every method.
  types.insert(types.begin(), receiver_type);
  return Method::cast(LookupCallable({{}, name}, receiver_type->Methods(name),
                                     types, arguments.labels,
                                     specialization_types));
}

// Computes the least common non-constexpr type of |left| and |right|: one of
// the two if one is assignable from the other, otherwise their union type.
const Type* ImplementationVisitor::GetCommonType(const Type* left,
                                                 const Type* right) {
  const Type* common_type;
  if (IsAssignableFrom(left, right)) {
    common_type = left;
  } else if (IsAssignableFrom(right, left)) {
    common_type = right;
  } else {
    common_type = TypeOracle::GetUnionType(left, right);
  }
  // The result of merging two branches is always a runtime value.
  common_type = common_type->NonConstexprVersion();
  return common_type;
}

// Duplicates |to_copy| on top of the CSA value stack (via Peek) when it lives
// on the stack; constexpr values are immutable strings and are returned
// unchanged.
VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
  if (to_copy.IsOnStack()) {
    return VisitResult(to_copy.type(),
                       assembler().Peek(to_copy.stack_range(), to_copy.type()));
  }
  return to_copy;
}

2150
VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
2151
  StackScope stack_scope(this);
2152

2153 2154 2155 2156 2157
  auto& initializers = expr->initializers;
  std::vector<VisitResult> values;
  std::vector<const Type*> term_argument_types;
  values.reserve(initializers.size());
  term_argument_types.reserve(initializers.size());
2158

2159 2160 2161 2162 2163 2164
  // Compute values and types of all initializer arguments
  for (const NameAndExpression& initializer : initializers) {
    VisitResult value = Visit(initializer.expression);
    values.push_back(value);
    term_argument_types.push_back(value.type());
  }
2165

2166
  // Compute and check struct type from given struct name and argument types
2167
  const Type* type = TypeVisitor::ComputeTypeForStructExpression(
2168
      expr->type, term_argument_types);
2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180
  if (const auto* struct_type = StructType::DynamicCast(type)) {
    CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
                                initializers);

    // Implicitly convert values and thereby build the struct on the stack
    StackRange struct_range = assembler().TopRange(0);
    auto& fields = struct_type->fields();
    for (size_t i = 0; i < values.size(); i++) {
      values[i] =
          GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
      struct_range.Extend(values[i].stack_range());
    }
2181

2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206
    return stack_scope.Yield(VisitResult(struct_type, struct_range));
  } else {
    const auto* bitfield_struct_type = BitFieldStructType::cast(type);
    CheckInitializersWellformed(bitfield_struct_type->name(),
                                bitfield_struct_type->fields(), initializers);

    // Create a zero and cast it to the desired bitfield struct type.
    VisitResult result{TypeOracle::GetConstInt32Type(), "0"};
    result = GenerateImplicitConvert(TypeOracle::GetInt32Type(), result);
    result = GenerateCall("Unsigned", Arguments{{result}, {}}, {});
    result = GenerateCall("%RawDownCast", Arguments{{result}, {}},
                          {bitfield_struct_type});

    // Set each field in the result. If these fields are constexpr, then all of
    // this initialization will end up reduced to a single value during TurboFan
    // optimization.
    auto& fields = bitfield_struct_type->fields();
    for (size_t i = 0; i < values.size(); i++) {
      values[i] =
          GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
      result = GenerateSetBitField(bitfield_struct_type, fields[i], result,
                                   values[i], /*starts_as_zero=*/true);
    }

    return stack_scope.Yield(result);
2207
  }
2208
}
2209

2210 2211 2212 2213 2214 2215 2216 2217
VisitResult ImplementationVisitor::GenerateSetBitField(
    const Type* bitfield_struct_type, const BitField& bitfield,
    VisitResult bitfield_struct, VisitResult value, bool starts_as_zero) {
  GenerateCopy(bitfield_struct);
  GenerateCopy(value);
  assembler().Emit(
      StoreBitFieldInstruction{bitfield_struct_type, bitfield, starts_as_zero});
  return VisitResult(bitfield_struct_type, assembler().TopRange(1));
2218 2219
}

2220
LocationReference ImplementationVisitor::GetLocationReference(
2221
    Expression* location) {
2222 2223 2224 2225 2226 2227 2228 2229 2230
  switch (location->kind) {
    case AstNode::Kind::kIdentifierExpression:
      return GetLocationReference(static_cast<IdentifierExpression*>(location));
    case AstNode::Kind::kFieldAccessExpression:
      return GetLocationReference(
          static_cast<FieldAccessExpression*>(location));
    case AstNode::Kind::kElementAccessExpression:
      return GetLocationReference(
          static_cast<ElementAccessExpression*>(location));
2231 2232 2233
    case AstNode::Kind::kDereferenceExpression:
      return GetLocationReference(
          static_cast<DereferenceExpression*>(location));
2234
    default:
2235
      return LocationReference::Temporary(Visit(location), "expression");
2236 2237 2238
  }
}

2239 2240
LocationReference ImplementationVisitor::GetLocationReference(
    FieldAccessExpression* expr) {
2241
  return GenerateFieldAccess(GetLocationReference(expr->object),
2242
                             expr->field->value, false, expr->field->pos);
2243 2244 2245 2246
}

// Resolves access to field |fieldname| on |reference|, trying in order:
// structs held in local variables, structs held in temporaries, bitfield
// structs (possibly wrapped in SmiTagged<...>), Reference<...> values whose
// referent is a struct (adjusting the reference's offset), and finally class
// fields or overloaded ".field" accessor macros.
// |ignore_stuct_field_constness| allows writing const struct fields; it is
// set when a whole struct is assigned at once.
LocationReference ImplementationVisitor::GenerateFieldAccess(
    LocationReference reference, const std::string& fieldname,
    bool ignore_stuct_field_constness, base::Optional<SourcePosition> pos) {
  if (reference.IsVariableAccess() &&
      reference.variable().type()->StructSupertype()) {
    const StructType* type = *reference.variable().type()->StructSupertype();
    const Field& field = type->LookupField(fieldname);
    if (GlobalContext::collect_language_server_data() && pos.has_value()) {
      LanguageServerData::AddDefinition(*pos, field.pos);
    }
    if (GlobalContext::collect_kythe_data() && pos.has_value()) {
      KytheData::AddClassFieldUse(*pos, &field);
    }
    if (field.const_qualified) {
      // Const struct fields can only be read, so hand out a temporary.
      VisitResult t_value = ProjectStructField(reference.variable(), fieldname);
      return LocationReference::Temporary(
          t_value, "for constant field '" + field.name_and_type.name + "'");
    } else {
      return LocationReference::VariableAccess(
          ProjectStructField(reference.variable(), fieldname));
    }
  }
  if (reference.IsTemporary() &&
      reference.temporary().type()->StructSupertype()) {
    if (GlobalContext::collect_language_server_data() && pos.has_value()) {
      const StructType* type = *reference.temporary().type()->StructSupertype();
      const Field& field = type->LookupField(fieldname);
      LanguageServerData::AddDefinition(*pos, field.pos);
    }
    return LocationReference::Temporary(
        ProjectStructField(reference.temporary(), fieldname),
        reference.temporary_description());
  }
  if (base::Optional<const Type*> referenced_type =
          reference.ReferencedType()) {
    if ((*referenced_type)->IsBitFieldStructType()) {
      const BitFieldStructType* bitfield_struct =
          BitFieldStructType::cast(*referenced_type);
      const BitField& field = bitfield_struct->LookupField(fieldname);
      return LocationReference::BitFieldAccess(reference, field);
    }
    if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
            (*referenced_type), TypeOracle::GetSmiTaggedGeneric())) {
      const BitFieldStructType* bitfield_struct =
          BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
      if (bitfield_struct == nullptr) {
        ReportError(
            "When a value of type SmiTagged<T> is used in a field access "
            "expression, T is expected to be a bitfield struct type. Instead, "
            "T "
            "is ",
            **type_wrapped_in_smi);
      }
      const BitField& field = bitfield_struct->LookupField(fieldname);
      return LocationReference::BitFieldAccess(reference, field);
    }
  }
  if (reference.IsHeapReference()) {
    VisitResult ref = reference.heap_reference();
    bool is_const;
    auto generic_type =
        TypeOracle::MatchReferenceGeneric(ref.type(), &is_const);
    if (!generic_type) {
      ReportError(
          "Left-hand side of field access expression is marked as a reference "
          "but is not of type Reference<...>. Found type: ",
          ref.type()->ToString());
    }
    if (auto struct_type = (*generic_type)->StructSupertype()) {
      const Field& field = (*struct_type)->LookupField(fieldname);
      // Update the Reference's type to refer to the field type within the
      // struct.
      ref.SetType(TypeOracle::GetReferenceType(
          field.name_and_type.type,
          is_const ||
              (field.const_qualified && !ignore_stuct_field_constness)));
      if (!field.offset.has_value()) {
        Error("accessing field with unknown offset").Throw();
      }
      if (*field.offset != 0) {
        // Copy the Reference struct up the stack and update the new copy's
        // |offset| value to point to the struct field.
        StackScope scope(this);
        ref = GenerateCopy(ref);
        VisitResult ref_offset = ProjectStructField(ref, "offset");
        VisitResult struct_offset{
            TypeOracle::GetIntPtrType()->ConstexprVersion(),
            std::to_string(*field.offset)};
        VisitResult updated_offset =
            GenerateCall("+", Arguments{{ref_offset, struct_offset}, {}});
        assembler().Poke(ref_offset.stack_range(), updated_offset.stack_range(),
                         ref_offset.type());
        ref = scope.Yield(ref);
      }
      return LocationReference::HeapReference(ref);
    }
  }
  VisitResult object_result = GenerateFetchFromLocation(reference);
  if (base::Optional<const ClassType*> class_type =
          object_result.type()->ClassSupertype()) {
    // This is a hack to distinguish the situation where we want to use
    // overloaded field accessors from when we want to create a reference.
    bool has_explicit_overloads = TestLookupCallable(
        QualifiedName{"." + fieldname}, {object_result.type()});
    if ((*class_type)->HasField(fieldname) && !has_explicit_overloads) {
      const Field& field = (*class_type)->LookupField(fieldname);
      if (GlobalContext::collect_language_server_data() && pos.has_value()) {
        LanguageServerData::AddDefinition(*pos, field.pos);
      }
      // Bug fix: also require pos.has_value() before dereferencing |pos|,
      // matching the guard used in every other Kythe/LSP branch above.
      if (GlobalContext::collect_kythe_data() && pos.has_value()) {
        KytheData::AddClassFieldUse(*pos, &field);
      }
      return GenerateFieldReference(object_result, field, *class_type);
    }
  }
  return LocationReference::FieldAccess(object_result, fieldname);
}

2363 2364
LocationReference ImplementationVisitor::GetLocationReference(
    ElementAccessExpression* expr) {
2365
  LocationReference reference = GetLocationReference(expr->array);
2366
  VisitResult index = Visit(expr->index);
2367
  if (reference.IsHeapSlice()) {
2368
    return GenerateReferenceToItemInHeapSlice(reference, index);
2369 2370 2371 2372
  } else {
    return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
                                          index);
  }
2373 2374
}

2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388
LocationReference ImplementationVisitor::GenerateReferenceToItemInHeapSlice(
    LocationReference slice, VisitResult index) {
  DCHECK(slice.IsHeapSlice());
  Arguments arguments{{index}, {}};
  const StructType* slice_type = *slice.heap_slice().type()->StructSupertype();
  Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
  // The reference has to be treated like a normal value when calling methods
  // on the underlying slice implementation.
  LocationReference slice_value =
      LocationReference::Temporary(slice.GetVisitResult(), "slice as value");
  return LocationReference::HeapReference(
      GenerateCall(method, std::move(slice_value), arguments, {}, false));
}

2389 2390
LocationReference ImplementationVisitor::GetLocationReference(
    IdentifierExpression* expr) {
2391 2392
  if (expr->namespace_qualification.empty()) {
    if (base::Optional<Binding<LocalValue>*> value =
2393
            TryLookupLocalValue(expr->name->value)) {
2394
      if (GlobalContext::collect_language_server_data()) {
2395
        LanguageServerData::AddDefinition(expr->name->pos,
2396 2397
                                          (*value)->declaration_position());
      }
2398 2399 2400 2401 2402 2403 2404
      if (GlobalContext::collect_kythe_data()) {
        if (!expr->IsThis()) {
          DCHECK_EQ(expr->name->pos.end.column - expr->name->pos.start.column,
                    expr->name->value.length());
          KytheData::AddBindingUse(expr->name->pos, *value);
        }
      }
2405 2406 2407 2408
      if (expr->generic_arguments.size() != 0) {
        ReportError("cannot have generic parameters on local name ",
                    expr->name);
      }
2409
      return (*value)->GetLocationReference(*value);
2410 2411 2412
    }
  }

2413 2414 2415
  if (expr->IsThis()) {
    ReportError("\"this\" cannot be qualified");
  }
2416 2417
  QualifiedName name =
      QualifiedName(expr->namespace_qualification, expr->name->value);
2418
  if (base::Optional<Builtin*> builtin = Declarations::TryLookupBuiltin(name)) {
2419
    if (GlobalContext::collect_language_server_data()) {
2420 2421
      LanguageServerData::AddDefinition(expr->name->pos,
                                        (*builtin)->Position());
2422
    }
2423
    // TODO(v8:12261): Consider collecting KytheData here.
2424
    return LocationReference::Temporary(GetBuiltinCode(*builtin),
2425
                                        "builtin " + expr->name->value);
2426 2427
  }
  if (expr->generic_arguments.size() != 0) {
2428
    GenericCallable* generic = Declarations::LookupUniqueGeneric(name);
2429
    Callable* specialization =
2430
        GetOrCreateSpecialization(SpecializationKey<GenericCallable>{
2431
            generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
2432 2433 2434
    if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
      DCHECK(!builtin->IsExternal());
      return LocationReference::Temporary(GetBuiltinCode(builtin),
2435
                                          "builtin " + expr->name->value);
2436 2437 2438 2439
    } else {
      ReportError("cannot create function pointer for non-builtin ",
                  generic->name());
    }
2440
  }
2441
  Value* value = Declarations::LookupValue(name);
2442 2443 2444 2445
  CHECK(value->Position().source.IsValid());
  if (auto stream = CurrentFileStreams::Get()) {
    stream->required_builtin_includes.insert(value->Position().source);
  }
2446 2447 2448
  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
  }
2449
  if (auto* constant = NamespaceConstant::DynamicCast(value)) {
2450 2451 2452
    if (GlobalContext::collect_kythe_data()) {
      KytheData::AddConstantUse(expr->name->pos, constant);
    }
2453 2454
    if (constant->type()->IsConstexpr()) {
      return LocationReference::Temporary(
2455
          VisitResult(constant->type(), constant->external_name() + "(state_)"),
2456
          "namespace constant " + expr->name->value);
2457
    }
2458
    assembler().Emit(NamespaceConstantInstruction{constant});
2459 2460 2461 2462
    StackRange stack_range =
        assembler().TopRange(LoweredSlotCount(constant->type()));
    return LocationReference::Temporary(
        VisitResult(constant->type(), stack_range),
2463
        "namespace constant " + expr->name->value);
2464
  }
2465
  ExternConstant* constant = ExternConstant::cast(value);
2466 2467 2468
  if (GlobalContext::collect_kythe_data()) {
    KytheData::AddConstantUse(expr->name->pos, constant);
  }
2469
  return LocationReference::Temporary(constant->value(),
2470
                                      "extern value " + expr->name->value);
2471 2472
}

2473 2474 2475
LocationReference ImplementationVisitor::GetLocationReference(
    DereferenceExpression* expr) {
  VisitResult ref = Visit(expr->reference);
2476 2477 2478 2479
  if (!TypeOracle::MatchReferenceGeneric(ref.type())) {
    Error("Operator * expects a reference type but found a value of type ",
          *ref.type())
        .Throw();
2480 2481 2482 2483
  }
  return LocationReference::HeapReference(ref);
}

2484 2485 2486 2487 2488 2489
VisitResult ImplementationVisitor::GenerateFetchFromLocation(
    const LocationReference& reference) {
  if (reference.IsTemporary()) {
    return GenerateCopy(reference.temporary());
  } else if (reference.IsVariableAccess()) {
    return GenerateCopy(reference.variable());
2490
  } else if (reference.IsHeapReference()) {
2491
    const Type* referenced_type = *reference.ReferencedType();
2492 2493 2494 2495
    if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
      return GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                        "LoadFloat64OrHole"),
                          Arguments{{reference.heap_reference()}, {}});
2496
    } else if (auto struct_type = referenced_type->StructSupertype()) {
2497
      StackRange result_range = assembler().TopRange(0);
2498
      for (const Field& field : (*struct_type)->fields()) {
2499 2500 2501 2502 2503 2504 2505 2506 2507
        StackScope scope(this);
        const std::string& fieldname = field.name_and_type.name;
        VisitResult field_value = scope.Yield(GenerateFetchFromLocation(
            GenerateFieldAccess(reference, fieldname)));
        result_range.Extend(field_value.stack_range());
      }
      return VisitResult(referenced_type, result_range);
    } else {
      GenerateCopy(reference.heap_reference());
2508 2509 2510
      assembler().Emit(LoadReferenceInstruction{referenced_type});
      DCHECK_EQ(1, LoweredSlotCount(referenced_type));
      return VisitResult(referenced_type, assembler().TopRange(1));
2511
    }
2512 2513 2514 2515
  } else if (reference.IsBitFieldAccess()) {
    // First fetch the bitfield struct, then get the bits out of it.
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
2516 2517
    assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
                                             reference.bit_field()});
2518
    return VisitResult(*reference.ReferencedType(), assembler().TopRange(1));
2519
  } else {
2520
    if (reference.IsHeapSlice()) {
2521 2522 2523
      ReportError(
          "fetching a value directly from an indexed field isn't allowed");
    }
2524 2525 2526
    DCHECK(reference.IsCallAccess());
    return GenerateCall(reference.eval_function(),
                        Arguments{reference.call_arguments(), {}});
2527 2528 2529
  }
}

2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541
void ImplementationVisitor::GenerateAssignToLocation(
    const LocationReference& reference, const VisitResult& assignment_value) {
  if (reference.IsCallAccess()) {
    Arguments arguments{reference.call_arguments(), {}};
    arguments.parameters.push_back(assignment_value);
    GenerateCall(reference.assign_function(), arguments);
  } else if (reference.IsVariableAccess()) {
    VisitResult variable = reference.variable();
    VisitResult converted_value =
        GenerateImplicitConvert(variable.type(), assignment_value);
    assembler().Poke(variable.stack_range(), converted_value.stack_range(),
                     variable.type());
2542 2543 2544 2545 2546 2547

    // Local variables are detected by the existence of a binding. Assignment
    // to local variables is recorded to support lint errors.
    if (reference.binding()) {
      (*reference.binding())->SetWritten();
    }
2548
  } else if (reference.IsHeapSlice()) {
2549
    ReportError("assigning a value directly to an indexed field isn't allowed");
2550
  } else if (reference.IsHeapReference()) {
2551
    const Type* referenced_type = *reference.ReferencedType();
2552 2553 2554
    if (reference.IsConst()) {
      Error("cannot assign to const value of type ", *referenced_type).Throw();
    }
2555 2556 2557 2558 2559
    if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
      GenerateCall(
          QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                        "StoreFloat64OrHole"),
          Arguments{{reference.heap_reference(), assignment_value}, {}});
2560 2561
    } else if (auto struct_type = referenced_type->StructSupertype()) {
      if (!assignment_value.type()->IsSubtypeOf(referenced_type)) {
2562 2563 2564
        ReportError("Cannot assign to ", *referenced_type,
                    " with value of type ", *assignment_value.type());
      }
2565
      for (const Field& field : (*struct_type)->fields()) {
2566
        const std::string& fieldname = field.name_and_type.name;
2567 2568 2569
        // Allow assignment of structs even if they contain const fields.
        // Const on struct fields just disallows direct writes to them.
        bool ignore_stuct_field_constness = true;
2570
        GenerateAssignToLocation(
2571 2572
            GenerateFieldAccess(reference, fieldname,
                                ignore_stuct_field_constness),
2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586
            ProjectStructField(assignment_value, fieldname));
      }
    } else {
      GenerateCopy(reference.heap_reference());
      VisitResult converted_assignment_value =
          GenerateImplicitConvert(referenced_type, assignment_value);
      if (referenced_type == TypeOracle::GetFloat64Type()) {
        VisitResult silenced_float_value = GenerateCall(
            "Float64SilenceNaN", Arguments{{assignment_value}, {}});
        assembler().Poke(converted_assignment_value.stack_range(),
                         silenced_float_value.stack_range(), referenced_type);
      }
      assembler().Emit(StoreReferenceInstruction{referenced_type});
    }
2587
  } else if (reference.IsBitFieldAccess()) {
2588 2589
    // First fetch the bitfield struct, then set the updated bits, then store
    // it back to where we found it.
2590 2591 2592
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
    VisitResult converted_value =
2593
        GenerateImplicitConvert(*reference.ReferencedType(), assignment_value);
2594 2595 2596 2597 2598
    VisitResult updated_bit_field_struct =
        GenerateSetBitField(bit_field_struct.type(), reference.bit_field(),
                            bit_field_struct, converted_value);
    GenerateAssignToLocation(reference.bit_field_struct_location(),
                             updated_bit_field_struct);
2599
  } else {
2600
    DCHECK(reference.IsTemporary());
2601
    ReportError("cannot assign to const-bound or temporary ",
2602
                reference.temporary_description());
2603 2604 2605
  }
}

2606
VisitResult ImplementationVisitor::GeneratePointerCall(
2607
    Expression* callee, const Arguments& arguments, bool is_tailcall) {
2608
  StackScope scope(this);
2609
  TypeVector parameter_types(arguments.parameters.ComputeTypeVector());
2610
  VisitResult callee_result = Visit(callee);
2611
  if (!callee_result.type()->IsBuiltinPointerType()) {
2612 2613
    std::stringstream stream;
    stream << "Expected a function pointer type but found "
2614
           << *callee_result.type();
2615 2616
    ReportError(stream.str());
  }
2617 2618
  const BuiltinPointerType* type =
      BuiltinPointerType::cast(callee_result.type());
2619

2620 2621 2622
  if (type->parameter_types().size() != parameter_types.size()) {
    std::stringstream stream;
    stream << "parameter count mismatch calling function pointer with Type: "
2623
           << *type << " - expected "
2624 2625 2626 2627 2628 2629
           << std::to_string(type->parameter_types().size()) << ", found "
           << std::to_string(parameter_types.size());
    ReportError(stream.str());
  }

  ParameterTypes types{type->parameter_types(), false};
2630 2631
  Signature sig;
  sig.parameter_types = types;
2632
  if (!IsCompatibleSignature(sig, parameter_types, 0)) {
2633 2634 2635 2636 2637 2638 2639
    std::stringstream stream;
    stream << "parameters do not match function pointer signature. Expected: ("
           << type->parameter_types() << ") but got: (" << parameter_types
           << ")";
    ReportError(stream.str());
  }

2640 2641
  callee_result = GenerateCopy(callee_result);
  StackRange arg_range = assembler().TopRange(0);
2642 2643
  for (size_t current = 0; current < arguments.parameters.size(); ++current) {
    const Type* to_type = type->parameter_types()[current];
2644 2645 2646
    arg_range.Extend(
        GenerateImplicitConvert(to_type, arguments.parameters[current])
            .stack_range());
2647 2648
  }

2649 2650
  assembler().Emit(
      CallBuiltinPointerInstruction{is_tailcall, type, arg_range.Size()});
2651

2652 2653
  if (is_tailcall) {
    return VisitResult::NeverResult();
2654
  }
2655 2656
  DCHECK_EQ(1, LoweredSlotCount(type->return_type()));
  return scope.Yield(VisitResult(type->return_type(), assembler().TopRange(1)));
2657 2658
}

2659 2660 2661
void ImplementationVisitor::AddCallParameter(
    Callable* callable, VisitResult parameter, const Type* parameter_type,
    std::vector<VisitResult>* converted_arguments, StackRange* argument_range,
2662 2663 2664 2665 2666 2667 2668 2669
    std::vector<std::string>* constexpr_arguments, bool inline_macro) {
  VisitResult converted;
  if ((converted_arguments->size() < callable->signature().implicit_count) &&
      parameter.type()->IsTopType()) {
    converted = GenerateCopy(parameter);
  } else {
    converted = GenerateImplicitConvert(parameter_type, parameter);
  }
2670
  converted_arguments->push_back(converted);
2671
  if (!inline_macro) {
2672 2673 2674 2675 2676 2677 2678
    if (converted.IsOnStack()) {
      argument_range->Extend(converted.stack_range());
    } else {
      constexpr_arguments->push_back(converted.constexpr_value());
    }
  }
}
2679

2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702
namespace {
// Produces printable C++ expressions for the lowest and highest InstanceType
// value covered by |class_type|. Reports an error when the class has no
// generated instance-type range.
std::pair<std::string, std::string> GetClassInstanceTypeRange(
    const ClassType* class_type) {
  std::pair<std::string, std::string> result;
  if (class_type->InstanceTypeRange()) {
    auto range = *class_type->InstanceTypeRange();
    // Both bounds are rendered as explicit casts to InstanceType.
    auto make_cast = [](auto raw_value) {
      return "static_cast<InstanceType>(" + std::to_string(raw_value) + ")";
    };
    result.first = make_cast(range.first);
    result.second = make_cast(range.second);
  } else {
    ReportError(
        "%Min/MaxInstanceType must take a class type that is either a string "
        "or has a generated instance type range");
  }
  return result;
}
}  // namespace

2703 2704 2705 2706
VisitResult ImplementationVisitor::GenerateCall(
    Callable* callable, base::Optional<LocationReference> this_reference,
    Arguments arguments, const TypeVector& specialization_types,
    bool is_tailcall) {
2707 2708 2709 2710 2711
  CHECK(callable->Position().source.IsValid());
  if (auto stream = CurrentFileStreams::Get()) {
    stream->required_builtin_includes.insert(callable->Position().source);
  }

2712
  const Type* return_type = callable->signature().return_type;
2713

2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725
  if (is_tailcall) {
    if (Builtin* builtin = Builtin::DynamicCast(CurrentCallable::Get())) {
      const Type* outer_return_type = builtin->signature().return_type;
      if (!return_type->IsSubtypeOf(outer_return_type)) {
        Error("Cannot tailcall, type of result is ", *return_type,
              " but should be a subtype of ", *outer_return_type, ".");
      }
    } else {
      Error("Tail calls are only allowed from builtins");
    }
  }

2726
  bool inline_macro = callable->ShouldBeInlined(output_type_);
2727 2728 2729
  std::vector<VisitResult> implicit_arguments;
  for (size_t i = 0; i < callable->signature().implicit_count; ++i) {
    std::string implicit_name = callable->signature().parameter_names[i]->value;
2730 2731
    base::Optional<Binding<LocalValue>*> val =
        TryLookupLocalValue(implicit_name);
2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751
    if (val) {
      implicit_arguments.push_back(
          GenerateFetchFromLocation((*val)->GetLocationReference(*val)));
    } else {
      VisitResult unititialized = VisitResult::TopTypeResult(
          "implicit parameter '" + implicit_name +
              "' is not defined when invoking " + callable->ReadableName() +
              " at " + PositionAsString(CurrentSourcePosition::Get()),
          callable->signature().parameter_types.types[i]);
      implicit_arguments.push_back(unititialized);
    }
    const Type* type = implicit_arguments.back().type();
    if (const TopType* top_type = TopType::DynamicCast(type)) {
      if (!callable->IsMacro() || callable->IsExternal()) {
        ReportError(
            "unititialized implicit parameters can only be passed to "
            "Torque-defined macros: the ",
            top_type->reason());
      }
      inline_macro = true;
2752
    }
2753 2754 2755 2756 2757 2758 2759 2760 2761
  }

  std::vector<VisitResult> converted_arguments;
  StackRange argument_range = assembler().TopRange(0);
  std::vector<std::string> constexpr_arguments;

  size_t current = 0;
  for (; current < callable->signature().implicit_count; ++current) {
    AddCallParameter(callable, implicit_arguments[current],
2762 2763
                     callable->signature().parameter_types.types[current],
                     &converted_arguments, &argument_range,
2764
                     &constexpr_arguments, inline_macro);
2765 2766
  }

2767 2768 2769
  if (this_reference) {
    DCHECK(callable->IsMethod());
    Method* method = Method::cast(callable);
2770 2771
    // By now, the this reference should either be a variable, a temporary or
    // a Slice. In either case the fetch of the VisitResult should succeed.
2772
    VisitResult this_value = this_reference->GetVisitResult();
2773
    if (inline_macro) {
2774 2775
      if (!this_value.type()->IsSubtypeOf(method->aggregate_type())) {
        ReportError("this parameter must be a subtype of ",
2776
                    *method->aggregate_type(), " but it is of type ",
2777
                    *this_value.type());
2778 2779 2780 2781
      }
    } else {
      AddCallParameter(callable, this_value, method->aggregate_type(),
                       &converted_arguments, &argument_range,
2782
                       &constexpr_arguments, inline_macro);
2783 2784 2785 2786 2787 2788 2789 2790 2791
    }
    ++current;
  }

  for (auto arg : arguments.parameters) {
    const Type* to_type = (current >= callable->signature().types().size())
                              ? TypeOracle::GetObjectType()
                              : callable->signature().types()[current++];
    AddCallParameter(callable, arg, to_type, &converted_arguments,
2792
                     &argument_range, &constexpr_arguments, inline_macro);
2793
  }
2794

2795
  size_t label_count = callable->signature().labels.size();
2796 2797
  if (label_count != arguments.labels.size()) {
    std::stringstream s;
2798 2799 2800
    s << "unexpected number of otherwise labels for "
      << callable->ReadableName() << " (expected "
      << std::to_string(label_count) << " found "
2801
      << std::to_string(arguments.labels.size()) << ")";
2802 2803
    ReportError(s.str());
  }
2804

2805
  if (callable->IsTransitioning()) {
2806
    if (!CurrentCallable::Get()->IsTransitioning()) {
2807
      std::stringstream s;
2808
      s << *CurrentCallable::Get()
2809 2810 2811 2812 2813 2814
        << " isn't marked transitioning but calls the transitioning "
        << *callable;
      ReportError(s.str());
    }
  }

2815
  if (auto* builtin = Builtin::DynamicCast(callable)) {
2816 2817 2818 2819
    base::Optional<Block*> catch_block = GetCatchBlock();
    assembler().Emit(CallBuiltinInstruction{
        is_tailcall, builtin, argument_range.Size(), catch_block});
    GenerateCatchBlock(catch_block);
2820 2821 2822 2823
    if (is_tailcall) {
      return VisitResult::NeverResult();
    } else {
      size_t slot_count = LoweredSlotCount(return_type);
2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838
      if (builtin->IsStub()) {
        if (slot_count < 1 || slot_count > 2) {
          ReportError(
              "Builtin with stub linkage is expected to return one or two "
              "values but returns ",
              slot_count);
        }
      } else {
        if (slot_count != 1) {
          ReportError(
              "Builtin with JS linkage is expected to return one value but "
              "returns ",
              slot_count);
        }
      }
2839
      return VisitResult(return_type, assembler().TopRange(slot_count));
2840
    }
2841 2842 2843
  } else if (auto* macro = Macro::DynamicCast(callable)) {
    if (is_tailcall) {
      ReportError("can't tail call a macro");
2844
    }
2845

2846
    macro->SetUsed();
2847 2848 2849

    // If we're currently generating a C++ macro and it's calling another macro,
    // then we need to make sure that we also generate C++ code for the called
2850
    // macro within the same -inl.inc file.
2851 2852 2853
    if ((output_type_ == OutputType::kCC ||
         output_type_ == OutputType::kCCDebug) &&
        !inline_macro) {
2854
      if (auto* torque_macro = TorqueMacro::DynamicCast(macro)) {
2855 2856 2857
        auto* streams = CurrentFileStreams::Get();
        SourceId file = streams ? streams->file : SourceId::Invalid();
        GlobalContext::EnsureInCCOutputList(torque_macro, file);
2858 2859 2860
      }
    }

2861
    // TODO(torque-builder): Consider a function builder here.
2862 2863 2864
    if (return_type->IsConstexpr()) {
      DCHECK_EQ(0, arguments.labels.size());
      std::stringstream result;
2865
      result << "(";
2866
      bool first = true;
2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890
      switch (output_type_) {
        case OutputType::kCSA: {
          if (auto* extern_macro = ExternMacro::DynamicCast(macro)) {
            result << extern_macro->external_assembler_name() << "(state_)."
                   << extern_macro->ExternalName() << "(";
          } else {
            result << macro->ExternalName() << "(state_";
            first = false;
          }
          break;
        }
        case OutputType::kCC: {
          auto* extern_macro = ExternMacro::DynamicCast(macro);
          CHECK_NOT_NULL(extern_macro);
          result << extern_macro->CCName() << "(";
          break;
        }
        case OutputType::kCCDebug: {
          auto* extern_macro = ExternMacro::DynamicCast(macro);
          CHECK_NOT_NULL(extern_macro);
          result << extern_macro->CCDebugName() << "(accessor";
          first = false;
          break;
        }
2891
      }
2892
      for (VisitResult arg : converted_arguments) {
2893 2894 2895 2896 2897 2898 2899 2900 2901
        DCHECK(!arg.IsOnStack());
        if (!first) {
          result << ", ";
        }
        first = false;
        result << arg.constexpr_value();
      }
      result << "))";
      return VisitResult(return_type, result.str());
2902
    } else if (inline_macro) {
2903 2904 2905 2906
      std::vector<Block*> label_blocks;
      for (Binding<LocalLabel>* label : arguments.labels) {
        label_blocks.push_back(label->block);
      }
2907 2908
      return InlineMacro(macro, this_reference, converted_arguments,
                         label_blocks);
2909 2910
    } else if (arguments.labels.empty() &&
               return_type != TypeOracle::GetNeverType()) {
2911 2912 2913 2914
      base::Optional<Block*> catch_block = GetCatchBlock();
      assembler().Emit(
          CallCsaMacroInstruction{macro, constexpr_arguments, catch_block});
      GenerateCatchBlock(catch_block);
2915 2916 2917 2918 2919 2920
      size_t return_slot_count = LoweredSlotCount(return_type);
      return VisitResult(return_type, assembler().TopRange(return_slot_count));
    } else {
      base::Optional<Block*> return_continuation;
      if (return_type != TypeOracle::GetNeverType()) {
        return_continuation = assembler().NewBlock();
2921 2922
      }

2923 2924 2925 2926 2927
      std::vector<Block*> label_blocks;

      for (size_t i = 0; i < label_count; ++i) {
        label_blocks.push_back(assembler().NewBlock());
      }
2928
      base::Optional<Block*> catch_block = GetCatchBlock();
2929
      assembler().Emit(CallCsaMacroAndBranchInstruction{
2930 2931 2932
          macro, constexpr_arguments, return_continuation, label_blocks,
          catch_block});
      GenerateCatchBlock(catch_block);
2933 2934

      for (size_t i = 0; i < label_count; ++i) {
2935
        Binding<LocalLabel>* label = arguments.labels[i];
2936 2937
        size_t callee_label_parameters =
            callable->signature().labels[i].types.size();
2938
        if (label->parameter_types.size() != callee_label_parameters) {
2939 2940 2941
          std::stringstream s;
          s << "label " << label->name()
            << " doesn't have the right number of parameters (found "
2942
            << std::to_string(label->parameter_types.size()) << " expected "
2943 2944 2945 2946 2947
            << std::to_string(callee_label_parameters) << ")";
          ReportError(s.str());
        }
        assembler().Bind(label_blocks[i]);
        assembler().Goto(
2948
            label->block,
2949 2950 2951 2952
            LowerParameterTypes(callable->signature().labels[i].types).size());

        size_t j = 0;
        for (auto t : callable->signature().labels[i].types) {
2953
          const Type* parameter_type = label->parameter_types[j];
2954 2955 2956 2957
          if (!t->IsSubtypeOf(parameter_type)) {
            ReportError("mismatch of label parameters (label expects ",
                        *parameter_type, " but macro produces ", *t,
                        " for parameter ", i + 1, ")");
2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972
          }
          j++;
        }
      }

      if (return_continuation) {
        assembler().Bind(*return_continuation);
        size_t return_slot_count = LoweredSlotCount(return_type);
        return VisitResult(return_type,
                           assembler().TopRange(return_slot_count));
      } else {
        return VisitResult::NeverResult();
      }
    }
  } else if (auto* runtime_function = RuntimeFunction::DynamicCast(callable)) {
2973 2974 2975 2976
    base::Optional<Block*> catch_block = GetCatchBlock();
    assembler().Emit(CallRuntimeInstruction{
        is_tailcall, runtime_function, argument_range.Size(), catch_block});
    GenerateCatchBlock(catch_block);
2977
    if (is_tailcall || return_type == TypeOracle::GetNeverType()) {
2978 2979 2980 2981
      return VisitResult::NeverResult();
    } else {
      size_t slot_count = LoweredSlotCount(return_type);
      DCHECK_LE(slot_count, 1);
2982
      // TODO(turbofan): Actually, runtime functions have to return a value, so
2983 2984 2985
      // we should assert slot_count == 1 here.
      return VisitResult(return_type, assembler().TopRange(slot_count));
    }
2986
  } else if (auto* intrinsic = Intrinsic::DynamicCast(callable)) {
2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998
    if (intrinsic->ExternalName() == "%SizeOf") {
      if (specialization_types.size() != 1) {
        ReportError("%SizeOf must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      std::string size_string;
      if (base::Optional<std::tuple<size_t, std::string>> size = SizeOf(type)) {
        size_string = std::get<1>(*size);
      } else {
        Error("size of ", *type, " is not known.");
      }
      return VisitResult(return_type, size_string);
2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040
    } else if (intrinsic->ExternalName() == "%ClassHasMapConstant") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%ClassHasMapConstant must take a class type parameter");
      }
      // If the class isn't actually used as the parameter to a TNode,
      // then we can't rely on the class existing in C++ or being of the same
      // type (e.g. it could be a template), so don't use the template CSA
      // machinery for accessing the class' map.
      if (class_type->name() != class_type->GetGeneratedTNodeTypeName()) {
        return VisitResult(return_type, std::string("false"));
      } else {
        return VisitResult(
            return_type,
            std::string("CodeStubAssembler(state_).ClassHasMapConstant<") +
                class_type->name() + ">()");
      }
    } else if (intrinsic->ExternalName() == "%MinInstanceType") {
      if (specialization_types.size() != 1) {
        ReportError("%MinInstanceType must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%MinInstanceType must take a class type parameter");
      }
      std::pair<std::string, std::string> instance_types =
          GetClassInstanceTypeRange(class_type);
      return VisitResult(return_type, instance_types.first);
    } else if (intrinsic->ExternalName() == "%MaxInstanceType") {
      if (specialization_types.size() != 1) {
        ReportError("%MaxInstanceType must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%MaxInstanceType must take a class type parameter");
      }
      std::pair<std::string, std::string> instance_types =
          GetClassInstanceTypeRange(class_type);
      return VisitResult(return_type, instance_types.second);
3041
    } else if (intrinsic->ExternalName() == "%RawConstexprCast") {
3042 3043 3044 3045 3046 3047
      if (intrinsic->signature().parameter_types.types.size() != 1 ||
          constexpr_arguments.size() != 1) {
        ReportError(
            "%RawConstexprCast must take a single parameter with constexpr "
            "type");
      }
3048 3049 3050 3051 3052 3053
      if (!return_type->IsConstexpr()) {
        std::stringstream s;
        s << *return_type
          << " return type for %RawConstexprCast is not constexpr";
        ReportError(s.str());
      }
3054 3055 3056 3057 3058
      std::stringstream result;
      result << "static_cast<" << return_type->GetGeneratedTypeName() << ">(";
      result << constexpr_arguments[0];
      result << ")";
      return VisitResult(return_type, result.str());
3059 3060 3061 3062 3063 3064 3065 3066 3067
    } else if (intrinsic->ExternalName() == "%IndexedFieldLength") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%IndexedFieldLength must take a class type parameter");
      }
      const Field& field =
          class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
      return GenerateArrayLength(VisitResult(type, argument_range), field);
3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084
    } else if (intrinsic->ExternalName() == "%MakeLazy") {
      if (specialization_types[0]->IsStructType()) {
        ReportError("%MakeLazy can't use macros that return structs");
      }
      std::string getter_name = StringLiteralUnquote(constexpr_arguments[0]);

      // Normally the parser would split namespace names for us, but we
      // sidestepped it by putting the macro name in a string literal.
      QualifiedName qualified_getter_name = QualifiedName::Parse(getter_name);

      // converted_arguments contains all of the arguments to %MakeLazy. We're
      // looking for a function that takes all but the first.
      Arguments arguments_to_getter;
      arguments_to_getter.parameters.insert(
          arguments_to_getter.parameters.begin(),
          converted_arguments.begin() + 1, converted_arguments.end());

3085
      Callable* callable_macro = LookupCallable(
3086 3087
          qualified_getter_name, Declarations::Lookup(qualified_getter_name),
          arguments_to_getter, {});
3088
      Macro* getter = Macro::DynamicCast(callable_macro);
3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114
      if (!getter || getter->IsMethod()) {
        ReportError(
            "%MakeLazy expects a macro, not builtin or other type of callable");
      }
      if (!getter->signature().labels.empty()) {
        ReportError("%MakeLazy requires a macro with no labels");
      }
      if (!getter->signature().return_type->IsSubtypeOf(
              specialization_types[0])) {
        ReportError("%MakeLazy expected return type ", *specialization_types[0],
                    " but found ", *getter->signature().return_type);
      }
      if (getter->signature().implicit_count > 0) {
        ReportError("Implicit parameters are not yet supported in %MakeLazy");
      }

      getter->SetUsed();  // Prevent warnings about unused macros.

      // Now that we've looked up the getter macro, we have to convert the
      // arguments again, so that, for example, constexpr arguments can be
      // coerced to non-constexpr types and put on the stack.

      std::vector<VisitResult> converted_arguments_for_getter;
      StackRange argument_range_for_getter = assembler().TopRange(0);
      std::vector<std::string> constexpr_arguments_for_getter;

3115
      size_t arg_count = 0;
3116
      for (auto arg : arguments_to_getter.parameters) {
3117 3118
        DCHECK_LT(arg_count, getter->signature().types().size());
        const Type* to_type = getter->signature().types()[arg_count++];
3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129
        AddCallParameter(getter, arg, to_type, &converted_arguments_for_getter,
                         &argument_range_for_getter,
                         &constexpr_arguments_for_getter,
                         /*inline_macro=*/false);
      }

      // Now that the arguments are prepared, emit the instruction that consumes
      // them.
      assembler().Emit(MakeLazyNodeInstruction{getter, return_type,
                                               constexpr_arguments_for_getter});
      return VisitResult(return_type, assembler().TopRange(1));
3130 3131 3132 3133
    } else if (intrinsic->ExternalName() == "%FieldSlice") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
3134
        ReportError("The first type parameter to %FieldSlice must be a class");
3135 3136 3137
      }
      const Field& field =
          class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
3138 3139 3140 3141 3142 3143 3144 3145 3146 3147
      const Type* expected_slice_type =
          field.const_qualified
              ? TypeOracle::GetConstSliceType(field.name_and_type.type)
              : TypeOracle::GetMutableSliceType(field.name_and_type.type);
      const Type* declared_slice_type = specialization_types[1];
      if (expected_slice_type != declared_slice_type) {
        Error(
            "The second type parameter to %FieldSlice must be the precise "
            "slice type for the named field");
      }
3148 3149 3150 3151 3152 3153 3154
      LocationReference ref = GenerateFieldReference(
          VisitResult(type, argument_range), field, class_type,
          /*treat_optional_as_indexed=*/true);
      if (!ref.IsHeapSlice()) {
        ReportError("%FieldSlice expected an indexed or optional field");
      }
      return ref.heap_slice();
3155
    } else {
3156 3157
      assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
                                                constexpr_arguments});
3158 3159 3160 3161
      size_t return_slot_count =
          LoweredSlotCount(intrinsic->signature().return_type);
      return VisitResult(return_type, assembler().TopRange(return_slot_count));
    }
3162 3163
  } else {
    UNREACHABLE();
3164 3165 3166
  }
}

3167 3168 3169 3170
VisitResult ImplementationVisitor::GenerateCall(
    const QualifiedName& callable_name, Arguments arguments,
    const TypeVector& specialization_types, bool is_tailcall) {
  Callable* callable =
3171 3172
      LookupCallable(callable_name, Declarations::Lookup(callable_name),
                     arguments, specialization_types);
3173 3174 3175 3176
  return GenerateCall(callable, base::nullopt, arguments, specialization_types,
                      is_tailcall);
}

3177 3178
VisitResult ImplementationVisitor::Visit(CallExpression* expr,
                                         bool is_tailcall) {
3179
  StackScope scope(this);
3180 3181 3182 3183 3184

  if (expr->callee->name->value == "&" && expr->arguments.size() == 1) {
    if (auto* loc_expr = LocationExpression::DynamicCast(expr->arguments[0])) {
      LocationReference ref = GetLocationReference(loc_expr);
      if (ref.IsHeapReference()) return scope.Yield(ref.heap_reference());
3185
      if (ref.IsHeapSlice()) return scope.Yield(ref.heap_slice());
3186 3187 3188 3189
    }
    ReportError("Unable to create a heap reference.");
  }

3190
  Arguments arguments;
3191 3192
  QualifiedName name = QualifiedName(expr->callee->namespace_qualification,
                                     expr->callee->name->value);
3193
  TypeVector specialization_types =
3194
      TypeVisitor::ComputeTypeVector(expr->callee->generic_arguments);
3195 3196 3197 3198
  bool has_template_arguments = !specialization_types.empty();
  for (Expression* arg : expr->arguments)
    arguments.parameters.push_back(Visit(arg));
  arguments.labels = LabelsFromIdentifiers(expr->labels);
3199 3200
  if (!has_template_arguments && name.namespace_qualification.empty() &&
      TryLookupLocalValue(name.name)) {
3201
    return scope.Yield(
3202
        GeneratePointerCall(expr->callee, arguments, is_tailcall));
3203
  } else {
3204 3205 3206 3207
    if (GlobalContext::collect_language_server_data()) {
      Callable* callable = LookupCallable(name, Declarations::Lookup(name),
                                          arguments, specialization_types);
      LanguageServerData::AddDefinition(expr->callee->name->pos,
3208
                                        callable->IdentifierPosition());
3209
    }
3210 3211 3212 3213 3214 3215
    if (GlobalContext::collect_kythe_data()) {
      Callable* callable = LookupCallable(name, Declarations::Lookup(name),
                                          arguments, specialization_types);
      Callable* caller = CurrentCallable::Get();
      KytheData::AddCall(caller, expr->callee->name->pos, callable);
    }
3216 3217 3218 3219 3220 3221 3222 3223 3224 3225
    if (expr->callee->name->value == "!" && arguments.parameters.size() == 1) {
      PropagateBitfieldMark(expr->arguments[0], expr);
    }
    if (expr->callee->name->value == "==" && arguments.parameters.size() == 2) {
      if (arguments.parameters[0].type()->IsConstexpr()) {
        PropagateBitfieldMark(expr->arguments[1], expr);
      } else if (arguments.parameters[1].type()->IsConstexpr()) {
        PropagateBitfieldMark(expr->arguments[0], expr);
      }
    }
3226 3227
    return scope.Yield(
        GenerateCall(name, arguments, specialization_types, is_tailcall));
3228 3229 3230
  }
}

3231 3232 3233
VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
  StackScope scope(this);
  Arguments arguments;
3234
  std::string method_name = expr->method->name->value;
3235
  TypeVector specialization_types =
3236
      TypeVisitor::ComputeTypeVector(expr->method->generic_arguments);
3237 3238 3239
  LocationReference target = GetLocationReference(expr->target);
  if (!target.IsVariableAccess()) {
    VisitResult result = GenerateFetchFromLocation(target);
3240
    target = LocationReference::Temporary(result, "this parameter");
3241 3242
  }
  const AggregateType* target_type =
3243
      (*target.ReferencedType())->AggregateSupertype().value_or(nullptr);
3244 3245 3246 3247
  if (!target_type) {
    ReportError("target of method call not a struct or class type");
  }
  for (Expression* arg : expr->arguments) {
3248
    arguments.parameters.push_back(Visit(arg));
3249
  }
3250
  arguments.labels = LabelsFromIdentifiers(expr->labels);
3251
  TypeVector argument_types = arguments.parameters.ComputeTypeVector();
3252 3253
  DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
  QualifiedName qualified_name = QualifiedName(method_name);
3254
  Callable* callable = LookupMethod(method_name, target_type, arguments, {});
3255 3256 3257 3258
  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(expr->method->name->pos,
                                      callable->IdentifierPosition());
  }
3259 3260 3261 3262
  if (GlobalContext::collect_kythe_data()) {
    Callable* caller = CurrentCallable::Get();
    KytheData::AddCall(caller, expr->method->name->pos, callable);
  }
3263 3264 3265
  return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
}

3266 3267 3268
VisitResult ImplementationVisitor::Visit(IntrinsicCallExpression* expr) {
  StackScope scope(this);
  Arguments arguments;
3269 3270
  TypeVector specialization_types =
      TypeVisitor::ComputeTypeVector(expr->generic_arguments);
3271 3272 3273
  for (Expression* arg : expr->arguments)
    arguments.parameters.push_back(Visit(arg));
  return scope.Yield(
3274
      GenerateCall(expr->name->value, arguments, specialization_types, false));
3275 3276
}

3277
void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
3278 3279
                                           Block* true_block,
                                           Block* false_block) {
3280 3281
  DCHECK_EQ(condition,
            VisitResult(TypeOracle::GetBoolType(), assembler().TopRange(1)));
3282 3283 3284
  assembler().Branch(true_block, false_block);
}

3285 3286 3287 3288
VisitResult ImplementationVisitor::GenerateBoolConstant(bool constant) {
  return GenerateImplicitConvert(TypeOracle::GetBoolType(),
                                 VisitResult(TypeOracle::GetConstexprBoolType(),
                                             constant ? "true" : "false"));
3289 3290
}

3291 3292 3293
void ImplementationVisitor::GenerateExpressionBranch(Expression* expression,
                                                     Block* true_block,
                                                     Block* false_block) {
3294 3295 3296 3297 3298
  StackScope stack_scope(this);
  VisitResult expression_result = this->Visit(expression);
  expression_result = stack_scope.Yield(
      GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
  GenerateBranch(expression_result, true_block, false_block);
3299 3300
}

3301
VisitResult ImplementationVisitor::GenerateImplicitConvert(
3302
    const Type* destination_type, VisitResult source) {
3303
  StackScope scope(this);
3304 3305 3306 3307
  if (source.type() == TypeOracle::GetNeverType()) {
    ReportError("it is not allowed to use a value of type never");
  }

3308
  if (destination_type == source.type()) {
3309
    return scope.Yield(GenerateCopy(source));
3310
  }
3311

3312 3313
  if (auto from = TypeOracle::ImplicitlyConvertableFrom(destination_type,
                                                        source.type())) {
3314 3315
    return scope.Yield(GenerateCall(kFromConstexprMacroName,
                                    Arguments{{source}, {}},
3316
                                    {destination_type, *from}, false));
3317
  } else if (IsAssignableFrom(destination_type, source.type())) {
3318
    source.SetType(destination_type);
3319
    return scope.Yield(GenerateCopy(source));
3320 3321
  } else {
    std::stringstream s;
3322 3323 3324 3325 3326 3327 3328
    if (const TopType* top_type = TopType::DynamicCast(source.type())) {
      s << "undefined expression of type " << *destination_type << ": the "
        << top_type->reason();
    } else {
      s << "cannot use expression of type " << *source.type()
        << " as a value of type " << *destination_type;
    }
3329 3330 3331 3332
    ReportError(s.str());
  }
}

3333
StackRange ImplementationVisitor::GenerateLabelGoto(
3334 3335
    LocalLabel* label, base::Optional<StackRange> arguments) {
  return assembler().Goto(label->block, arguments ? arguments->Size() : 0);
3336 3337
}

3338
std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
3339
    const std::vector<Identifier*>& names) {
3340
  std::vector<Binding<LocalLabel>*> result;
3341
  result.reserve(names.size());
3342
  for (const auto& name : names) {
3343 3344 3345 3346 3347 3348 3349 3350 3351 3352
    Binding<LocalLabel>* label = LookupLabel(name->value);
    result.push_back(label);

    // Link up labels in "otherwise" part of the call expression with
    // either the label in the signature of the calling macro or the label
    // block ofa surrounding "try".
    if (GlobalContext::collect_language_server_data()) {
      LanguageServerData::AddDefinition(name->pos,
                                        label->declaration_position());
    }
3353
    // TODO(v8:12261): Might have to track KytheData here.
3354 3355 3356 3357
  }
  return result;
}

3358 3359 3360
StackRange ImplementationVisitor::LowerParameter(
    const Type* type, const std::string& parameter_name,
    Stack<std::string>* lowered_parameters) {
3361
  if (base::Optional<const StructType*> struct_type = type->StructSupertype()) {
3362
    StackRange range = lowered_parameters->TopRange(0);
3363
    for (auto& field : (*struct_type)->fields()) {
3364
      StackRange parameter_range = LowerParameter(
3365 3366
          field.name_and_type.type,
          parameter_name + "." + field.name_and_type.name, lowered_parameters);
3367 3368 3369 3370 3371 3372 3373 3374 3375
      range.Extend(parameter_range);
    }
    return range;
  } else {
    lowered_parameters->Push(parameter_name);
    return lowered_parameters->TopRange(1);
  }
}

3376 3377 3378
void ImplementationVisitor::LowerLabelParameter(
    const Type* type, const std::string& parameter_name,
    std::vector<std::string>* lowered_parameters) {
3379 3380
  if (base::Optional<const StructType*> struct_type = type->StructSupertype()) {
    for (auto& field : (*struct_type)->fields()) {
3381 3382 3383 3384 3385 3386 3387 3388 3389 3390
      LowerLabelParameter(
          field.name_and_type.type,
          "&((*" + parameter_name + ")." + field.name_and_type.name + ")",
          lowered_parameters);
    }
  } else {
    lowered_parameters->push_back(parameter_name);
  }
}

3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405
std::string ImplementationVisitor::ExternalLabelName(
    const std::string& label_name) {
  return "label_" + label_name;
}

// Returns the external (C++-level) name for the i-th parameter of a label.
std::string ImplementationVisitor::ExternalLabelParameterName(
    const std::string& label_name, size_t i) {
  // Compose "label_<name>_parameter_<i>" piece by piece.
  std::string result = "label_";
  result += label_name;
  result += "_parameter_";
  result += std::to_string(i);
  return result;
}

// Returns the external (C++-level) name used for a Torque parameter.
std::string ImplementationVisitor::ExternalParameterName(
    const std::string& name) {
  return "p_" + name;
}

3406 3407 3408
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable)
3409
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentFileStreams)
3410
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue)
3411 3412

bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
3413
                           size_t label_count) {
3414 3415 3416
  auto i = sig.parameter_types.types.begin() + sig.implicit_count;
  if ((sig.parameter_types.types.size() - sig.implicit_count) > types.size())
    return false;
3417
  if (sig.labels.size() != label_count) return false;
3418 3419 3420 3421 3422 3423 3424 3425 3426
  for (auto current : types) {
    if (i == sig.parameter_types.types.end()) {
      if (!sig.parameter_types.var_args) return false;
      if (!IsAssignableFrom(TypeOracle::GetObjectType(), current)) return false;
    } else {
      if (!IsAssignableFrom(*i++, current)) return false;
    }
  }
  return true;
3427 3428
}

3429 3430 3431
base::Optional<Block*> ImplementationVisitor::GetCatchBlock() {
  base::Optional<Block*> catch_block;
  if (base::Optional<Binding<LocalLabel>*> catch_handler =
3432
          TryLookupLabel(kCatchLabelName)) {
3433 3434 3435 3436 3437 3438 3439 3440 3441
    catch_block = assembler().NewBlock(base::nullopt, true);
  }
  return catch_block;
}

// Emits the body of a previously created catch block (see GetCatchBlock):
// fetch the pending message via GetAndResetPendingMessage and jump to the
// surrounding catch handler with the two resulting values.
void ImplementationVisitor::GenerateCatchBlock(
    base::Optional<Block*> catch_block) {
  if (catch_block) {
    base::Optional<Binding<LocalLabel>*> catch_handler =
        TryLookupLabel(kCatchLabelName);
    // Reset the local scopes to prevent the macro calls below from using the
    // current catch handler.
    BindingsManagersScope bindings_managers_scope;
    if (assembler().CurrentBlockIsComplete()) {
      assembler().Bind(*catch_block);
      GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                 "GetAndResetPendingMessage"),
                   Arguments{{}, {}}, {}, false);
      assembler().Goto((*catch_handler)->block, 2);
    } else {
      // The current block is still open, so emit the catch code in a
      // temporary block without disturbing the assembler's current position.
      CfgAssemblerScopedTemporaryBlock temp(&assembler(), *catch_block);
      GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                 "GetAndResetPendingMessage"),
                   Arguments{{}, {}}, {}, false);
      assembler().Goto((*catch_handler)->block, 2);
    }
  }
}
3461
void ImplementationVisitor::VisitAllDeclarables() {
3462
  CurrentCallable::Scope current_callable(nullptr);
3463 3464
  const std::vector<std::unique_ptr<Declarable>>& all_declarables =
      GlobalContext::AllDeclarables();
3465

3466 3467 3468
  // This has to be an index-based loop because all_declarables can be extended
  // during the loop.
  for (size_t i = 0; i < all_declarables.size(); ++i) {
3469 3470 3471 3472 3473
    try {
      Visit(all_declarables[i].get());
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
3474
  }
3475 3476 3477

  // Do the same for macros which generate C++ code.
  output_type_ = OutputType::kCC;
3478
  const std::vector<std::pair<TorqueMacro*, SourceId>>& cc_macros =
3479 3480 3481
      GlobalContext::AllMacrosForCCOutput();
  for (size_t i = 0; i < cc_macros.size(); ++i) {
    try {
3482
      Visit(static_cast<Declarable*>(cc_macros[i].first), cc_macros[i].second);
3483 3484 3485 3486
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
  }
3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497

  // Do the same for macros which generate C++ debug code.
  // The set of macros is the same as C++ macros.
  output_type_ = OutputType::kCCDebug;
  for (size_t i = 0; i < cc_macros.size(); ++i) {
    try {
      Visit(static_cast<Declarable*>(cc_macros[i].first), cc_macros[i].second);
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
  }
3498
  output_type_ = OutputType::kCSA;
3499 3500
}

3501 3502
void ImplementationVisitor::Visit(Declarable* declarable,
                                  base::Optional<SourceId> file) {
3503
  CurrentScope::Scope current_scope(declarable->ParentScope());
3504
  CurrentSourcePosition::Scope current_source_position(declarable->Position());
3505
  CurrentFileStreams::Scope current_file_streams(
3506 3507
      &GlobalContext::GeneratedPerFile(file ? *file
                                            : declarable->Position().source));
3508
  if (Callable* callable = Callable::DynamicCast(declarable)) {
3509
    if (!callable->ShouldGenerateExternalCode(output_type_))
3510 3511
      CurrentFileStreams::Get() = nullptr;
  }
3512
  switch (declarable->kind()) {
3513 3514 3515 3516
    case Declarable::kExternMacro:
      return Visit(ExternMacro::cast(declarable));
    case Declarable::kTorqueMacro:
      return Visit(TorqueMacro::cast(declarable));
3517 3518
    case Declarable::kMethod:
      return Visit(Method::cast(declarable));
3519 3520 3521 3522
    case Declarable::kBuiltin:
      return Visit(Builtin::cast(declarable));
    case Declarable::kTypeAlias:
      return Visit(TypeAlias::cast(declarable));
3523 3524
    case Declarable::kNamespaceConstant:
      return Visit(NamespaceConstant::cast(declarable));
3525
    case Declarable::kRuntimeFunction:
3526
    case Declarable::kIntrinsic:
3527
    case Declarable::kExternConstant:
3528
    case Declarable::kNamespace:
3529 3530
    case Declarable::kGenericCallable:
    case Declarable::kGenericType:
3531 3532 3533 3534
      return;
  }
}

3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548
std::string MachineTypeString(const Type* type) {
  if (type->IsSubtypeOf(TypeOracle::GetSmiType())) {
    return "MachineType::TaggedSigned()";
  }
  if (type->IsSubtypeOf(TypeOracle::GetHeapObjectType())) {
    return "MachineType::TaggedPointer()";
  }
  if (type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    return "MachineType::AnyTagged()";
  }
  return "MachineTypeOf<" + type->GetGeneratedTNodeTypeName() + ">::value";
}

void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
3549
    const std::string& output_directory) {
3550
  std::stringstream builtin_definitions;
3551
  std::string builtin_definitions_file_name = "builtin-definitions.h";
3552 3553 3554 3555 3556

  // This file contains plain interface descriptor definitions and has to be
  // included in the middle of interface-descriptors.h. Thus it is not a normal
  // header file and uses the .inc suffix instead of the .h suffix.
  std::stringstream interface_descriptors;
3557
  std::string interface_descriptors_file_name = "interface-descriptors.inc";
3558
  {
3559 3560 3561 3562
    IncludeGuardScope builtin_definitions_include_guard(
        builtin_definitions, builtin_definitions_file_name);

    builtin_definitions
3563
        << "\n"
3564
           "#define BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, "
3565 3566 3567 3568 3569 3570
           "ASM) "
           "\\\n";
    for (auto& declarable : GlobalContext::AllDeclarables()) {
      Builtin* builtin = Builtin::DynamicCast(declarable.get());
      if (!builtin || builtin->IsExternal()) continue;
      if (builtin->IsStub()) {
3571 3572 3573
        builtin_definitions << "TFC(" << builtin->ExternalName() << ", "
                            << builtin->ExternalName();
        std::string descriptor_name = builtin->ExternalName() + "Descriptor";
3574 3575
        bool has_context_parameter = builtin->signature().HasContextParameter();
        size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
3576
        TypeVector return_types = LowerType(builtin->signature().return_type);
3577

3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597
        interface_descriptors << "class " << descriptor_name
                              << " : public StaticCallInterfaceDescriptor<"
                              << descriptor_name << "> {\n";

        interface_descriptors << " public:\n";

        if (has_context_parameter) {
          interface_descriptors << "  DEFINE_RESULT_AND_PARAMETERS(";
        } else {
          interface_descriptors << "  DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
        }
        interface_descriptors << return_types.size();
        for (size_t i = kFirstNonContextParameter;
             i < builtin->parameter_names().size(); ++i) {
          Identifier* parameter = builtin->parameter_names()[i];
          interface_descriptors << ", k" << CamelifyString(parameter->value);
        }
        interface_descriptors << ")\n";

        interface_descriptors << "  DEFINE_RESULT_AND_PARAMETER_TYPES(";
3598 3599
        PrintCommaSeparatedList(interface_descriptors, return_types,
                                MachineTypeString);
3600 3601 3602
        for (size_t i = kFirstNonContextParameter;
             i < builtin->parameter_names().size(); ++i) {
          const Type* type = builtin->signature().parameter_types.types[i];
3603
          interface_descriptors << ", " << MachineTypeString(type);
3604
        }
3605 3606 3607 3608
        interface_descriptors << ")\n";

        interface_descriptors << "  DECLARE_DEFAULT_DESCRIPTOR("
                              << descriptor_name << ")\n";
3609
        interface_descriptors << "};\n\n";
3610
      } else {
3611
        builtin_definitions << "TFJ(" << builtin->ExternalName();
3612
        if (builtin->IsVarArgsJavaScript()) {
3613
          builtin_definitions << ", kDontAdaptArgumentsSentinel";
3614
        } else {
3615
          DCHECK(builtin->IsFixedArgsJavaScript());
3616 3617
          // FixedArg javascript builtins need to offer the parameter
          // count.
3618 3619
          int parameter_count =
              static_cast<int>(builtin->signature().ExplicitCount());
3620 3621
          builtin_definitions << ", JSParameterCount(" << parameter_count
                              << ")";
3622
          // And the receiver is explicitly declared.
3623 3624 3625 3626 3627 3628
          builtin_definitions << ", kReceiver";
          for (size_t i = builtin->signature().implicit_count;
               i < builtin->parameter_names().size(); ++i) {
            Identifier* parameter = builtin->parameter_names()[i];
            builtin_definitions << ", k" << CamelifyString(parameter->value);
          }
3629 3630
        }
      }
3631
      builtin_definitions << ") \\\n";
3632
    }
3633
    builtin_definitions << "\n";
3634

3635
    builtin_definitions
3636 3637 3638 3639 3640 3641 3642
        << "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
    for (const BuiltinPointerType* type :
         TypeOracle::AllBuiltinPointerTypes()) {
      Builtin* example_builtin =
          Declarations::FindSomeInternalBuiltinWithType(type);
      if (!example_builtin) {
        CurrentSourcePosition::Scope current_source_position(
3643 3644
            SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
                           LineAndColumn::Invalid()});
3645 3646
        ReportError("unable to find any builtin with type \"", *type, "\"");
      }
3647
      builtin_definitions << "  V(" << type->function_pointer_type_id() << ","
3648
                          << example_builtin->ExternalName() << ")\\\n";
3649
    }
3650
    builtin_definitions << "\n";
3651
  }
3652 3653 3654 3655
  WriteFile(output_directory + "/" + builtin_definitions_file_name,
            builtin_definitions.str());
  WriteFile(output_directory + "/" + interface_descriptors_file_name,
            interface_descriptors.str());
3656 3657
}

3658 3659
namespace {

3660
enum class FieldSectionType : uint32_t {
3661
  kNoSection = 0,
3662 3663 3664
  kWeakSection = 1 << 0,
  kStrongSection = 2 << 0,
  kScalarSection = 3 << 0
3665 3666
};

3667 3668 3669
bool IsPointerSection(FieldSectionType type) {
  return type == FieldSectionType::kWeakSection ||
         type == FieldSectionType::kStrongSection;
3670 3671
}

3672 3673
using FieldSections = base::Flags<FieldSectionType>;

3674
std::string ToString(FieldSectionType type) {
3675 3676
  switch (type) {
    case FieldSectionType::kNoSection:
3677
      return "NoSection";
3678
    case FieldSectionType::kWeakSection:
3679
      return "WeakFields";
3680
    case FieldSectionType::kStrongSection:
3681
      return "StrongFields";
3682
    case FieldSectionType::kScalarSection:
3683
      return "ScalarFields";
3684
  }
3685
  UNREACHABLE();
3686 3687
}

3688 3689
class FieldOffsetsGenerator {
 public:
3690
  explicit FieldOffsetsGenerator(const ClassType* type) : type_(type) {}
3691

3692
  virtual void WriteField(const Field& f, const std::string& size_string) = 0;
3693
  virtual void WriteFieldOffsetGetter(const Field& f) = 0;
3694 3695 3696
  virtual void WriteMarker(const std::string& marker) = 0;

  virtual ~FieldOffsetsGenerator() { CHECK(is_finished_); }
3697 3698 3699 3700

  void RecordOffsetFor(const Field& f) {
    CHECK(!is_finished_);
    UpdateSection(f);
3701 3702

    // Emit kHeaderSize before any indexed field.
3703
    if (f.index.has_value() && !header_size_emitted_) {
3704
      WriteMarker("kHeaderSize");
3705
      header_size_emitted_ = true;
3706 3707
    }

3708 3709 3710 3711 3712 3713 3714
    // We don't know statically how much space an indexed field takes, so report
    // it as zero.
    std::string size_string = "0";
    if (!f.index.has_value()) {
      size_t field_size;
      std::tie(field_size, size_string) = f.GetFieldSizeInformation();
    }
3715 3716 3717 3718 3719
    if (f.offset.has_value()) {
      WriteField(f, size_string);
    } else {
      WriteFieldOffsetGetter(f);
    }
3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732
  }

  void Finish() {
    End(current_section_);
    if (!(completed_sections_ & FieldSectionType::kWeakSection)) {
      Begin(FieldSectionType::kWeakSection);
      End(FieldSectionType::kWeakSection);
    }
    if (!(completed_sections_ & FieldSectionType::kStrongSection)) {
      Begin(FieldSectionType::kStrongSection);
      End(FieldSectionType::kStrongSection);
    }
    is_finished_ = true;
3733 3734 3735

    // In the presence of indexed fields, we already emitted kHeaderSize before
    // the indexed field.
3736
    if (!type_->IsShape() && !header_size_emitted_) {
3737
      WriteMarker("kHeaderSize");
3738
    }
3739
    if (!type_->IsAbstract() && type_->HasStaticSize()) {
3740
      WriteMarker("kSize");
3741 3742 3743
    }
  }

3744 3745 3746
 protected:
  const ClassType* type_;

3747 3748
 private:
  FieldSectionType GetSectionFor(const Field& f) {
3749 3750
    const Type* field_type = f.name_and_type.type;
    if (field_type == TypeOracle::GetVoidType()) {
3751 3752 3753
      // Allow void type for marker constants of size zero.
      return current_section_;
    }
3754 3755
    StructType::Classification struct_contents =
        StructType::ClassificationFlag::kEmpty;
3756 3757
    if (auto field_as_struct = field_type->StructSupertype()) {
      struct_contents = (*field_as_struct)->ClassifyContents();
3758
    }
3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770
    if ((struct_contents & StructType::ClassificationFlag::kStrongTagged) &&
        (struct_contents & StructType::ClassificationFlag::kWeakTagged)) {
      // It's okay for a struct to contain both strong and weak data. We'll just
      // treat the whole thing as weak. This is required for DescriptorEntry.
      struct_contents &= ~StructType::Classification(
          StructType::ClassificationFlag::kStrongTagged);
    }
    bool struct_contains_tagged_fields =
        (struct_contents & StructType::ClassificationFlag::kStrongTagged) ||
        (struct_contents & StructType::ClassificationFlag::kWeakTagged);
    if (struct_contains_tagged_fields &&
        (struct_contents & StructType::ClassificationFlag::kUntagged)) {
3771 3772 3773 3774 3775 3776 3777
      // We can't declare what section a struct goes in if it has multiple
      // categories of data within.
      Error(
          "Classes do not support fields which are structs containing both "
          "tagged and untagged data.")
          .Position(f.pos);
    }
3778 3779 3780 3781 3782 3783 3784
    if ((field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType()) ||
         struct_contents == StructType::ClassificationFlag::kStrongTagged) &&
        !f.custom_weak_marking) {
      return FieldSectionType::kStrongSection;
    } else if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
               struct_contains_tagged_fields) {
      return FieldSectionType::kWeakSection;
3785 3786
    } else {
      return FieldSectionType::kScalarSection;
3787 3788
    }
  }
3789 3790 3791 3792 3793 3794 3795
  void UpdateSection(const Field& f) {
    FieldSectionType type = GetSectionFor(f);
    if (current_section_ == type) return;
    if (IsPointerSection(type)) {
      if (completed_sections_ & type) {
        std::stringstream s;
        s << "cannot declare field " << f.name_and_type.name << " in class "
3796
          << type_->name() << ", because section " << ToString(type)
3797 3798 3799
          << " to which it belongs has already been finished.";
        Error(s.str()).Position(f.pos);
      }
3800
    }
3801 3802 3803
    End(current_section_);
    current_section_ = type;
    Begin(current_section_);
3804
  }
3805 3806
  void Begin(FieldSectionType type) {
    DCHECK(type != FieldSectionType::kNoSection);
3807 3808
    if (!IsPointerSection(type)) return;
    WriteMarker("kStartOf" + ToString(type) + "Offset");
3809 3810 3811 3812
  }
  void End(FieldSectionType type) {
    if (!IsPointerSection(type)) return;
    completed_sections_ |= type;
3813
    WriteMarker("kEndOf" + ToString(type) + "Offset");
3814 3815 3816 3817 3818
  }

  FieldSectionType current_section_ = FieldSectionType::kNoSection;
  FieldSections completed_sections_ = FieldSectionType::kNoSection;
  bool is_finished_ = false;
3819
  bool header_size_emitted_ = false;
3820 3821
};

3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834
void GenerateClassExport(const ClassType* type, std::ostream& header,
                         std::ostream& inl_header) {
  const ClassType* super = type->GetSuperClass();
  std::string parent = "TorqueGenerated" + type->name() + "<" + type->name() +
                       ", " + super->name() + ">";
  header << "class " << type->name() << " : public " << parent << " {\n";
  header << " public:\n";
  if (type->ShouldGenerateBodyDescriptor()) {
    header << "  class BodyDescriptor;\n";
  }
  header << "  TQ_OBJECT_CONSTRUCTORS(" << type->name() << ")\n";
  header << "};\n\n";
  inl_header << "TQ_OBJECT_CONSTRUCTORS_IMPL(" << type->name() << ")\n";
3835 3836
}

3837 3838
}  // namespace

3839
void ImplementationVisitor::GenerateVisitorLists(
3840 3841
    const std::string& output_directory) {
  std::stringstream header;
3842
  std::string file_name = "visitor-lists.h";
3843 3844 3845
  {
    IncludeGuardScope include_guard(header, file_name);

3846
    header << "#define TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(V)\\\n";
3847
    for (const ClassType* type : TypeOracle::GetClasses()) {
3848
      if (type->ShouldGenerateBodyDescriptor() && type->OwnInstanceType()) {
3849 3850
        std::string type_name =
            CapifyStringWithUnderscores(type->name()) + "_TYPE";
3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867
        header << "V(" << type_name << "," << type->name() << ")\\\n";
      }
    }
    header << "\n";

    header << "#define TORQUE_DATA_ONLY_VISITOR_ID_LIST(V)\\\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (type->ShouldGenerateBodyDescriptor() && type->HasNoPointerSlots()) {
        header << "V(" << type->name() << ")\\\n";
      }
    }
    header << "\n";

    header << "#define TORQUE_POINTER_VISITOR_ID_LIST(V)\\\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (type->ShouldGenerateBodyDescriptor() && !type->HasNoPointerSlots()) {
        header << "V(" << type->name() << ")\\\n";
3868 3869 3870
      }
    }
    header << "\n";
3871
  }
3872 3873
  const std::string output_header_path = output_directory + "/" + file_name;
  WriteFile(output_header_path, header.str());
3874 3875
}

3876 3877 3878
void ImplementationVisitor::GenerateBitFields(
    const std::string& output_directory) {
  std::stringstream header;
3879
  std::string file_name = "bit-fields.h";
3880 3881 3882 3883 3884 3885
  {
    IncludeGuardScope include_guard(header, file_name);
    header << "#include \"src/base/bit-field.h\"\n\n";
    NamespaceScope namespaces(header, {"v8", "internal"});

    for (const auto& type : TypeOracle::GetBitFieldStructTypes()) {
3886 3887
      bool all_single_bits = true;  // Track whether every field is one bit.

3888 3889
      header << "#define DEFINE_TORQUE_GENERATED_"
             << CapifyStringWithUnderscores(type->name()) << "() \\\n";
3890 3891 3892
      std::string type_name = type->GetConstexprGeneratedTypeName();
      for (const auto& field : type->fields()) {
        const char* suffix = field.num_bits == 1 ? "Bit" : "Bits";
3893
        all_single_bits = all_single_bits && field.num_bits == 1;
3894 3895 3896 3897 3898
        std::string field_type_name =
            field.name_and_type.type->GetConstexprGeneratedTypeName();
        header << "  using " << CamelifyString(field.name_and_type.name)
               << suffix << " = base::BitField<" << field_type_name << ", "
               << field.offset << ", " << field.num_bits << ", " << type_name
3899
               << ">; \\\n";
3900
      }
3901

3902 3903
      // If every field is one bit, we can also generate a convenient enum.
      if (all_single_bits) {
3904
        header << "  enum Flag: " << type_name << " { \\\n";
3905 3906
        header << "    kNone = 0, \\\n";
        for (const auto& field : type->fields()) {
3907 3908
          header << "    k" << CamelifyString(field.name_and_type.name) << " = "
                 << type_name << "{1} << " << field.offset << ", \\\n";
3909 3910
        }
        header << "  }; \\\n";
3911
        header << "  using Flags = base::Flags<Flag>; \\\n";
3912 3913
        header << "  static constexpr int kFlagCount = "
               << type->fields().size() << "; \\\n";
3914 3915
      }

3916
      header << "\n";
3917 3918 3919 3920 3921 3922
    }
  }
  const std::string output_header_path = output_directory + "/" + file_name;
  WriteFile(output_header_path, header.str());
}

3923 3924
namespace {

3925 3926
class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
 public:
3927
  ClassFieldOffsetGenerator(std::ostream& header, std::ostream& inline_header,
3928 3929
                            const ClassType* type, std::string gen_name,
                            const ClassType* parent)
3930 3931
      : FieldOffsetsGenerator(type),
        hdr_(header),
3932
        inl_(inline_header),
3933 3934
        previous_field_end_((parent && parent->IsShape()) ? "P::kSize"
                                                          : "P::kHeaderSize"),
3935
        gen_name_(gen_name) {}
3936

3937
  void WriteField(const Field& f, const std::string& size_string) override {
3938
    hdr_ << "  // " << f.pos << "\n";
3939 3940 3941 3942 3943
    std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
    std::string field_end = field + "End";
    hdr_ << "  static constexpr int " << field << " = " << previous_field_end_
         << ";\n";
    hdr_ << "  static constexpr int " << field_end << " = " << field << " + "
3944 3945
         << size_string << " - 1;\n";
    previous_field_end_ = field_end + " + 1";
3946
  }
3947

3948 3949 3950 3951 3952 3953 3954
  void WriteFieldOffsetGetter(const Field& f) override {
    // A static constexpr int is more convenient than a getter if the offset is
    // known.
    DCHECK(!f.offset.has_value());

    std::string function_name = CamelifyString(f.name_and_type.name) + "Offset";

3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966
    std::vector<cpp::TemplateParameter> params = {cpp::TemplateParameter("D"),
                                                  cpp::TemplateParameter("P")};
    cpp::Class owner(std::move(params), gen_name_);

    auto getter = cpp::Function::DefaultGetter("int", &owner, function_name);
    getter.PrintDeclaration(hdr_);
    getter.PrintDefinition(inl_, [&](std::ostream& stream) {
      // Item 1 in a flattened slice is the offset.
      stream << "  return static_cast<int>(std::get<1>("
             << Callable::PrefixNameForCCOutput(type_->GetSliceMacroName(f))
             << "(*static_cast<const D*>(this))));\n";
    });
3967
  }
3968
  void WriteMarker(const std::string& marker) override {
3969 3970 3971 3972 3973 3974
    hdr_ << "  static constexpr int " << marker << " = " << previous_field_end_
         << ";\n";
  }

 private:
  std::ostream& hdr_;
3975
  std::ostream& inl_;
3976
  std::string previous_field_end_;
3977
  std::string gen_name_;
3978 3979
};

3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999
class CppClassGenerator {
 public:
  CppClassGenerator(const ClassType* type, std::ostream& header,
                    std::ostream& inl_header, std::ostream& impl)
      : type_(type),
        super_(type->GetSuperClass()),
        name_(type->name()),
        gen_name_("TorqueGenerated" + name_),
        gen_name_T_(gen_name_ + "<D, P>"),
        gen_name_I_(gen_name_ + "<" + name_ + ", " + super_->name() + ">"),
        hdr_(header),
        inl_(inl_header),
        impl_(impl) {}
  const std::string template_decl() const {
    return "template <class D, class P>";
  }

  void GenerateClass();

 private:
4000 4001
  SourcePosition Position();

4002
  void GenerateClassConstructors();
4003 4004 4005 4006 4007 4008 4009

  // Generates getter and setter runtime member functions for the given class
  // field. Traverses depth-first through any nested struct fields to generate
  // accessors for them also; struct_fields represents the stack of currently
  // active struct fields.
  void GenerateFieldAccessors(const Field& class_field,
                              std::vector<const Field*>& struct_fields);
4010
  void EmitLoadFieldStatement(std::ostream& stream, const Field& class_field,
4011
                              std::vector<const Field*>& struct_fields);
4012
  void EmitStoreFieldStatement(std::ostream& stream, const Field& class_field,
4013
                               std::vector<const Field*>& struct_fields);
4014

4015 4016
  void GenerateClassCasts();

4017 4018
  std::string GetFieldOffsetForAccessor(const Field& f);

4019 4020 4021 4022 4023 4024
  // Gets the C++ type name that should be used in accessors for referring to
  // the value of a class field.
  std::string GetTypeNameForAccessor(const Field& f);

  bool CanContainHeapObjects(const Type* t);

4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035
  const ClassType* type_;
  const ClassType* super_;
  const std::string name_;
  const std::string gen_name_;
  const std::string gen_name_T_;
  const std::string gen_name_I_;
  std::ostream& hdr_;
  std::ostream& inl_;
  std::ostream& impl_;
};

4036 4037 4038 4039 4040 4041
base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
    const ClassType& type) {
  std::vector<Field> result;
  std::set<std::string> index_names;
  for (const Field& field : type.ComputeAllFields()) {
    if (field.index) {
4042
      auto name_and_type = ExtractSimpleFieldArraySize(type, field.index->expr);
4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058
      if (!name_and_type) {
        return base::nullopt;
      }
      index_names.insert(name_and_type->name);
    }
  }

  for (const Field& field : type.ComputeAllFields()) {
    if (index_names.count(field.name_and_type.name) != 0) {
      result.push_back(field);
    }
  }

  return result;
}

4059
// Emits the TorqueGenerated<Name><D, P> CRTP base class for type_: the
// non-inline type check helper, field accessors, offset constants, printer
// and verifier hooks, size computation, and constructors. Declarations go to
// hdr_, inline definitions to inl_, out-of-line definitions to impl_.
void CppClassGenerator::GenerateClass() {
  // Is<name>_NonInline(HeapObject)
  if (!type_->IsShape()) {
    cpp::Function f("Is"s + name_ + "_NonInline");
    f.SetDescription("Alias for HeapObject::Is"s + name_ +
                     "() that avoids inlining.");
    f.SetExport(true);
    f.SetReturnType("bool");
    f.AddParameter("HeapObject", "o");

    f.PrintDeclaration(hdr_);
    hdr_ << "\n";
    f.PrintDefinition(impl_, [&](std::ostream& stream) {
      stream << "  return o.Is" << name_ << "();\n";
    });
  }
  hdr_ << "// Definition " << Position() << "\n";
  hdr_ << template_decl() << "\n";
  hdr_ << "class " << gen_name_ << " : public P {\n";
  // Guard against wrong instantiations: D must be the concrete class and P
  // its direct superclass.
  hdr_ << "  static_assert(\n"
       << "      std::is_same<" << name_ << ", D>::value,\n"
       << "      \"Use this class as direct base for " << name_ << ".\");\n";
  hdr_ << "  static_assert(\n"
       << "      std::is_same<" << super_->name() << ", P>::value,\n"
       << "      \"Pass in " << super_->name()
       << " as second template parameter for " << gen_name_ << ".\");\n\n";
  hdr_ << " public: \n";
  hdr_ << "  using Super = P;\n";
  hdr_ << "  using TorqueGeneratedClass = " << gen_name_ << "<D,P>;\n\n";
  // Accessors of classes that are neither extern nor @export are only for
  // use by generated code, so hide them behind "protected".
  if (!type_->ShouldExport() && !type_->IsExtern()) {
    hdr_ << " protected: // not extern or @export\n";
  }
  for (const Field& f : type_->fields()) {
    CurrentSourcePosition::Scope scope(f.pos);
    std::vector<const Field*> struct_fields;
    GenerateFieldAccessors(f, struct_fields);
  }
  if (!type_->ShouldExport() && !type_->IsExtern()) {
    hdr_ << " public:\n";
  }

  GenerateClassCasts();

  std::vector<cpp::TemplateParameter> templateArgs = {
      cpp::TemplateParameter("D"), cpp::TemplateParameter("P")};
  cpp::Class c(std::move(templateArgs), gen_name_);

  if (type_->ShouldGeneratePrint()) {
    hdr_ << "  DECL_PRINTER(" << name_ << ")\n\n";
  }

  if (type_->ShouldGenerateVerify()) {
    IfDefScope hdr_scope(hdr_, "VERIFY_HEAP");
    // V8_EXPORT_PRIVATE void Verify(Isolate*);
    cpp::Function f(&c, name_ + "Verify");
    f.SetExport();
    f.SetReturnType("void");
    f.AddParameter("Isolate*", "isolate");
    f.PrintDeclaration(hdr_);

    IfDefScope impl_scope(impl_, "VERIFY_HEAP");
    impl_ << "\ntemplate <>\n";
    impl_ << "void " << gen_name_I_ << "::" << name_
          << "Verify(Isolate* isolate) {\n";
    impl_ << "  TorqueGeneratedClassVerifiers::" << name_ << "Verify(" << name_
          << "::cast(*this), "
             "isolate);\n";
    impl_ << "}\n\n";
  }
  if (type_->ShouldGenerateVerify()) {
    impl_ << "\n";
  }

  hdr_ << "\n";
  // Emit the kFooOffset constants for every field.
  ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_,
                              type_->GetSuperClass());
  for (auto f : type_->fields()) {
    CurrentSourcePosition::Scope scope(f.pos);
    g.RecordOffsetFor(f);
  }
  g.Finish();
  hdr_ << "\n";

  auto index_fields = GetOrderedUniqueIndexFields(*type_);

  if (!index_fields.has_value()) {
    hdr_ << "  // SizeFor implementations not generated due to complex array "
            "lengths\n\n";

    // With complex array lengths we can still compute the allocated size of
    // an existing instance from the slice of its trailing array field.
    const Field& last_field = type_->LastField();
    std::string last_field_item_size =
        std::get<1>(*SizeOf(last_field.name_and_type.type));

    // int AllocatedSize() const
    {
      cpp::Function f =
          cpp::Function::DefaultGetter("int", &c, "AllocatedSize");
      f.PrintDeclaration(hdr_);

      f.PrintDefinition(inl_, [&](std::ostream& stream) {
        stream << "  auto slice = "
               << Callable::PrefixNameForCCOutput(
                      type_->GetSliceMacroName(last_field))
               << "(*static_cast<const D*>(this));\n";
        // Slice element 1 is the start offset, element 2 the length.
        stream << "  return static_cast<int>(std::get<1>(slice)) + "
               << last_field_item_size
               << " * static_cast<int>(std::get<2>(slice));\n";
      });
    }
  } else if (type_->ShouldGenerateBodyDescriptor() ||
             (!type_->IsAbstract() &&
              !type_->IsSubtypeOf(TypeOracle::GetJSObjectType()))) {
    // static constexpr int32_t SizeFor(<array lengths>)
    cpp::Function f(&c, "SizeFor");
    f.SetReturnType("int32_t");
    f.SetFlags(cpp::Function::kStatic | cpp::Function::kConstexpr |
               cpp::Function::kV8Inline);
    for (const Field& field : *index_fields) {
      f.AddParameter("int", field.name_and_type.name);
    }
    f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
      if (index_fields->empty()) {
        stream << "    DCHECK(kHeaderSize == kSize && kHeaderSize == "
               << *type_->size().SingleValue() << ");\n";
      }
      stream << "    int32_t size = kHeaderSize;\n";
      for (const Field& field : type_->ComputeAllFields()) {
        if (field.index) {
          auto index_name_and_type =
              *ExtractSimpleFieldArraySize(*type_, field.index->expr);
          stream << "    size += " << index_name_and_type.name << " * "
                 << std::get<0>(field.GetFieldSizeInformation()) << ";\n";
        }
      }
      if (type_->size().Alignment() < TargetArchitecture::TaggedSize()) {
        stream << "    size = OBJECT_POINTER_ALIGN(size);\n";
      }
      stream << "    return size;\n";
    });

    // V8_INLINE int32_t AllocatedSize() const
    {
      cpp::Function allocated_size_f =
          cpp::Function::DefaultGetter("int32_t", &c, "AllocatedSize");
      allocated_size_f.SetFlag(cpp::Function::kV8Inline);
      allocated_size_f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
        stream << "    return SizeFor(";
        bool first = true;
        for (auto field : *index_fields) {
          if (!first) stream << ", ";
          stream << "this->" << field.name_and_type.name << "()";
          first = false;
        }
        stream << ");\n";
      });
    }
  }

  hdr_ << "  friend class Factory;\n\n";

  GenerateClassConstructors();

  hdr_ << "};\n\n";

  if (type_->ShouldGenerateFullClassDefinition()) {
    // If this class extends from another class which is defined in the same tq
    // file, and that other class doesn't generate a full class definition, then
    // the resulting .inc file would be uncompilable due to ordering
    // requirements: the generated file must go before the hand-written
    // definition of the base class, but it must also go after that same
    // hand-written definition.
    base::Optional<const ClassType*> parent = type_->parent()->ClassSupertype();
    while (parent) {
      if ((*parent)->ShouldGenerateCppClassDefinitions() &&
          !(*parent)->ShouldGenerateFullClassDefinition() &&
          (*parent)->AttributedToFile() == type_->AttributedToFile()) {
        Error("Exported ", *type_,
              " cannot be in the same file as its parent extern ", **parent);
      }
      parent = (*parent)->parent()->ClassSupertype();
    }

    GenerateClassExport(type_, hdr_, inl_);
  }
}

void CppClassGenerator::GenerateClassCasts() {
4245 4246 4247
  cpp::Class owner({cpp::TemplateParameter("D"), cpp::TemplateParameter("P")},
                   gen_name_);
  cpp::Function f(&owner, "cast");
4248 4249 4250 4251 4252
  f.SetFlags(cpp::Function::kV8Inline | cpp::Function::kStatic);
  f.SetReturnType("D");
  f.AddParameter("Object", "object");

  // V8_INLINE static D cast(Object)
4253 4254
  f.PrintDeclaration(hdr_);
  f.PrintDefinition(inl_, [](std::ostream& stream) {
4255
    stream << "    return D(object.ptr());\n";
4256 4257 4258 4259
  });
  // V8_INLINE static D unchecked_cast(Object)
  f.SetName("unchecked_cast");
  f.PrintInlineDefinition(hdr_, [](std::ostream& stream) {
4260
    stream << "    return bit_cast<D>(object);\n";
4261
  });
4262 4263
}

4264 4265
// Source position of the Torque class declaration this generator is based on.
SourcePosition CppClassGenerator::Position() {
  return type_->GetPosition();
}

4266
// Emits the constructors of the generated class: a public default
// constructor (with a static_assert catching misuse) and two protected
// Address-based constructors whose SLOW_DCHECKs validate the instance type.
void CppClassGenerator::GenerateClassConstructors() {
  // Shapes carry no instance type of their own, so their constructors check
  // against the first non-shape superclass instead.
  const ClassType* typecheck_type = type_;
  while (typecheck_type->IsShape()) {
    typecheck_type = typecheck_type->GetSuperClass();

    // Shapes have already been checked earlier to inherit from JSObject, so we
    // should have found an appropriate type.
    DCHECK(typecheck_type);
  }

  hdr_ << "  template <class DAlias = D>\n";
  hdr_ << "  constexpr " << gen_name_ << "() : P() {\n";
  hdr_ << "    static_assert(\n";
  hdr_ << "        std::is_base_of<" << gen_name_ << ", DAlias>::value,\n";
  hdr_ << "        \"class " << gen_name_
       << " should be used as direct base for " << name_ << ".\");\n";
  hdr_ << "  }\n\n";

  hdr_ << " protected:\n";
  hdr_ << "  inline explicit " << gen_name_ << "(Address ptr);\n";
  hdr_ << "  // Special-purpose constructor for subclasses that have fast "
          "paths where\n";
  hdr_ << "  // their ptr() is a Smi.\n";
  hdr_ << "  inline explicit " << gen_name_
       << "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi);\n";

  // Out-of-line (inline header) definitions of the Address constructors.
  inl_ << "template<class D, class P>\n";
  inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
  inl_ << "    : P(ptr) {\n";
  inl_ << "  SLOW_DCHECK(Is" << typecheck_type->name()
       << "_NonInline(*this));\n";
  inl_ << "}\n";

  inl_ << "template<class D, class P>\n";
  inl_ << "inline " << gen_name_T_ << "::" << gen_name_
       << "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)\n";
  inl_ << "    : P(ptr, allow_smi) {\n";
  inl_ << "  SLOW_DCHECK("
       << "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
          " && this->IsSmi()) || Is"
       << typecheck_type->name() << "_NonInline(*this));\n";
  inl_ << "}\n";
}

4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320
namespace {
std::string GenerateRuntimeTypeCheck(const Type* type,
                                     const std::string& value) {
  bool maybe_object = !type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
  std::stringstream type_check;
  bool at_start = true;
  // If weak pointers are allowed, then start by checking for a cleared value.
  if (maybe_object) {
    type_check << value << ".IsCleared()";
    at_start = false;
  }
4321
  for (const TypeChecker& runtime_type : type->GetTypeCheckers()) {
4322 4323 4324 4325
    if (!at_start) type_check << " || ";
    at_start = false;
    if (maybe_object) {
      bool strong = runtime_type.weak_ref_to.empty();
4326 4327 4328 4329
      if (strong && runtime_type.type == WEAK_HEAP_OBJECT) {
        // Rather than a generic Weak<T>, this is the basic type WeakHeapObject.
        // We can't validate anything more about the type of the object pointed
        // to, so just check that it's weak.
4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346
        type_check << value << ".IsWeak()";
      } else {
        type_check << "(" << (strong ? "!" : "") << value << ".IsWeak() && "
                   << value << ".GetHeapObjectOrSmi().Is"
                   << (strong ? runtime_type.type : runtime_type.weak_ref_to)
                   << "())";
      }
    } else {
      type_check << value << ".Is" << runtime_type.type << "()";
    }
  }
  return type_check.str();
}

void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
                          const ClassType* type, const Field& f) {
  os << "  DCHECK_GE(" << index << ", 0);\n";
4347
  std::string length_expression;
4348
  if (base::Optional<NameAndType> array_length =
4349
          ExtractSimpleFieldArraySize(*type, f.index->expr)) {
4350 4351 4352 4353 4354 4355 4356
    length_expression = "this ->" + array_length->name + "()";
  } else {
    // The length is element 2 in the flattened field slice.
    length_expression =
        "static_cast<int>(std::get<2>(" +
        Callable::PrefixNameForCCOutput(type->GetSliceMacroName(f)) +
        "(*static_cast<const D*>(this))))";
4357
  }
4358
  os << "  DCHECK_LT(" << index << ", " << length_expression << ");\n";
4359
}
4360 4361 4362 4363 4364 4365 4366 4367

// Returns whether C++ accessors can be generated for a field of the given
// Torque type.
bool CanGenerateFieldAccessors(const Type* field_type) {
  // float64_or_hole should be treated like float64. For now, we don't need it.
  // TODO(v8:10391) Generate accessors for external pointers.
  if (field_type == TypeOracle::GetVoidType()) return false;
  if (field_type == TypeOracle::GetFloat64OrHoleType()) return false;
  return !field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType());
}
4368 4369
}  // namespace

4370
// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
// Emits a getter/setter pair for |class_field|. For struct-valued fields it
// recurses into each struct member (the recursion path is tracked in
// |struct_fields|) so that every leaf field gets its own accessor.
void CppClassGenerator::GenerateFieldAccessors(
    const Field& class_field, std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  if (!CanGenerateFieldAccessors(field_type)) return;

  // Recurse into struct members rather than generating an accessor for the
  // aggregate itself.
  if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
    struct_fields.resize(struct_fields.size() + 1);
    for (const Field& struct_field : struct_type->fields()) {
      struct_fields[struct_fields.size() - 1] = &struct_field;
      GenerateFieldAccessors(class_field, struct_fields);
    }
    struct_fields.resize(struct_fields.size() - 1);
    return;
  }

  // Optional indexed fields are accessed at a fixed index 0, so they take no
  // index parameter.
  bool indexed = class_field.index && !class_field.index->optional;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  bool can_contain_heap_objects = CanContainHeapObjects(field_type);

  // Assemble an accessor name by accumulating together all of the nested field
  // names.
  std::string name = class_field.name_and_type.name;
  for (const Field* nested_struct_field : struct_fields) {
    name += "_" + nested_struct_field->name_and_type.name;
  }

  // Generate declarations in header.
  if (can_contain_heap_objects && !field_type->IsClassType() &&
      !field_type->IsStructType() &&
      field_type != TypeOracle::GetObjectType()) {
    hdr_ << "  // Torque type: " << field_type->ToString() << "\n";
  }

  std::vector<cpp::TemplateParameter> templateParameters = {
      cpp::TemplateParameter("D"), cpp::TemplateParameter("P")};
  cpp::Class owner(std::move(templateParameters), gen_name_);

  // getter
  {
    auto getter = cpp::Function::DefaultGetter(type_name, &owner, name);
    if (indexed) {
      getter.AddParameter("int", "i");
    }
    // Synchronized reads take an extra tag parameter; |tag_argument| is the
    // matching argument forwarded by the cage-base-deriving overload below.
    const char* tag_argument;
    switch (class_field.read_synchronization) {
      case FieldSynchronization::kNone:
        tag_argument = "";
        break;
      case FieldSynchronization::kRelaxed:
        getter.AddParameter("RelaxedLoadTag");
        tag_argument = ", kRelaxedLoad";
        break;
      case FieldSynchronization::kAcquireRelease:
        getter.AddParameter("AcquireLoadTag");
        tag_argument = ", kAcquireLoad";
        break;
    }

    getter.PrintDeclaration(hdr_);

    // For tagged data, generate the extra getter that derives an
    // PtrComprCageBase from the current object's pointer.
    if (can_contain_heap_objects) {
      getter.PrintDefinition(inl_, [&](auto& stream) {
        stream
            << "  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);\n";
        stream << "  return " << gen_name_ << "::" << name << "(cage_base"
               << (indexed ? ", i" : "") << tag_argument << ");\n";
      });

      getter.InsertParameter(0, "PtrComprCageBase", "cage_base");
      getter.PrintDeclaration(hdr_);
    }

    getter.PrintDefinition(inl_, [&](auto& stream) {
      stream << "  " << type_name << " value;\n";
      EmitLoadFieldStatement(stream, class_field, struct_fields);
      stream << "  return value;\n";
    });
  }

  // setter
  {
    auto setter = cpp::Function::DefaultSetter(
        &owner, std::string("set_") + name, type_name, "value");
    if (indexed) {
      setter.InsertParameter(0, "int", "i");
    }
    switch (class_field.write_synchronization) {
      case FieldSynchronization::kNone:
        break;
      case FieldSynchronization::kRelaxed:
        setter.AddParameter("RelaxedStoreTag");
        break;
      case FieldSynchronization::kAcquireRelease:
        setter.AddParameter("ReleaseStoreTag");
        break;
    }
    // Heap-pointer stores take a write-barrier mode, defaulted to the full
    // barrier.
    if (can_contain_heap_objects) {
      setter.AddParameter("WriteBarrierMode", "mode", "UPDATE_WRITE_BARRIER");
    }
    setter.PrintDeclaration(hdr_);

    setter.PrintDefinition(inl_, [&](auto& stream) {
      EmitStoreFieldStatement(stream, class_field, struct_fields);
    });
  }

  hdr_ << "\n";
}

4484 4485 4486
// Returns the C++ expression naming the offset of field |f|: the generated
// kFooOffset constant when the offset is statically known, otherwise a call
// to the FooOffset() helper.
std::string CppClassGenerator::GetFieldOffsetForAccessor(const Field& f) {
  const std::string camel_name = CamelifyString(f.name_and_type.name);
  if (!f.offset.has_value()) {
    return camel_name + "Offset()";
  }
  return "k" + camel_name + "Offset";
}
4490

4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504
std::string CppClassGenerator::GetTypeNameForAccessor(const Field& f) {
  const Type* field_type = f.name_and_type.type;
  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    const Type* constexpr_version = field_type->ConstexprVersion();
    if (!constexpr_version) {
      Error("Field accessor for ", type_->name(), ":: ", f.name_and_type.name,
            " cannot be generated because its type ", *field_type,
            " is neither a subclass of Object nor does the type have a "
            "constexpr "
            "version.")
          .Position(f.pos)
          .Throw();
    }
    return constexpr_version->GetGeneratedTypeName();
4505
  }
4506 4507 4508
  if (field_type->IsSubtypeOf(TypeOracle::GetSmiType())) {
    // Follow the convention to create Smi accessors with type int.
    return "int";
4509
  }
4510 4511
  return field_type->UnhandlifiedCppTypeName();
}
4512

4513 4514 4515
// Returns whether values of type |t| may point at heap objects (i.e. the
// type is tagged but not a Smi), which determines whether accessors need a
// cage base, write barriers, and runtime type checks.
bool CppClassGenerator::CanContainHeapObjects(const Type* t) {
  if (!t->IsSubtypeOf(TypeOracle::GetTaggedType())) return false;
  return !t->IsSubtypeOf(TypeOracle::GetSmiType());
}

4518
// Emits the body of a field getter into |stream|: computes the (possibly
// indexed) field offset, performs the raw read with the requested
// synchronization, and DCHECKs the runtime type of heap-pointer values.
// Assumes the surrounding generated code has declared a local "value".
void CppClassGenerator::EmitLoadFieldStatement(
    std::ostream& stream, const Field& class_field,
    std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  const std::string class_field_size =
      std::get<1>(class_field.GetFieldSizeInformation());

  // field_offset contains both the offset from the beginning of the object to
  // the class field and the combined offsets of any nested struct fields
  // within, but not the index adjustment.
  std::string field_offset = GetFieldOffsetForAccessor(class_field);
  for (const Field* nested_struct_field : struct_fields) {
    field_offset += " + " + std::to_string(*nested_struct_field->offset);
  }

  std::string offset = field_offset;
  if (class_field.index) {
    // Optional indexed fields are always read at index 0.
    const char* index = class_field.index->optional ? "0" : "i";
    GenerateBoundsDCheck(stream, index, type_, class_field);
    stream << "  int offset = " << field_offset << " + " << index << " * "
           << class_field_size << ";\n";
    offset = "offset";
  }

  stream << "  value = ";

  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    // Untagged data supports only plain (unsynchronized) reads.
    if (class_field.read_synchronization ==
        FieldSynchronization::kAcquireRelease) {
      ReportError("Torque doesn't support @cppAcquireRead on untagged data");
    } else if (class_field.read_synchronization ==
               FieldSynchronization::kRelaxed) {
      ReportError("Torque doesn't support @cppRelaxedRead on untagged data");
    }
    stream << "this->template ReadField<" << type_name << ">(" << offset
           << ");\n";
  } else {
    // Tagged data goes through TaggedField with the requested
    // synchronization.
    const char* load;
    switch (class_field.read_synchronization) {
      case FieldSynchronization::kNone:
        load = "load";
        break;
      case FieldSynchronization::kRelaxed:
        load = "Relaxed_Load";
        break;
      case FieldSynchronization::kAcquireRelease:
        load = "Acquire_Load";
        break;
    }
    // Smi fields are exposed as int: load as Smi, then unwrap with .value().
    bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
    const std::string load_type = is_smi ? "Smi" : type_name;
    const char* postfix = is_smi ? ".value()" : "";
    const char* optional_cage_base = is_smi ? "" : "cage_base, ";

    stream << "TaggedField<" << load_type << ">::" << load << "("
           << optional_cage_base << "*this, " << offset << ")" << postfix
           << ";\n";
  }

  if (CanContainHeapObjects(field_type)) {
    stream << "  DCHECK(" << GenerateRuntimeTypeCheck(field_type, "value")
           << ");\n";
  }
}

4586
// Emits the body of a field setter into |stream|: computes the (possibly
// indexed) field offset, writes the value with the appropriate macro for the
// requested synchronization, and emits the conditional write barrier needed
// for heap-pointer fields.
void CppClassGenerator::EmitStoreFieldStatement(
    std::ostream& stream, const Field& class_field,
    std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  const std::string class_field_size =
      std::get<1>(class_field.GetFieldSizeInformation());

  // field_offset contains both the offset from the beginning of the object to
  // the class field and the combined offsets of any nested struct fields
  // within, but not the index adjustment.
  std::string field_offset = GetFieldOffsetForAccessor(class_field);
  for (const Field* nested_struct_field : struct_fields) {
    field_offset += " + " + std::to_string(*nested_struct_field->offset);
  }

  std::string offset = field_offset;
  if (class_field.index) {
    // Optional indexed fields are always written at index 0.
    const char* index = class_field.index->optional ? "0" : "i";
    GenerateBoundsDCheck(stream, index, type_, class_field);
    stream << "  int offset = " << field_offset << " + " << index << " * "
           << class_field_size << ";\n";
    offset = "offset";
  }

  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    stream << "  this->template WriteField<" << type_name << ">(" << offset
           << ", value);\n";
  } else {
    bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
    bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
    // Pick the store macro matching pointer strength and synchronization.
    const char* write_macro;
    if (!strong_pointer) {
      if (class_field.write_synchronization ==
          FieldSynchronization::kAcquireRelease) {
        ReportError("Torque doesn't support @releaseWrite on weak fields");
      }
      write_macro = "RELAXED_WRITE_WEAK_FIELD";
    } else {
      switch (class_field.write_synchronization) {
        case FieldSynchronization::kNone:
          write_macro = "WRITE_FIELD";
          break;
        case FieldSynchronization::kRelaxed:
          write_macro = "RELAXED_WRITE_FIELD";
          break;
        case FieldSynchronization::kAcquireRelease:
          write_macro = "RELEASE_WRITE_FIELD";
          break;
      }
    }
    // Smi setters take an int; wrap it in a Smi before storing.
    const std::string value_to_write = is_smi ? "Smi::FromInt(value)" : "value";

    if (!is_smi) {
      stream << "  SLOW_DCHECK("
             << GenerateRuntimeTypeCheck(field_type, "value") << ");\n";
    }
    stream << "  " << write_macro << "(*this, " << offset << ", "
           << value_to_write << ");\n";
    // Smi stores never need a write barrier.
    if (!is_smi) {
      const char* write_barrier = strong_pointer
                                      ? "CONDITIONAL_WRITE_BARRIER"
                                      : "CONDITIONAL_WEAK_WRITE_BARRIER";
      stream << "  " << write_barrier << "(*this, " << offset
             << ", value, mode);\n";
    }
  }
}

4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669
// Emits a TorqueGenerated<Name>Offsets struct into |header| listing the
// packed offset of every field of |type| plus the total packed size.
void GenerateStructLayoutDescription(std::ostream& header,
                                     const StructType* type) {
  header << "struct TorqueGenerated" << CamelifyString(type->name())
         << "Offsets {\n";
  for (const Field& field : type->fields()) {
    const std::string field_constant =
        "k" + CamelifyString(field.name_and_type.name) + "Offset";
    header << "  static constexpr int " << field_constant << " = "
           << *field.offset << ";\n";
  }
  header << "  static constexpr int kSize = " << type->PackedSize() << ";\n";
  header << "};\n\n";
}

4670 4671
}  // namespace

4672 4673
// Generates, for all Torque classes: the per-file class definition streams
// (via CppClassGenerator), factory.inc / factory.cc with the
// TorqueGeneratedFactory New<Name>() methods, layout descriptions for
// structs used as field types, and class-forward-declarations.h.
void ImplementationVisitor::GenerateClassDefinitions(
    const std::string& output_directory) {
  std::stringstream factory_header;
  std::stringstream factory_impl;
  std::string factory_basename = "factory";

  std::stringstream forward_declarations;
  std::string forward_declarations_filename = "class-forward-declarations.h";

  // Inner scope so the Namespace/IncludeGuard RAII scopes close their
  // brackets before the streams are written out below.
  {
    factory_impl << "#include \"src/heap/factory-base.h\"\n";
    factory_impl << "#include \"src/heap/factory-base-inl.h\"\n";
    factory_impl << "#include \"src/heap/heap.h\"\n";
    factory_impl << "#include \"src/heap/heap-inl.h\"\n";
    factory_impl << "#include \"src/execution/isolate.h\"\n";
    factory_impl << "#include "
                    "\"src/objects/all-objects-inl.h\"\n\n";
    NamespaceScope factory_impl_namespaces(factory_impl, {"v8", "internal"});
    factory_impl << "\n";

    IncludeGuardScope include_guard(forward_declarations,
                                    forward_declarations_filename);
    NamespaceScope forward_declarations_namespaces(forward_declarations,
                                                   {"v8", "internal"});

    std::set<const StructType*, TypeLess> structs_used_in_classes;

    // Emit forward declarations.
    for (const ClassType* type : TypeOracle::GetClasses()) {
      CurrentSourcePosition::Scope position_activator(type->GetPosition());
      auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
      std::ostream& header = streams.class_definition_headerfile;
      std::string name = type->ShouldGenerateCppClassDefinitions()
                             ? type->name()
                             : type->GetGeneratedTNodeTypeName();
      header << "class " << name << ";\n";
      forward_declarations << "class " << name << ";\n";
    }

    for (const ClassType* type : TypeOracle::GetClasses()) {
      CurrentSourcePosition::Scope position_activator(type->GetPosition());
      auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
      std::ostream& header = streams.class_definition_headerfile;
      std::ostream& inline_header = streams.class_definition_inline_headerfile;
      std::ostream& implementation = streams.class_definition_ccfile;

      if (type->ShouldGenerateCppClassDefinitions()) {
        CppClassGenerator g(type, header, inline_header, implementation);
        g.GenerateClass();
      }
      // Remember struct-valued field types; their layout descriptions are
      // emitted after this loop.
      for (const Field& f : type->fields()) {
        const Type* field_type = f.name_and_type.type;
        if (auto field_as_struct = field_type->StructSupertype()) {
          structs_used_in_classes.insert(*field_as_struct);
        }
      }
      if (type->ShouldGenerateFactoryFunction()) {
        std::string return_type = type->HandlifiedCppTypeName();
        std::string function_name = "New" + type->name();
        std::stringstream parameters;
        // One parameter per non-indexed field (the map comes from the
        // read-only roots instead), plus the allocation type.
        for (const Field& f : type->ComputeAllFields()) {
          if (f.name_and_type.name == "map") continue;
          if (!f.index) {
            std::string type_string =
                f.name_and_type.type->HandlifiedCppTypeName();
            parameters << type_string << " " << f.name_and_type.name << ", ";
          }
        }
        parameters << "AllocationType allocation_type";

        factory_header << return_type << " " << function_name << "("
                       << parameters.str() << ");\n";
        factory_impl << "template <typename Impl>\n";
        factory_impl << return_type
                     << " TorqueGeneratedFactory<Impl>::" << function_name
                     << "(" << parameters.str() << ") {\n";

        factory_impl << " int size = ";
        const ClassType* super = type->GetSuperClass();
        std::string gen_name = "TorqueGenerated" + type->name();
        std::string gen_name_T =
            gen_name + "<" + type->name() + ", " + super->name() + ">";
        factory_impl << gen_name_T << "::SizeFor(";

        bool first = true;
        auto index_fields = GetOrderedUniqueIndexFields(*type);
        CHECK(index_fields.has_value());
        for (auto index_field : *index_fields) {
          if (!first) {
            factory_impl << ", ";
          }
          factory_impl << index_field.name_and_type.name;
          first = false;
        }

        factory_impl << ");\n";
        factory_impl << "  Map map = factory()->read_only_roots()."
                     << SnakeifyString(type->name()) << "_map();";
        factory_impl << "  HeapObject raw_object =\n";
        factory_impl << "    factory()->AllocateRawWithImmortalMap(size, "
                        "allocation_type, map);\n";
        factory_impl << "  " << type->UnhandlifiedCppTypeName()
                     << " result = " << type->UnhandlifiedCppTypeName()
                     << "::cast(raw_object);\n";
        factory_impl << "  DisallowGarbageCollection no_gc;";
        // Young-generation allocations can skip the write barrier.
        factory_impl << "  WriteBarrierMode write_barrier_mode =\n"
                     << "     allocation_type == AllocationType::kYoung\n"
                     << "     ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;\n"
                     << "  USE(write_barrier_mode);\n";

        // Initialize every non-indexed field from the matching parameter.
        for (const Field& f : type->ComputeAllFields()) {
          if (f.name_and_type.name == "map") continue;
          if (!f.index) {
            factory_impl << "  result.TorqueGeneratedClass::set_"
                         << SnakeifyString(f.name_and_type.name) << "(";
            if (f.name_and_type.type->IsSubtypeOf(
                    TypeOracle::GetTaggedType()) &&
                !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
              factory_impl << "*" << f.name_and_type.name
                           << ", write_barrier_mode";
            } else {
              factory_impl << f.name_and_type.name;
            }
            factory_impl << ");\n";
          }
        }

        factory_impl << "  return handle(result, factory()->isolate());\n";
        factory_impl << "}\n\n";

        // Explicit instantiations for both factory flavors.
        factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
                     << return_type
                     << "TorqueGeneratedFactory<Factory>::" << function_name
                     << "(" << parameters.str() << ");\n";
        factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
                     << return_type << "TorqueGeneratedFactory<LocalFactory>::"
                     << function_name << "(" << parameters.str() << ");\n";

        factory_impl << "\n\n";
      }
    }

    for (const StructType* type : structs_used_in_classes) {
      CurrentSourcePosition::Scope position_activator(type->GetPosition());
      std::ostream& header =
          GlobalContext::GeneratedPerFile(type->GetPosition().source)
              .class_definition_headerfile;
      if (type != TypeOracle::GetFloat64OrHoleType()) {
        GenerateStructLayoutDescription(header, type);
      }
    }
  }
  WriteFile(output_directory + "/" + factory_basename + ".inc",
            factory_header.str());
  WriteFile(output_directory + "/" + factory_basename + ".cc",
            factory_impl.str());
  WriteFile(output_directory + "/" + forward_declarations_filename,
            forward_declarations.str());
}

namespace {
void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
                                      const std::string& gen_name,
                                      const std::string& gen_name_T,
                                      const std::string template_params) {
  impl << template_params << "\n";
  impl << "void " << gen_name_T << "::" << type->name()
       << "Print(std::ostream& os) {\n";
4840
  impl << "  this->PrintHeader(os, \"" << type->name() << "\");\n";
4841 4842 4843 4844
  auto hierarchy = type->GetHierarchy();
  std::map<std::string, const AggregateType*> field_names;
  for (const AggregateType* aggregate_type : hierarchy) {
    for (const Field& f : aggregate_type->fields()) {
4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860
      if (f.name_and_type.name == "map" || f.index.has_value() ||
          !CanGenerateFieldAccessors(f.name_and_type.type)) {
        continue;
      }
      std::string getter = f.name_and_type.name;
      if (aggregate_type != type) {
        // We must call getters directly on the class that provided them,
        // because a subclass could have hidden them.
        getter = aggregate_type->name() + "::TorqueGeneratedClass::" + getter;
      }
      if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
          !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
        impl << "  os << \"\\n - " << f.name_and_type.name << ": \" << ";
        if (f.name_and_type.type->StructSupertype()) {
          // TODO(turbofan): Print struct fields too.
          impl << "\" <struct field printing still unimplemented>\";\n";
4861
        } else {
4862
          impl << "this->" << getter;
4863 4864
          switch (f.read_synchronization) {
            case FieldSynchronization::kNone:
4865
              impl << "();\n";
4866 4867
              break;
            case FieldSynchronization::kRelaxed:
4868
              impl << "(kRelaxedLoad);\n";
4869 4870
              break;
            case FieldSynchronization::kAcquireRelease:
4871
              impl << "(kAcquireLoad);\n";
4872 4873
              break;
          }
4874
        }
4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888
      } else {
        impl << "  os << \"\\n - " << f.name_and_type.name << ": \" << "
             << "Brief(this->" << getter;
        switch (f.read_synchronization) {
          case FieldSynchronization::kNone:
            impl << "());\n";
            break;
          case FieldSynchronization::kRelaxed:
            impl << "(kRelaxedLoad));\n";
            break;
          case FieldSynchronization::kAcquireRelease:
            impl << "(kAcquireLoad));\n";
            break;
        }
4889
      }
4890 4891
    }
  }
4892
  impl << "  os << '\\n';\n";
4893 4894 4895 4896
  impl << "}\n\n";
}
}  // namespace

void ImplementationVisitor::GeneratePrintDefinitions(
    const std::string& output_directory) {
4899
  std::stringstream impl;
4900
  std::string file_name = "objects-printer.cc";
4901
  {
4902
    IfDefScope object_print(impl, "OBJECT_PRINT");
4903

4904
    impl << "#include <iosfwd>\n\n";
4905
    impl << "#include \"src/objects/all-objects-inl.h\"\n\n";
4906

4907
    NamespaceScope impl_namespaces(impl, {"v8", "internal"});
4908

4909
    for (const ClassType* type : TypeOracle::GetClasses()) {
4910
      if (!type->ShouldGeneratePrint()) continue;
4911
      DCHECK(type->ShouldGenerateCppClassDefinitions());
4912 4913 4914 4915 4916 4917 4918
      const ClassType* super = type->GetSuperClass();
      std::string gen_name = "TorqueGenerated" + type->name();
      std::string gen_name_T =
          gen_name + "<" + type->name() + ", " + super->name() + ">";
      std::string template_decl = "template <>";
      GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
                                       template_decl);
4919 4920 4921
    }
  }

4922
  std::string new_contents(impl.str());
4923
  WriteFile(output_directory + "/" + file_name, new_contents);
4924 4925
}

// Tries to match the slot layout of |type| against one of the prefabricated
// body descriptor templates (DataOnlyBodyDescriptor,
// SuffixRange[Weak]BodyDescriptor, FixedRangeBodyDescriptor). Returns the
// instantiated descriptor name on success, or base::nullopt if the class
// needs a custom generated descriptor.
base::Optional<std::string> MatchSimpleBodyDescriptor(const ClassType* type) {
  std::vector<ObjectSlotKind> slots = type->ComputeHeaderSlotKinds();
  if (!type->HasStaticSize()) {
    // Represent the trailing indexed portion by a single extra slot kind.
    slots.push_back(*type->ComputeArraySlotKind());
  }

  // Skip the map slot.
  size_t i = 1;
  while (i < slots.size() && slots[i] == ObjectSlotKind::kNoPointer) ++i;
  if (i == slots.size()) return "DataOnlyBodyDescriptor";
  bool has_weak_pointers = false;
  size_t start_index = i;
  // Scan the contiguous run of pointer slots beginning at |start_index|.
  for (; i < slots.size(); ++i) {
    if (slots[i] == ObjectSlotKind::kStrongPointer) {
      continue;
    } else if (slots[i] == ObjectSlotKind::kMaybeObjectPointer) {
      has_weak_pointers = true;
    } else if (slots[i] == ObjectSlotKind::kNoPointer) {
      break;
    } else {
      // Any other slot kind has no simple descriptor.
      return base::nullopt;
    }
  }
  size_t end_index = i;
  // Everything after the pointer run must be pointer-free, otherwise the
  // layout is not a single contiguous pointer range.
  for (; i < slots.size(); ++i) {
    if (slots[i] != ObjectSlotKind::kNoPointer) return base::nullopt;
  }
  size_t start_offset = start_index * TargetArchitecture::TaggedSize();
  size_t end_offset = end_index * TargetArchitecture::TaggedSize();
  // We pick a suffix-range body descriptor even in cases where the object size
  // is fixed, to reduce the amount of code executed for object visitation.
  if (end_index == slots.size()) {
    return ToString("SuffixRange", has_weak_pointers ? "Weak" : "",
                    "BodyDescriptor<", start_offset, ">");
  }
  if (!has_weak_pointers) {
    return ToString("FixedRangeBodyDescriptor<", start_offset, ", ", end_offset,
                    ">");
  }
  // Weak pointers in a non-suffix range: no prefabricated descriptor fits.
  return base::nullopt;
}

// Writes objects-body-descriptors-inl.inc, defining <Class>::BodyDescriptor
// for every Torque class that requests one. The descriptor tells the GC which
// slots of an object hold (possibly weak) tagged pointers and how to compute
// the object's size.
void ImplementationVisitor::GenerateBodyDescriptors(
    const std::string& output_directory) {
  std::string file_name = "objects-body-descriptors-inl.inc";
  std::stringstream h_contents;

    for (const ClassType* type : TypeOracle::GetClasses()) {
      std::string name = type->name();
      if (!type->ShouldGenerateBodyDescriptor()) continue;

      // Classes without a static size end in an indexed (array) portion,
      // described by a single per-element slot kind.
      bool has_array_fields = !type->HasStaticSize();
      std::vector<ObjectSlotKind> header_slot_kinds =
          type->ComputeHeaderSlotKinds();
      base::Optional<ObjectSlotKind> array_slot_kind =
          type->ComputeArraySlotKind();
      DCHECK_EQ(has_array_fields, array_slot_kind.has_value());

      h_contents << "class " << name << "::BodyDescriptor final : public ";
      if (auto descriptor_name = MatchSimpleBodyDescriptor(type)) {
        // The slot layout matches a prefabricated descriptor; inherit
        // IsValidSlot/IterateBody from it and only emit SizeOf below.
        h_contents << *descriptor_name << " {\n";
        h_contents << " public:\n";
      } else {
        h_contents << "BodyDescriptorBase {\n";
        h_contents << " public:\n";

        // Emit IsValidSlot as a table lookup over the header slots, with a
        // constant answer for offsets in the indexed portion (if any).
        h_contents << "  static bool IsValidSlot(Map map, HeapObject obj, int "
                      "offset) {\n";
        if (has_array_fields) {
          h_contents << "    if (offset < kHeaderSize) {\n";
        }
        h_contents << "      bool valid_slots[] = {";
        for (ObjectSlotKind slot : header_slot_kinds) {
          h_contents << (slot != ObjectSlotKind::kNoPointer ? "1" : "0") << ",";
        }
        h_contents << "};\n"
                   << "      return valid_slots[static_cast<unsigned "
                      "int>(offset)/kTaggedSize];\n";
        if (has_array_fields) {
          h_contents << "    }\n";
          bool array_is_tagged = *array_slot_kind != ObjectSlotKind::kNoPointer;
          h_contents << "    return " << (array_is_tagged ? "true" : "false")
                     << ";\n";
        }
        h_contents << "  }\n\n";

        h_contents << "  template <typename ObjectVisitor>\n";
        h_contents
            << "  static inline void IterateBody(Map map, HeapObject obj, "
               "int object_size, ObjectVisitor* v) {\n";

        std::vector<ObjectSlotKind> slots = std::move(header_slot_kinds);
        if (has_array_fields) slots.push_back(*array_slot_kind);

        // Skip the map slot.
        slots.erase(slots.begin());
        size_t start_offset = TargetArchitecture::TaggedSize();

        // Coalesce adjacent slots with combinable kinds into sections and
        // emit one Iterate* call per section. The loop deliberately runs one
        // index past the end of |slots| so that the final section is flushed.
        size_t end_offset = start_offset;
        ObjectSlotKind section_kind;
        for (size_t i = 0; i <= slots.size(); ++i) {
          base::Optional<ObjectSlotKind> next_section_kind;
          bool finished_section = false;
          if (i == 0) {
            next_section_kind = slots[i];
          } else if (i < slots.size()) {
            if (auto combined = Combine(section_kind, slots[i])) {
              next_section_kind = *combined;
            } else {
              // Incompatible with the current section: close it and start a
              // new one at this slot.
              next_section_kind = slots[i];
              finished_section = true;
            }
          } else {
            finished_section = true;
          }
          if (finished_section) {
            // A section that covers the trailing array portion, or spans more
            // than one tagged slot, uses the plural Iterate...s(start, end)
            // form; the array section's end is the runtime object_size.
            bool is_array_slot = i == slots.size() && has_array_fields;
            bool multiple_slots =
                is_array_slot ||
                (end_offset - start_offset > TargetArchitecture::TaggedSize());
            base::Optional<std::string> iterate_command;
            switch (section_kind) {
              case ObjectSlotKind::kStrongPointer:
                iterate_command = "IteratePointer";
                break;
              case ObjectSlotKind::kMaybeObjectPointer:
                iterate_command = "IterateMaybeWeakPointer";
                break;
              case ObjectSlotKind::kCustomWeakPointer:
                iterate_command = "IterateCustomWeakPointer";
                break;
              case ObjectSlotKind::kNoPointer:
                // Pointer-free sections need no visitation call.
                break;
            }
            if (iterate_command) {
              if (multiple_slots) *iterate_command += "s";
              h_contents << "    " << *iterate_command << "(obj, "
                         << start_offset;
              if (multiple_slots) {
                h_contents << ", "
                           << (i == slots.size() ? "object_size"
                                                 : std::to_string(end_offset));
              }
              h_contents << ", v);\n";
            }
            start_offset = end_offset;
          }
          // section_kind is only read after the i == 0 iteration assigned it.
          if (i < slots.size()) section_kind = *next_section_kind;
          end_offset += TargetArchitecture::TaggedSize();
        }

        h_contents << "  }\n\n";
      }

      h_contents
          << "  static inline int SizeOf(Map map, HeapObject raw_object) {\n";
      if (type->size().SingleValue()) {
        h_contents << "    return " << *type->size().SingleValue() << ";\n";
      } else {
        // We use an unchecked_cast here because this is used for concurrent
        // marking, where we shouldn't re-read the map.
        h_contents << "    return " << name
                   << "::unchecked_cast(raw_object).AllocatedSize();\n";
      }
      h_contents << "  }\n\n";

      h_contents << "};\n";
    }

    WriteFile(output_directory + "/" + file_name, h_contents.str());
}

namespace {

// Generate verification code for a single piece of class data, which might be
// nested within a struct or might be a single element in an indexed field (or
// both).
//
// |offset| is a C++ expression (rendered as a string) for the slot's offset
// within object "o"; when |indexed| is true, "i * indexed_field_size" is
// appended so the code works inside the surrounding per-element loop. |is_map|
// selects reading via o.map() instead of a TaggedField load. |class_name| is
// currently unused in the body.
void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
                                std::string offset, const Field& leaf_field,
                                std::string indexed_field_size,
                                std::ostream& cc_contents, bool is_map) {
  const Type* field_type = leaf_field.name_and_type.type;

  // Fields that are not strong-tagged may hold weak references, so they are
  // read and verified as MaybeObject.
  bool maybe_object =
      !field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
  const char* object_type = maybe_object ? "MaybeObject" : "Object";
  const char* verify_fn =
      maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer";
  if (indexed) {
    offset += " + i * " + indexed_field_size;
  }
  // Name the local var based on the field name for nicer CHECK output.
  const std::string value = leaf_field.name_and_type.name + "__value";

  // Read the field.
  if (is_map) {
    cc_contents << "    " << object_type << " " << value << " = o.map();\n";
  } else {
    cc_contents << "    " << object_type << " " << value << " = TaggedField<"
                << object_type << ">::load(o, " << offset << ");\n";
  }

  // Call VerifyPointer or VerifyMaybeObjectPointer on it.
  cc_contents << "    " << object_type << "::" << verify_fn << "(isolate, "
              << value << ");\n";

  // Check that the value is of an appropriate type. We can skip this part for
  // the Object type because it would not check anything beyond what we already
  // checked with VerifyPointer.
  if (field_type != TypeOracle::GetObjectType()) {
    cc_contents << "    CHECK(" << GenerateRuntimeTypeCheck(field_type, value)
                << ");\n";
  }
}

// Emits code into |cc_contents| that verifies field |f| of |class_type| on an
// object named "o". Only tagged fields (and structs containing tagged fields)
// are verified; float64-or-hole and possibly-uninitialized fields are skipped.
// |h_contents| is currently unused here but kept in the signature.
void GenerateClassFieldVerifier(const std::string& class_name,
                                const ClassType& class_type, const Field& f,
                                std::ostream& h_contents,
                                std::ostream& cc_contents) {
  const Type* field_type = f.name_and_type.type;

  // We only verify tagged types, not raw numbers or pointers. Structs
  // consisting of tagged types are also included.
  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
      !field_type->StructSupertype())
    return;
  if (field_type == TypeOracle::GetFloat64OrHoleType()) return;
  // Do not verify if the field may be uninitialized.
  if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;

  std::string field_start_offset;
  if (f.index) {
    // Indexed field: obtain its runtime offset and length from the generated
    // slice macro, then wrap the checks in a per-element loop.
    field_start_offset = f.name_and_type.name + "__offset";
    std::string length = f.name_and_type.name + "__length";
    cc_contents << "  intptr_t " << field_start_offset << ", " << length
                << ";\n";
    cc_contents << "  std::tie(std::ignore, " << field_start_offset << ", "
                << length << ") = "
                << Callable::PrefixNameForCCOutput(
                       class_type.GetSliceMacroName(f))
                << "(o);\n";

    // Slices use intptr, but TaggedField<T>.load() uses int, so verify that
    // such a cast is valid.
    cc_contents << "  CHECK_EQ(" << field_start_offset << ", static_cast<int>("
                << field_start_offset << "));\n";
    cc_contents << "  CHECK_EQ(" << length << ", static_cast<int>(" << length
                << "));\n";
    // From here on, refer to the narrowed int values.
    field_start_offset = "static_cast<int>(" + field_start_offset + ")";
    length = "static_cast<int>(" + length + ")";

    cc_contents << "  for (int i = 0; i < " << length << "; ++i) {\n";
  } else {
    // Non-indexed fields have known offsets.
    field_start_offset = std::to_string(*f.offset);
    // Open a plain scope so both branches can be closed identically below.
    cc_contents << "  {\n";
  }

  if (auto struct_type = field_type->StructSupertype()) {
    // Verify each tagged member of the struct at its packed offset within the
    // field.
    for (const Field& struct_field : (*struct_type)->fields()) {
      if (struct_field.name_and_type.type->IsSubtypeOf(
              TypeOracle::GetTaggedType())) {
        GenerateFieldValueVerifier(
            class_name, f.index.has_value(),
            field_start_offset + " + " + std::to_string(*struct_field.offset),
            struct_field, std::to_string((*struct_type)->PackedSize()),
            cc_contents, f.name_and_type.name == "map");
      }
    }
  } else {
    GenerateFieldValueVerifier(class_name, f.index.has_value(),
                               field_start_offset, f, "kTaggedSize",
                               cc_contents, f.name_and_type.name == "map");
  }

  cc_contents << "  }\n";
}

}  // namespace

// Writes class-verifiers.h/.cc: one static TorqueGeneratedClassVerifiers
// method per Torque class that requests verification, each checking the
// superclass chain, the object's own type, and every verifiable field.
// Both files are guarded by VERIFY_HEAP.
void ImplementationVisitor::GenerateClassVerifiers(
    const std::string& output_directory) {
  std::string file_name = "class-verifiers";
  std::stringstream h_contents;
  std::stringstream cc_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");
    IfDefScope verify_heap_h(h_contents, "VERIFY_HEAP");
    IfDefScope verify_heap_cc(cc_contents, "VERIFY_HEAP");

    h_contents << "#include \"src/base/macros.h\"\n\n";

    cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n\n";
    cc_contents << "#include \"src/objects/all-objects-inl.h\"\n";

    IncludeObjectMacrosScope object_macros(cc_contents);

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
    NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});

    // NOTE(review): this include is emitted after the namespaces are opened,
    // i.e. inside v8::internal in the generated file — presumably intentional
    // so that test-only Torque class definitions are visible to the verifiers
    // below; confirm against the generated .inc's contents.
    cc_contents
        << "#include \"torque-generated/test/torque/test-torque-tq-inl.inc\"\n";

    // Generate forward declarations to avoid including any headers.
    h_contents << "class Isolate;\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (!type->ShouldGenerateVerify()) continue;
      h_contents << "class " << type->name() << ";\n";
    }

    const char* verifier_class = "TorqueGeneratedClassVerifiers";

    h_contents << "class V8_EXPORT_PRIVATE " << verifier_class << "{\n";
    h_contents << " public:\n";

    for (const ClassType* type : TypeOracle::GetClasses()) {
      std::string name = type->name();
      if (!type->ShouldGenerateVerify()) continue;

      std::string method_name = name + "Verify";

      // Declaration in the header...
      h_contents << "  static void " << method_name << "(" << name
                 << " o, Isolate* isolate);\n";

      // ...and the definition in the .cc file.
      cc_contents << "void " << verifier_class << "::" << method_name << "("
                  << name << " o, Isolate* isolate) {\n";

      // First, do any verification for the super class. Not all classes have
      // verifiers, so skip to the nearest super class that has one.
      const ClassType* super_type = type->GetSuperClass();
      while (super_type && !super_type->ShouldGenerateVerify()) {
        super_type = super_type->GetSuperClass();
      }
      if (super_type) {
        std::string super_name = super_type->name();
        cc_contents << "  o." << super_name << "Verify(isolate);\n";
      }

      // Second, verify that this object is what it claims to be.
      cc_contents << "  CHECK(o.Is" << name << "(isolate));\n";

      // Third, verify its properties.
      for (auto f : type->fields()) {
        GenerateClassFieldVerifier(name, *type, f, h_contents, cc_contents);
      }

      cc_contents << "}\n";
    }

    h_contents << "};\n";
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}

// Writes enum-verifiers.cc: for every Torque enum, a switch over all declared
// entries. If the underlying C++ enum and the Torque declaration drift apart,
// the generated switch fails to compile, surfacing the mismatch.
void ImplementationVisitor::GenerateEnumVerifiers(
    const std::string& output_directory) {
  const std::string file_name = "enum-verifiers";
  std::stringstream out;
  {
    out << "#include \"src/compiler/code-assembler.h\"\n";
    for (const std::string& include_path : GlobalContext::CppIncludes()) {
      out << "#include " << StringLiteralQuote(include_path) << "\n";
    }
    out << "\n";

    NamespaceScope namespaces(out, {"v8", "internal", ""});

    out << "class EnumVerifier {\n";
    for (const auto& desc : GlobalContext::Get().ast()->EnumDescriptions()) {
      // Record the enum's declaration site for easier debugging.
      out << "  // " << desc.name << " (" << desc.pos << ")\n";
      out << "  void VerifyEnum_" << desc.name << "("
          << desc.constexpr_generates
          << " x) {\n"
             "    switch(x) {\n";
      for (const auto& entry : desc.entries) {
        out << "      case " << entry << ": break;\n";
      }
      // Open enums allow values beyond the listed entries.
      if (desc.is_open) out << "      default: break;\n";
      out << "    }\n  }\n\n";
    }
    out << "};\n";
  }

  WriteFile(output_directory + "/" + file_name + ".cc", out.str());
}

// Writes exported-macros-assembler.h/.cc: a CSA helper class with one C++
// method per Torque macro that is exported to CSA, each forwarding to the
// corresponding generated -tq-csa function.
void ImplementationVisitor::GenerateExportedMacrosAssembler(
    const std::string& output_directory) {
  std::string file_name = "exported-macros-assembler";
  std::stringstream h_contents;
  std::stringstream cc_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");

    h_contents << "#include \"src/compiler/code-assembler.h\"\n";
    h_contents << "#include \"src/execution/frames.h\"\n";
    h_contents << "#include \"torque-generated/csa-types.h\"\n";

    // User-specified extra includes come first in the .cc file.
    for (const std::string& include_path : GlobalContext::CppIncludes()) {
      cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
    }
    cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";

    // Include every generated CSA header, since exported macros can be
    // defined in any Torque source file.
    for (SourceId file : SourceFileMap::AllSources()) {
      cc_contents << "#include \"torque-generated/" +
                         SourceFileMap::PathFromV8RootWithoutExtension(file) +
                         "-tq-csa.h\"\n";
    }

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
    NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});

    h_contents << "class V8_EXPORT_PRIVATE "
                  "TorqueGeneratedExportedMacrosAssembler {\n"
               << " public:\n"
               << "  explicit TorqueGeneratedExportedMacrosAssembler"
                  "(compiler::CodeAssemblerState* state) : state_(state) {\n"
               << "    USE(state_);\n"
               << "  }\n";

    for (auto& declarable : GlobalContext::AllDeclarables()) {
      TorqueMacro* macro = TorqueMacro::DynamicCast(declarable.get());
      if (!(macro && macro->IsExportedToCSA())) continue;
      CurrentSourcePosition::Scope position_activator(macro->Position());

      // Derive the wrapper method's C++ signature from the macro's Torque
      // signature.
      cpp::Class assembler("TorqueGeneratedExportedMacrosAssembler");
      std::vector<std::string> generated_parameter_names;
      cpp::Function f = GenerateFunction(
          &assembler, macro->ReadableName(), macro->signature(),
          macro->parameter_names(), false, &generated_parameter_names);

      // Declaration into the header; the definition forwards to the generated
      // CSA function, passing the stored CodeAssemblerState first.
      f.PrintDeclaration(h_contents);
      f.PrintDefinition(cc_contents, [&](std::ostream& stream) {
        stream << "return " << macro->ExternalName() << "(state_";
        for (const auto& name : generated_parameter_names) {
          stream << ", " << name;
        }
        stream << ");";
      });
    }

    h_contents << " private:\n"
               << "  compiler::CodeAssemblerState* state_;\n"
               << "};\n";
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}

namespace {

// Flattens |field| into a list of dotted leaf-field paths appended to
// |result|: a struct-typed field "a" with members "x" and "y" yields
// "<path>a.x" and "<path>a.y"; a non-struct field yields "<path><name>".
void CollectAllFields(const std::string& path, const Field& field,
                      std::vector<std::string>& result) {
  // Use StructSupertype() both for the test and for the fields, instead of
  // StructType::DynamicCast: DynamicCast would return null (and crash below)
  // for a type that has a struct supertype without itself being a StructType.
  // This also matches how struct fields are handled elsewhere in this file.
  if (auto struct_type = field.name_and_type.type->StructSupertype()) {
    std::string next_path = path + field.name_and_type.name + ".";
    for (const auto& inner_field : (*struct_type)->fields()) {
      CollectAllFields(next_path, inner_field, result);
    }
  } else {
    result.push_back(path + field.name_and_type.name);
  }
}

}  // namespace

void ImplementationVisitor::GenerateCSATypes(
    const std::string& output_directory) {
5396
  std::string file_name = "csa-types";
5397 5398 5399 5400 5401 5402 5403
  std::stringstream h_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");
    h_contents << "#include \"src/compiler/code-assembler.h\"\n\n";

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});

5404 5405
    // Generates headers for all structs in a topologically-sorted order, since
    // TypeOracle keeps them in the order of their resolution
5406
    for (const auto& type : TypeOracle::GetAggregateTypes()) {
5407
      const StructType* struct_type = StructType::DynamicCast(type.get());
5408
      if (!struct_type) continue;
5409 5410
      h_contents << "struct " << struct_type->GetGeneratedTypeNameImpl()
                 << " {\n";
5411 5412 5413 5414 5415 5416
      for (auto& field : struct_type->fields()) {
        h_contents << "  " << field.name_and_type.type->GetGeneratedTypeName();
        h_contents << " " << field.name_and_type.name << ";\n";
      }
      h_contents << "\n  std::tuple<";
      bool first = true;
5417
      for (const Type* lowered_type : LowerType(struct_type)) {
5418 5419 5420 5421
        if (!first) {
          h_contents << ", ";
        }
        first = false;
5422
        h_contents << lowered_type->GetGeneratedTypeName();
5423
      }
5424
      std::vector<std::string> all_fields;
5425
      for (auto& field : struct_type->fields()) {
5426
        CollectAllFields("", field, all_fields);
5427
      }
5428 5429 5430
      h_contents << "> Flatten() const {\n"
                    "    return std::make_tuple(";
      PrintCommaSeparatedList(h_contents, all_fields);
5431 5432 5433 5434 5435 5436 5437 5438
      h_contents << ");\n";
      h_contents << "  }\n";
      h_contents << "};\n";
    }
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
}

void ReportAllUnusedMacros() {
  for (const auto& declarable : GlobalContext::AllDeclarables()) {
    if (!declarable->IsMacro() || declarable->IsExternMacro()) continue;

    Macro* macro = Macro::cast(declarable.get());
    if (macro->IsUsed()) continue;

    if (macro->IsTorqueMacro() && TorqueMacro::cast(macro)->IsExportedToCSA()) {
      continue;
    }
5449 5450 5451 5452 5453 5454 5455 5456 5457 5458
    // TODO(gsps): Mark methods of generic structs used if they are used in any
    // instantiation
    if (Method* method = Method::DynamicCast(macro)) {
      if (StructType* struct_type =
              StructType::DynamicCast(method->aggregate_type())) {
        if (struct_type->GetSpecializedFrom().has_value()) {
          continue;
        }
      }
    }
5459 5460 5461 5462 5463

    std::vector<std::string> ignored_prefixes = {"Convert<", "Cast<",
                                                 "FromConstexpr<"};
    const std::string name = macro->ReadableName();
    const bool ignore =
5464
        StartsWithSingleUnderscore(name) ||
5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476
        std::any_of(ignored_prefixes.begin(), ignored_prefixes.end(),
                    [&name](const std::string& prefix) {
                      return StringStartsWith(name, prefix);
                    });

    if (!ignore) {
      Lint("Macro '", macro->ReadableName(), "' is never used.")
          .Position(macro->IdentifierPosition());
    }
  }
}

}  // namespace torque
}  // namespace internal
}  // namespace v8