// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/torque/implementation-visitor.h"

#include <algorithm>
#include <iomanip>
#include <string>

#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/torque/cc-generator.h"
#include "src/torque/cfg.h"
#include "src/torque/constants.h"
#include "src/torque/cpp-builder.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
#include "src/torque/source-positions.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"

namespace v8 {
namespace internal {
namespace torque {

// Sadly, 'using std::string_literals::operator""s;' is bugged in MSVC (see
// https://developercommunity.visualstudio.com/t/Incorrect-warning-when-using-standard-st/673948).
// TODO(nicohartmann@): Change to 'using std::string_literals::operator""s;'
// once this is fixed.
using namespace std::string_literals;  // NOLINT(build/namespaces)

namespace {
const char* BuiltinIncludesMarker = "// __BUILTIN_INCLUDES_MARKER__\n";
}  // namespace

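// Top-level expression dispatch: AST_EXPRESSION_NODE_KIND_LIST expands the
// ENUM_ITEM X-macro once per expression node kind, so each kind gets a case
// that downcasts and forwards to the matching Visit overload.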
VisitResult ImplementationVisitor::Visit(Expression* expr) {
  CurrentSourcePosition::Scope scope(expr->pos);
  switch (expr->kind) {
#define ENUM_ITEM(name)        \
  case AstNode::Kind::k##name: \
    return Visit(name::cast(expr));
    AST_EXPRESSION_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
    default:
      UNREACHABLE();
  }
}

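// Statement dispatch works like expression dispatch above. The returned type
// encodes reachability: GetNeverType() means control cannot fall through,
// which the DCHECK_EQ cross-checks against the assembler's completed-block
// state.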
const Type* ImplementationVisitor::Visit(Statement* stmt) {
  CurrentSourcePosition::Scope scope(stmt->pos);
  StackScope stack_scope(this);
  const Type* result;
  switch (stmt->kind) {
#define ENUM_ITEM(name)               \
  case AstNode::Kind::k##name:        \
    result = Visit(name::cast(stmt)); \
    break;
    AST_STATEMENT_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
    default:
      UNREACHABLE();
  }
  DCHECK_EQ(result == TypeOracle::GetNeverType(),
            assembler().CurrentBlockIsComplete());
  return result;
}

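// Writes the per-source-file preambles. As a rough sketch, for a hypothetical
// source src/builtins/foo.tq the CSA .cc stream starts with the configured
// C++ includes followed by roughly:
//
//   // Required Builtins:
//   #include "torque-generated/src/builtins/foo-tq-csa.h"
//   // __BUILTIN_INCLUDES_MARKER__
//
//   namespace v8 {
//   namespace internal {
//
// The marker line is patched with the full include set once it is known (see
// the TODO below).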
void ImplementationVisitor::BeginGeneratedFiles() {
  std::set<SourceId> contains_class_definitions;
  for (const ClassType* type : TypeOracle::GetClasses()) {
    if (type->GenerateCppClassDefinitions()) {
      contains_class_definitions.insert(type->AttributedToFile());
    }
  }

  for (SourceId source : SourceFileMap::AllSources()) {
    auto& streams = GlobalContext::GeneratedPerFile(source);
    // Output beginning of CSA .cc file.
    {
      cpp::File& file = streams.csa_cc;

      for (const std::string& include_path : GlobalContext::CppIncludes()) {
        file << "#include " << StringLiteralQuote(include_path) << "\n";
      }

      file << "// Required Builtins:\n";
      file << "#include \"torque-generated/" +
                  SourceFileMap::PathFromV8RootWithoutExtension(source) +
                  "-tq-csa.h\"\n";
      // Since the required include files are collected while generating the
      // file, we only know the full set at the end. Insert a marker here that
      // is replaced with the list of includes at the very end.
      // TODO(nicohartmann@): This is not the most beautiful way to do this,
      // replace once the cpp file builder is available, where this can be
      // handled easily.
      file << BuiltinIncludesMarker;
      file << "\n";

      streams.csa_cc.BeginNamespace("v8", "internal");
    }
    // Output beginning of CSA .h file.
    {
      cpp::File& file = streams.csa_header;
      std::string header_define =
          "V8_GEN_TORQUE_GENERATED_" +
          UnderlinifyPath(SourceFileMap::PathFromV8Root(source)) + "_CSA_H_";
      streams.csa_header.BeginIncludeGuard(header_define);
      file << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
      file << "\n";

      streams.csa_header.BeginNamespace("v8", "internal");
    }
    // Output beginning of class definition .cc file.
    {
      cpp::File& file = streams.class_definition_cc;
      if (contains_class_definitions.count(source) != 0) {
        file << "#include \""
             << SourceFileMap::PathFromV8RootWithoutExtension(source)
             << "-inl.h\"\n\n";
        file << "#include \"torque-generated/class-verifiers.h\"\n";
        file << "#include \"src/objects/instance-type-inl.h\"\n\n";
      }

      streams.class_definition_cc.BeginNamespace("v8", "internal");
    }
  }
}

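// Mirrors BeginGeneratedFiles(): closes every namespace and include guard
// opened there, in reverse order.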
void ImplementationVisitor::EndGeneratedFiles() {
  for (SourceId file : SourceFileMap::AllSources()) {
    auto& streams = GlobalContext::GeneratedPerFile(file);

    // Output ending of CSA .cc file.
    streams.csa_cc.EndNamespace("v8", "internal");

    // Output ending of CSA .h file.
    {
      std::string header_define =
          "V8_GEN_TORQUE_GENERATED_" +
          UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_CSA_H_";

      streams.csa_header.EndNamespace("v8", "internal");
      streams.csa_header.EndIncludeGuard(header_define);
    }

    // Output ending of class definition .cc file.
    streams.class_definition_cc.EndNamespace("v8", "internal");
  }
}

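// Starts the generated debug-macros.{cc,h} pair used by tools/debug_helper.
// The header skeleton emitted below begins:
//
//   #ifndef V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_
//   #define V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_
//
//   #include "tools/debug_helper/debug-helper-internal.h"
//
//   namespace v8 {
//   namespace internal {
//   namespace debug_helper_internal {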
void ImplementationVisitor::BeginDebugMacrosFile() {
  // TODO(torque-builder): Can use builder for debug_macros_*_
  std::ostream& source = debug_macros_cc_;
  std::ostream& header = debug_macros_h_;

  source << "#include \"torque-generated/debug-macros.h\"\n\n";
  source << "#include \"src/objects/swiss-name-dictionary.h\"\n";
  source << "#include \"src/objects/ordered-hash-table.h\"\n";
  source << "#include \"tools/debug_helper/debug-macro-shims.h\"\n";
  source << "#include \"include/v8-internal.h\"\n";
  source << "\n";

  source << "namespace v8 {\n"
         << "namespace internal {\n"
         << "namespace debug_helper_internal {\n"
         << "\n";

  const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_";
  header << "#ifndef " << kHeaderDefine << "\n";
  header << "#define " << kHeaderDefine << "\n\n";
  header << "#include \"tools/debug_helper/debug-helper-internal.h\"\n";
  header << "\n";

  header << "namespace v8 {\n"
         << "namespace internal {\n"
         << "namespace debug_helper_internal {\n"
         << "\n";
}

void ImplementationVisitor::EndDebugMacrosFile() {
  // TODO(torque-builder): Can use builder for debug_macros_*_
  std::ostream& source = debug_macros_cc_;
  std::ostream& header = debug_macros_h_;

  source << "}  // namespace debug_helper_internal\n"
         << "}  // namespace internal\n"
         << "}  // namespace v8\n"
         << "\n";

  header << "\n}  // namespace debug_helper_internal\n"
         << "}  // namespace internal\n"
         << "}  // namespace v8\n"
         << "\n";
  header << "#endif  // V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_\n";
}

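// Generates a CSA-backed accessor function for a namespace-level constant:
// the constant's initializer expression is compiled to a CFG, emitted
// through CSAGenerator, and the implicitly converted result is returned.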
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
  Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(),
                      {}, false};

  BindingsManagersScope bindings_managers_scope;

  cpp::Function f =
      GenerateFunction(nullptr, decl->external_name(), signature, {});

  f.PrintDeclaration(csa_headerfile());

  f.PrintDefinition(csa_ccfile(), [&](std::ostream& stream) {
    stream << "  compiler::CodeAssembler ca_(state_);\n";

    DCHECK(!signature.return_type->IsVoidOrNever());

    assembler_ = CfgAssembler(Stack<const Type*>{});

    VisitResult expression_result = Visit(decl->body());
    VisitResult return_result =
        GenerateImplicitConvert(signature.return_type, expression_result);

    CSAGenerator csa_generator{assembler().Result(), stream};
    Stack<std::string> values = *csa_generator.EmitGraph(Stack<std::string>{});

    assembler_ = base::nullopt;

    stream << "  return ";
    CSAGenerator::EmitCSAValue(return_result, values, stream);
    stream << ";";
  });
}

void ImplementationVisitor::Visit(TypeAlias* alias) {
  if (alias->IsRedeclaration()) return;
  if (const ClassType* class_type = ClassType::DynamicCast(alias->type())) {
    if (class_type->IsExtern() && !class_type->nspace()->IsDefaultNamespace()) {
      Error(
          "extern classes are currently only supported in the default "
          "namespace");
    }
  }
}

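// RAII guard that tracks which macros are currently being inlined, so that
// recursive (and thus infinitely expanding) macro calls are reported as
// errors instead of crashing the compiler. A minimal usage sketch:
//
//   {
//     MacroInliningScope scope(this, macro);  // errors out on re-entry
//     ... visit the macro body ...
//   }  // 'macro' becomes inlinable again here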
class ImplementationVisitor::MacroInliningScope {
 public:
  MacroInliningScope(ImplementationVisitor* visitor, const Macro* macro)
      : visitor_(visitor), macro_(macro) {
    if (!visitor_->inlining_macros_.insert(macro).second) {
      // Recursive macro expansion would just keep going until stack overflow.
      // To avoid crashes, throw an error immediately.
      ReportError("Recursive macro call to ", *macro);
    }
  }
  ~MacroInliningScope() { visitor_->inlining_macros_.erase(macro_); }

 private:
  ImplementationVisitor* visitor_;
  const Macro* macro_;
};

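// Expands a macro body at the current CFG position: binds 'this' (for
// methods), parameters, and outgoing labels, then visits the body. Return
// statements inside the macro are lowered to gotos targeting the synthetic
// macro-end block created below, and the accumulated return value is handed
// back to the caller.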
VisitResult ImplementationVisitor::InlineMacro(
    Macro* macro, base::Optional<LocationReference> this_reference,
    const std::vector<VisitResult>& arguments,
    const std::vector<Block*> label_blocks) {
  MacroInliningScope macro_inlining_scope(this, macro);
  CurrentScope::Scope current_scope(macro);
  BindingsManagersScope bindings_managers_scope;
  CurrentCallable::Scope current_callable(macro);
  CurrentReturnValue::Scope current_return_value;
  const Signature& signature = macro->signature();
  const Type* return_type = macro->signature().return_type;
  bool can_return = return_type != TypeOracle::GetNeverType();

  BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
  BlockBindings<LocalLabel> label_bindings(&LabelBindingsManager::Get());
  DCHECK_EQ(macro->signature().parameter_names.size(),
            arguments.size() + (this_reference ? 1 : 0));
  DCHECK_EQ(this_reference.has_value(), macro->IsMethod());

  // Bind the this for methods. Methods that modify a struct-type "this" must
  // only be called if the this is in a variable, in which case the
  // LocalValue is non-const. Otherwise, the LocalValue used for the parameter
  // binding is const, and thus read-only, which will cause errors if
  // modified, e.g. when called by a struct method that sets the struct's
  // fields. This prevents using temporary struct values for anything other
  // than read operations.
  if (this_reference) {
    DCHECK(macro->IsMethod());
    parameter_bindings.Add(kThisParameterName, LocalValue{*this_reference},
                           true);
  }

  size_t i = 0;
  for (auto arg : arguments) {
    if (this_reference && i == signature.implicit_count) i++;
    const bool mark_as_used = signature.implicit_count > i;
    const Identifier* name = macro->parameter_names()[i++];
    parameter_bindings.Add(name,
                           LocalValue{LocationReference::Temporary(
                               arg, "parameter " + name->value)},
                           mark_as_used);
  }

  DCHECK_EQ(label_blocks.size(), signature.labels.size());
  for (size_t i = 0; i < signature.labels.size(); ++i) {
    const LabelDeclaration& label_info = signature.labels[i];
    label_bindings.Add(label_info.name,
                       LocalLabel{label_blocks[i], label_info.types});
  }

  Block* macro_end;
  base::Optional<Binding<LocalLabel>> macro_end_binding;
  if (can_return) {
    Stack<const Type*> stack = assembler().CurrentStack();
    std::vector<const Type*> lowered_return_types = LowerType(return_type);
    stack.PushMany(lowered_return_types);
    if (!return_type->IsConstexpr()) {
      SetReturnValue(VisitResult(return_type,
                                 stack.TopRange(lowered_return_types.size())));
    }
    // The stack copy used to initialize the _macro_end block is only used
    // as a template for the actual gotos generated by return statements. It
    // doesn't correspond to any real return values, and thus shouldn't contain
    // top types, because these would pollute actual return value types that get
    // unioned with them for return statements, erroneously forcing them to top.
    for (auto i = stack.begin(); i != stack.end(); ++i) {
      if ((*i)->IsTopType()) {
        *i = TopType::cast(*i)->source_type();
      }
    }
    macro_end = assembler().NewBlock(std::move(stack));
    macro_end_binding.emplace(&LabelBindingsManager::Get(), kMacroEndLabelName,
                              LocalLabel{macro_end, {return_type}});
  } else {
    SetReturnValue(VisitResult::NeverResult());
  }

  const Type* result = Visit(*macro->body());

  if (result->IsNever()) {
    if (!return_type->IsNever() && !macro->HasReturns()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " that never returns must have return type never";
      ReportError(s.str());
    }
  } else {
    if (return_type->IsNever()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " has implicit return at end of its declaration but return type "
           "never";
      ReportError(s.str());
    } else if (!macro->signature().return_type->IsVoid()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " expects to return a value but doesn't on all paths";
      ReportError(s.str());
    }
  }
  if (!result->IsNever()) {
    assembler().Goto(macro_end);
  }

  if (macro->HasReturns() || !result->IsNever()) {
    assembler().Bind(macro_end);
  }

  return GetAndClearReturnValue();
}

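// Shared code generation for macros and methods. Depending on output_type_,
// the body is emitted as plain C++ (kCC), debug-helper C++ (kCCDebug), or
// CSA code (kCSA); the macro's labels become GotoExternal edges to
// caller-provided targets.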
void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
  CurrentCallable::Scope current_callable(macro);
  const Signature& signature = macro->signature();
  const Type* return_type = macro->signature().return_type;
  bool can_return = return_type != TypeOracle::GetNeverType();
  bool has_return_value =
      can_return && return_type != TypeOracle::GetVoidType();

  cpp::Function f = GenerateMacroFunctionDeclaration(macro);
  f.PrintDeclaration(csa_headerfile());

  cpp::File csa_cc(csa_ccfile());

  // Avoid multiple-definition errors since it is possible for multiple
  // generated -inl.inc files to all contain function definitions for the same
  // Torque macro.
  base::Optional<cpp::IncludeGuardScope> include_guard;
  if (output_type_ == OutputType::kCC) {
    include_guard.emplace(&csa_cc, "V8_INTERNAL_DEFINED_"s + macro->CCName());
  } else if (output_type_ == OutputType::kCCDebug) {
    include_guard.emplace(&csa_cc,
                          "V8_INTERNAL_DEFINED_"s + macro->CCDebugName());
  }

  f.PrintBeginDefinition(csa_ccfile());

  if (output_type_ == OutputType::kCC) {
    // For now, generated C++ is only for field offset computations. If we ever
    // generate C++ code that can allocate, then it should be handlified.
    csa_ccfile() << "  DisallowGarbageCollection no_gc;\n";
  } else if (output_type_ == OutputType::kCSA) {
    csa_ccfile() << "  compiler::CodeAssembler ca_(state_);\n";
    csa_ccfile()
        << "  compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
  }

  Stack<std::string> lowered_parameters;
  Stack<const Type*> lowered_parameter_types;

  std::vector<VisitResult> arguments;

  base::Optional<LocationReference> this_reference;
  if (Method* method = Method::DynamicCast(macro)) {
    const Type* this_type = method->aggregate_type();
    LowerParameter(this_type, ExternalParameterName(kThisParameterName),
                   &lowered_parameters);
    StackRange range = lowered_parameter_types.PushMany(LowerType(this_type));
    VisitResult this_result = VisitResult(this_type, range);
    // For classes, mark 'this' as a temporary to prevent assignment to it.
    // Note that using a VariableAccess for non-class types is technically
    // incorrect because changes to the 'this' variable do not get reflected
    // to the caller. Therefore struct methods should always be inlined and a
    // C++ version should never be generated, since it would be incorrect.
    // However, in order to be able to type- and semantics-check even unused
    // struct methods, set the this_reference to be the local variable copy of
    // the passed-in this, which allows the visitor to at least find and report
    // errors.
    this_reference =
        (this_type->IsClassType())
            ? LocationReference::Temporary(this_result, "this parameter")
            : LocationReference::VariableAccess(this_result);
  }

  for (size_t i = 0; i < macro->signature().parameter_names.size(); ++i) {
    if (this_reference && i == macro->signature().implicit_count) continue;
    const std::string& name = macro->parameter_names()[i]->value;
    std::string external_name = ExternalParameterName(name);
    const Type* type = macro->signature().types()[i];

    if (type->IsConstexpr()) {
      arguments.push_back(VisitResult(type, external_name));
    } else {
      LowerParameter(type, external_name, &lowered_parameters);
      StackRange range = lowered_parameter_types.PushMany(LowerType(type));
      arguments.push_back(VisitResult(type, range));
    }
  }

  DCHECK_EQ(lowered_parameters.Size(), lowered_parameter_types.Size());
  assembler_ = CfgAssembler(lowered_parameter_types);

  std::vector<Block*> label_blocks;
  for (const LabelDeclaration& label_info : signature.labels) {
    Stack<const Type*> label_input_stack;
    for (const Type* type : label_info.types) {
      label_input_stack.PushMany(LowerType(type));
    }
    Block* block = assembler().NewBlock(std::move(label_input_stack));
    label_blocks.push_back(block);
  }

  VisitResult return_value =
      InlineMacro(macro, this_reference, arguments, label_blocks);
  Block* end = assembler().NewBlock();
  if (return_type != TypeOracle::GetNeverType()) {
    assembler().Goto(end);
  }

  for (size_t i = 0; i < label_blocks.size(); ++i) {
    Block* label_block = label_blocks[i];
    const LabelDeclaration& label_info = signature.labels[i];
    assembler().Bind(label_block);
    std::vector<std::string> label_parameter_variables;
    for (size_t i = 0; i < label_info.types.size(); ++i) {
      LowerLabelParameter(label_info.types[i],
                          ExternalLabelParameterName(label_info.name->value, i),
                          &label_parameter_variables);
    }
    assembler().Emit(GotoExternalInstruction{
        ExternalLabelName(label_info.name->value), label_parameter_variables});
  }

  if (return_type != TypeOracle::GetNeverType()) {
    assembler().Bind(end);
  }

  base::Optional<Stack<std::string>> values;
  if (output_type_ == OutputType::kCC) {
    CCGenerator cc_generator{assembler().Result(), csa_ccfile()};
    values = cc_generator.EmitGraph(lowered_parameters);
  } else if (output_type_ == OutputType::kCCDebug) {
    CCGenerator cc_generator{assembler().Result(), csa_ccfile(), true};
    values = cc_generator.EmitGraph(lowered_parameters);
  } else {
    CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
    values = csa_generator.EmitGraph(lowered_parameters);
  }

  assembler_ = base::nullopt;

  if (has_return_value) {
    csa_ccfile() << "  return ";
    if (output_type_ == OutputType::kCCDebug) {
      csa_ccfile() << "{d::MemoryAccessResult::kOk, ";
      CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
      csa_ccfile() << "}";
    } else if (output_type_ == OutputType::kCC) {
      CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
    } else {
      CSAGenerator::EmitCSAValue(return_value, *values, csa_ccfile());
    }
    csa_ccfile() << ";\n";
  }
  f.PrintEndDefinition(csa_ccfile());

  include_guard.reset();
  csa_ccfile() << "\n";
}

void ImplementationVisitor::Visit(TorqueMacro* macro) {
  VisitMacroCommon(macro);
}

void ImplementationVisitor::Visit(Method* method) {
  DCHECK(!method->IsExternal());
  VisitMacroCommon(method);
}

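// The helper below registers builtin parameter number i: it pushes the
// external name "parameter<i>" and the parameter's lowered type(s) onto the
// given stacks, and binds the Torque-level name to a read-only temporary.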
namespace {

std::string AddParameter(size_t i, Builtin* builtin,
                         Stack<std::string>* parameters,
                         Stack<const Type*>* parameter_types,
                         BlockBindings<LocalValue>* parameter_bindings,
                         bool mark_as_used) {
  const Identifier* name = builtin->signature().parameter_names[i];
  const Type* type = builtin->signature().types()[i];
  std::string external_name = "parameter" + std::to_string(i);
  parameters->Push(external_name);
  StackRange range = parameter_types->PushMany(LowerType(type));
  parameter_bindings->Add(
      name,
      LocalValue{LocationReference::Temporary(VisitResult(type, range),
                                              "parameter " + name->value)},
      mark_as_used);
  return external_name;
}

}  // namespace

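// Emits a TF_BUILTIN body for every non-external Torque builtin. For a
// hypothetical builtin FooBar, the generated code begins roughly with:
//
//   TF_BUILTIN(FooBar, CodeStubAssembler) {
//     compiler::CodeAssemblerState* state_ = state();
//     compiler::CodeAssembler ca_(state());
//     ...
//
// followed by one UncheckedParameter<>() load per descriptor slot; varargs
// JavaScript builtins additionally materialize a CodeStubArguments view.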
void ImplementationVisitor::Visit(Builtin* builtin) {
  if (builtin->IsExternal()) return;
  CurrentScope::Scope current_scope(builtin);
  CurrentCallable::Scope current_callable(builtin);
  CurrentReturnValue::Scope current_return_value;

  const std::string& name = builtin->ExternalName();
  const Signature& signature = builtin->signature();
  csa_ccfile() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
               << "  compiler::CodeAssemblerState* state_ = state();"
               << "  compiler::CodeAssembler ca_(state());\n";

  Stack<const Type*> parameter_types;
  Stack<std::string> parameters;

  BindingsManagersScope bindings_managers_scope;

  BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());

  if (builtin->IsVarArgsJavaScript() || builtin->IsFixedArgsJavaScript()) {
    if (builtin->IsVarArgsJavaScript()) {
      DCHECK(signature.parameter_types.var_args);
      if (signature.ExplicitCount() > 0) {
        Error("Cannot mix explicit parameters with varargs.")
            .Position(signature.parameter_names[signature.implicit_count]->pos);
      }

      csa_ccfile() << "   TNode<Word32T> argc = UncheckedParameter<Word32T>("
                   << "Descriptor::kJSActualArgumentsCount);\n";
      csa_ccfile() << "  TNode<IntPtrT> "
                      "arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
                      "Int32T>(argc)));\n";
      csa_ccfile() << "  TNode<RawPtrT> arguments_frame = "
                      "UncheckedCast<RawPtrT>(LoadFramePointer());\n";
      csa_ccfile() << "  TorqueStructArguments "
                      "torque_arguments(GetFrameArguments(arguments_frame, "
                      "arguments_length));\n";
      csa_ccfile()
          << "  CodeStubArguments arguments(this, torque_arguments);\n";

      parameters.Push("torque_arguments.frame");
      parameters.Push("torque_arguments.base");
      parameters.Push("torque_arguments.length");
      const Type* arguments_type = TypeOracle::GetArgumentsType();
      StackRange range = parameter_types.PushMany(LowerType(arguments_type));
      parameter_bindings.Add(*signature.arguments_variable,
                             LocalValue{LocationReference::Temporary(
                                 VisitResult(arguments_type, range),
                                 "parameter " + *signature.arguments_variable)},
                             true);
    }

    for (size_t i = 0; i < signature.implicit_count; ++i) {
      const std::string& param_name = signature.parameter_names[i]->value;
      SourcePosition param_pos = signature.parameter_names[i]->pos;
      std::string generated_name = AddParameter(
          i, builtin, &parameters, &parameter_types, &parameter_bindings, true);
      const Type* actual_type = signature.parameter_types.types[i];
      std::vector<const Type*> expected_types;
      if (param_name == "context") {
        csa_ccfile() << "  TNode<NativeContext> " << generated_name
                     << " = UncheckedParameter<NativeContext>("
                     << "Descriptor::kContext);\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetNativeContextType(),
                          TypeOracle::GetContextType()};
      } else if (param_name == "receiver") {
        csa_ccfile()
            << "  TNode<Object> " << generated_name << " = "
            << (builtin->IsVarArgsJavaScript()
                    ? "arguments.GetReceiver()"
                    : "UncheckedParameter<Object>(Descriptor::kReceiver)")
            << ";\n";
        csa_ccfile() << "USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSAnyType()};
      } else if (param_name == "newTarget") {
        csa_ccfile() << "  TNode<Object> " << generated_name
                     << " = UncheckedParameter<Object>("
                     << "Descriptor::kJSNewTarget);\n";
        csa_ccfile() << "USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSAnyType()};
      } else if (param_name == "target") {
        csa_ccfile() << "  TNode<JSFunction> " << generated_name
                     << " = UncheckedParameter<JSFunction>("
                     << "Descriptor::kJSTarget);\n";
        csa_ccfile() << "USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSFunctionType()};
      } else {
        Error(
            "Unexpected implicit parameter \"", param_name,
            "\" for JavaScript calling convention, "
            "expected \"context\", \"receiver\", \"target\", or \"newTarget\"")
            .Position(param_pos);
        expected_types = {actual_type};
      }
      if (std::find(expected_types.begin(), expected_types.end(),
                    actual_type) == expected_types.end()) {
        Error("According to JavaScript calling convention, expected parameter ",
              param_name, " to have type ", PrintList(expected_types, " or "),
              " but found type ", *actual_type)
            .Position(param_pos);
      }
    }

    for (size_t i = signature.implicit_count;
         i < signature.parameter_names.size(); ++i) {
      const std::string& parameter_name = signature.parameter_names[i]->value;
      const Type* type = signature.types()[i];
      const bool mark_as_used = signature.implicit_count > i;
      std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
                                     &parameter_bindings, mark_as_used);
      csa_ccfile() << "  " << type->GetGeneratedTypeName() << " " << var
                   << " = "
                   << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
                   << ">(Descriptor::k" << CamelifyString(parameter_name)
                   << ");\n";
      csa_ccfile() << "  USE(" << var << ");\n";
    }

  } else {
    DCHECK(builtin->IsStub());

    for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
      const std::string& parameter_name = signature.parameter_names[i]->value;
      const Type* type = signature.types()[i];
      const bool mark_as_used = signature.implicit_count > i;
      std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
                                     &parameter_bindings, mark_as_used);
      csa_ccfile() << "  " << type->GetGeneratedTypeName() << " " << var
                   << " = "
                   << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
                   << ">(Descriptor::k" << CamelifyString(parameter_name)
                   << ");\n";
      csa_ccfile() << "  USE(" << var << ");\n";
    }
  }
  assembler_ = CfgAssembler(parameter_types);
  const Type* body_result = Visit(*builtin->body());
  if (body_result != TypeOracle::GetNeverType()) {
    ReportError("control reaches end of builtin, expected return of a value");
  }
  CSAGenerator csa_generator{assembler().Result(), csa_ccfile(),
                             builtin->kind()};
  csa_generator.EmitGraph(parameters);
  assembler_ = base::nullopt;
  csa_ccfile() << "}\n\n";
}

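// Handles 'let' and 'const' declarations. In Torque source this covers,
// e.g. (an illustrative sketch, not from this file):
//
//   const kLimit: constexpr int31 = 100;  // constexpr must use 'const'
//   let count: intptr = 0;                // mutable binding
//
// 'const' bindings are wrapped as read-only temporaries; 'let' bindings are
// variable accesses that later assignments may overwrite.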
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
  BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
  return Visit(stmt, &block_bindings);
}

const Type* ImplementationVisitor::Visit(
    VarDeclarationStatement* stmt, BlockBindings<LocalValue>* block_bindings) {
  // const qualified variables are required to be initialized properly.
  if (stmt->const_qualified && !stmt->initializer) {
    ReportError("local constant \"", stmt->name, "\" is not initialized.");
  }

  base::Optional<const Type*> type;
  if (stmt->type) {
    type = TypeVisitor::ComputeType(*stmt->type);
  }
  base::Optional<VisitResult> init_result;
  if (stmt->initializer) {
    StackScope scope(this);
    init_result = Visit(*stmt->initializer);
    if (type) {
      init_result = GenerateImplicitConvert(*type, *init_result);
    }
    type = init_result->type();
    if ((*type)->IsConstexpr() && !stmt->const_qualified) {
      Error("Use 'const' instead of 'let' for variable '", stmt->name->value,
            "' of constexpr type '", (*type)->ToString(), "'.")
          .Position(stmt->name->pos)
          .Throw();
    }
    init_result = scope.Yield(*init_result);
  } else {
    DCHECK(type.has_value());
    if ((*type)->IsConstexpr()) {
      ReportError("constexpr variables need an initializer");
    }
    TypeVector lowered_types = LowerType(*type);
    for (const Type* type : lowered_types) {
      assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
          "uninitialized variable '" + stmt->name->value + "' of type " +
              type->ToString() + " originally defined at " +
              PositionAsString(stmt->pos),
          type)});
    }
    init_result =
        VisitResult(*type, assembler().TopRange(lowered_types.size()));
  }
  LocationReference ref = stmt->const_qualified
                              ? LocationReference::Temporary(
                                    *init_result, "const " + stmt->name->value)
                              : LocationReference::VariableAccess(*init_result);
  block_bindings->Add(stmt->name, LocalValue{std::move(ref)});
  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(TailCallStatement* stmt) {
  return Visit(stmt->call, true).type();
}

VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
  Block* true_block = assembler().NewBlock(assembler().CurrentStack());
  Block* false_block = assembler().NewBlock(assembler().CurrentStack());
  Block* done_block = assembler().NewBlock();
  Block* true_conversion_block = assembler().NewBlock();
  GenerateExpressionBranch(expr->condition, true_block, false_block);

  VisitResult left;
  VisitResult right;

  {
    // The code for both paths of the conditional needs to be generated first
    // before evaluating the conditional expression because the common type of
    // the result of both the true and false of the condition needs to be known
    // to convert both branches to a common type.
    assembler().Bind(true_block);
    StackScope left_scope(this);
    left = Visit(expr->if_true);
    assembler().Goto(true_conversion_block);

    const Type* common_type;
    {
      assembler().Bind(false_block);
      StackScope right_scope(this);
      right = Visit(expr->if_false);
      common_type = GetCommonType(left.type(), right.type());
      right = right_scope.Yield(GenerateImplicitConvert(common_type, right));
      assembler().Goto(done_block);
    }

    assembler().Bind(true_conversion_block);
    left = left_scope.Yield(GenerateImplicitConvert(common_type, left));
    assembler().Goto(done_block);
  }

  assembler().Bind(done_block);
  CHECK_EQ(left, right);
  return left;
}

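// '||' has two lowerings: if both operands are constexpr bool, it folds into
// a C++ '||' string; otherwise it becomes a short-circuit branch where the
// right operand is only evaluated on the false edge of the left operand.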
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
  StackScope outer_scope(this);
  VisitResult left_result = Visit(expr->left);

  if (left_result.type()->IsConstexprBool()) {
    VisitResult right_result = Visit(expr->right);
    if (!right_result.type()->IsConstexprBool()) {
      ReportError(
          "expected type constexpr bool on right-hand side of operator "
          "||");
    }
    return VisitResult(TypeOracle::GetConstexprBoolType(),
                       std::string("(") + left_result.constexpr_value() +
                           " || " + right_result.constexpr_value() + ")");
  }

  Block* true_block = assembler().NewBlock();
  Block* false_block = assembler().NewBlock();
  Block* done_block = assembler().NewBlock();

  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
  GenerateBranch(left_result, true_block, false_block);

  assembler().Bind(true_block);
  VisitResult true_result = GenerateBoolConstant(true);
  assembler().Goto(done_block);

  assembler().Bind(false_block);
  VisitResult false_result;
  {
    StackScope false_block_scope(this);
    false_result = false_block_scope.Yield(
        GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
  }
  assembler().Goto(done_block);

  assembler().Bind(done_block);
  DCHECK_EQ(true_result, false_result);
  return outer_scope.Yield(true_result);
}

VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
  StackScope outer_scope(this);
  VisitResult left_result = Visit(expr->left);

  if (left_result.type()->IsConstexprBool()) {
    VisitResult right_result = Visit(expr->right);
    if (!right_result.type()->IsConstexprBool()) {
      ReportError(
          "expected type constexpr bool on right-hand side of operator "
          "&&");
    }
    return VisitResult(TypeOracle::GetConstexprBoolType(),
                       std::string("(") + left_result.constexpr_value() +
                           " && " + right_result.constexpr_value() + ")");
  }

  Block* true_block = assembler().NewBlock();
  Block* false_block = assembler().NewBlock();
  Block* done_block = assembler().NewBlock();

  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
  GenerateBranch(left_result, true_block, false_block);

  assembler().Bind(true_block);
  VisitResult true_result;
  {
    StackScope true_block_scope(this);
    VisitResult right_result = Visit(expr->right);
    if (TryGetSourceForBitfieldExpression(expr->left) != nullptr &&
        TryGetSourceForBitfieldExpression(expr->right) != nullptr &&
        TryGetSourceForBitfieldExpression(expr->left)->value ==
            TryGetSourceForBitfieldExpression(expr->right)->value) {
      Lint(
          "Please use & rather than && when checking multiple bitfield "
          "values, to avoid complexity in generated code.");
    }
    true_result = true_block_scope.Yield(
        GenerateImplicitConvert(TypeOracle::GetBoolType(), right_result));
  }
  assembler().Goto(done_block);

  assembler().Bind(false_block);
  VisitResult false_result = GenerateBoolConstant(false);
  assembler().Goto(done_block);

  assembler().Bind(done_block);
  DCHECK_EQ(true_result, false_result);
  return outer_scope.Yield(true_result);
}

VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
  StackScope scope(this);
  LocationReference location_ref = GetLocationReference(expr->location);
  VisitResult current_value = GenerateFetchFromLocation(location_ref);
  VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
  Arguments args;
  args.parameters = {current_value, one};
  VisitResult assignment_value = GenerateCall(
      expr->op == IncrementDecrementOperator::kIncrement ? "+" : "-", args);
  GenerateAssignToLocation(location_ref, assignment_value);
  return scope.Yield(expr->postfix ? current_value : assignment_value);
}

VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
  StackScope scope(this);
  LocationReference location_ref = GetLocationReference(expr->location);
  VisitResult assignment_value;
  if (expr->op) {
    VisitResult location_value = GenerateFetchFromLocation(location_ref);
    assignment_value = Visit(expr->value);
    Arguments args;
    args.parameters = {location_value, assignment_value};
    assignment_value = GenerateCall(*expr->op, args);
    GenerateAssignToLocation(location_ref, assignment_value);
  } else {
    assignment_value = Visit(expr->value);
    GenerateAssignToLocation(location_ref, assignment_value);
  }
  return scope.Yield(assignment_value);
}

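// Picks the smallest const type for a numeric literal. The bit trick
// (i >> 30) == (i >> 31) holds exactly when bits 30 and 31 agree, i.e. when
// i fits in a signed 31-bit integer. For example:
//   1073741823 (2^30 - 1) -> const int31
//   1073741824 (2^30)     -> const int32
//   0.5 or 2^32           -> const float64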
VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
  const Type* result_type = TypeOracle::GetConstFloat64Type();
  if (expr->number >= std::numeric_limits<int32_t>::min() &&
      expr->number <= std::numeric_limits<int32_t>::max()) {
    int32_t i = static_cast<int32_t>(expr->number);
    if (i == expr->number) {
      if ((i >> 30) == (i >> 31)) {
        result_type = TypeOracle::GetConstInt31Type();
      } else {
        result_type = TypeOracle::GetConstInt32Type();
      }
    }
  }
  std::stringstream str;
  str << std::setprecision(std::numeric_limits<double>::digits10 + 1)
      << expr->number;
  return VisitResult{result_type, str.str()};
}

VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
  VisitResult result = Visit(expr->expression);
  const Type* result_type = SubtractType(
      result.type(), TypeVisitor::ComputeType(expr->excluded_type));
  if (result_type->IsNever()) {
    ReportError("unreachable code");
  }
  CHECK_EQ(LowerType(result_type), TypeVector{result_type});
  assembler().Emit(UnsafeCastInstruction{result_type});
  result.SetType(result_type);
  return result;
}

VisitResult ImplementationVisitor::Visit(StringLiteralExpression* expr) {
  return VisitResult{
      TypeOracle::GetConstStringType(),
      "\"" + expr->literal.substr(1, expr->literal.size() - 2) + "\""};
}

VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
  if (builtin->IsExternal() || builtin->kind() != Builtin::kStub) {
    ReportError(
        "creating function pointers is only allowed for internal builtins with "
        "stub linkage");
  }
  const Type* type = TypeOracle::GetBuiltinPointerType(
      builtin->signature().parameter_types.types,
      builtin->signature().return_type);
  assembler().Emit(
      PushBuiltinPointerInstruction{builtin->ExternalName(), type});
  return VisitResult(type, assembler().TopRange(1));
}

VisitResult ImplementationVisitor::Visit(LocationExpression* expr) {
  StackScope scope(this);
  return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}

VisitResult ImplementationVisitor::Visit(FieldAccessExpression* expr) {
  StackScope scope(this);
  LocationReference location = GetLocationReference(expr);
  if (location.IsBitFieldAccess()) {
    if (auto* identifier = IdentifierExpression::DynamicCast(expr->object)) {
      bitfield_expressions_[expr] = identifier->name;
    }
  }
  return scope.Yield(GenerateFetchFromLocation(location));
}

const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
  Binding<LocalLabel>* label = LookupLabel(stmt->label->value);
  size_t parameter_count = label->parameter_types.size();
  if (stmt->arguments.size() != parameter_count) {
    ReportError("goto to label has incorrect number of parameters (expected ",
                parameter_count, " found ", stmt->arguments.size(), ")");
  }

  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(stmt->label->pos,
                                      label->declaration_position());
  }

  size_t i = 0;
  StackRange arguments = assembler().TopRange(0);
  for (Expression* e : stmt->arguments) {
    StackScope scope(this);
    VisitResult result = Visit(e);
    const Type* parameter_type = label->parameter_types[i++];
    result = GenerateImplicitConvert(parameter_type, result);
    arguments.Extend(scope.Yield(result).stack_range());
  }

  assembler().Goto(label->block, arguments.Size());
  return TypeOracle::GetNeverType();
}

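// Two flavors of 'if': 'if constexpr' lowers to a ConstexprBranchInstruction
// whose condition is evaluated by the generated C++ rather than at runtime,
// and the regular form lowers to a runtime branch between (possibly
// deferred) blocks.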
const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
  bool has_else = stmt->if_false.has_value();

  if (stmt->is_constexpr) {
    VisitResult expression_result = Visit(stmt->condition);

    if (!(expression_result.type() == TypeOracle::GetConstexprBoolType())) {
      std::stringstream stream;
      stream << "expression should return type constexpr bool "
             << "but returns type " << *expression_result.type();
      ReportError(stream.str());
    }

    Block* true_block = assembler().NewBlock();
    Block* false_block = assembler().NewBlock();
    Block* done_block = assembler().NewBlock();

    assembler().Emit(ConstexprBranchInstruction{
        expression_result.constexpr_value(), true_block, false_block});

    assembler().Bind(true_block);
    const Type* left_result = Visit(stmt->if_true);
    if (left_result == TypeOracle::GetVoidType()) {
      assembler().Goto(done_block);
    }

    assembler().Bind(false_block);
    const Type* right_result = TypeOracle::GetVoidType();
    if (has_else) {
      right_result = Visit(*stmt->if_false);
    }
    if (right_result == TypeOracle::GetVoidType()) {
      assembler().Goto(done_block);
    }

    if (left_result->IsNever() != right_result->IsNever()) {
      std::stringstream stream;
      stream << "either both or neither branches in a constexpr if statement "
                "must reach their end at"
             << PositionAsString(stmt->pos);
      ReportError(stream.str());
    }

    if (left_result != TypeOracle::GetNeverType()) {
      assembler().Bind(done_block);
    }
    return left_result;
  } else {
    Block* true_block = assembler().NewBlock(assembler().CurrentStack(),
                                             IsDeferred(stmt->if_true));
    Block* false_block =
        assembler().NewBlock(assembler().CurrentStack(),
                             stmt->if_false && IsDeferred(*stmt->if_false));
    GenerateExpressionBranch(stmt->condition, true_block, false_block);

    Block* done_block;
    bool live = false;
    if (has_else) {
      done_block = assembler().NewBlock();
    } else {
      done_block = false_block;
      live = true;
    }

    assembler().Bind(true_block);
    {
      const Type* result = Visit(stmt->if_true);
      if (result == TypeOracle::GetVoidType()) {
        live = true;
        assembler().Goto(done_block);
      }
    }

    if (has_else) {
      assembler().Bind(false_block);
      const Type* result = Visit(*stmt->if_false);
      if (result == TypeOracle::GetVoidType()) {
        live = true;
        assembler().Goto(done_block);
      }
    }

    if (live) {
      assembler().Bind(done_block);
    }
    return live ? TypeOracle::GetVoidType() : TypeOracle::GetNeverType();
  }
}

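// Lowers 'while' into the usual loop CFG:
//
//        +-----> header --(false)--> exit
//        |          |
//        |       (true)
//        |          v
//        +------- body
//
// break/continue inside the body bind to exit/header via
// BreakContinueActivator.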
const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
  Block* body_block = assembler().NewBlock(assembler().CurrentStack());
  Block* exit_block = assembler().NewBlock(assembler().CurrentStack());

  Block* header_block = assembler().NewBlock();
  assembler().Goto(header_block);

  assembler().Bind(header_block);
  GenerateExpressionBranch(stmt->condition, body_block, exit_block);

  assembler().Bind(body_block);
  {
    BreakContinueActivator activator{exit_block, header_block};
    const Type* body_result = Visit(stmt->body);
    if (body_result != TypeOracle::GetNeverType()) {
      assembler().Goto(header_block);
    }
  }

  assembler().Bind(exit_block);
  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(BlockStatement* block) {
  BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
  const Type* type = TypeOracle::GetVoidType();
  for (Statement* s : block->statements) {
    CurrentSourcePosition::Scope source_position(s->pos);
    if (type->IsNever()) {
      ReportError("statement after non-returning statement");
    }
    if (auto* var_declaration = VarDeclarationStatement::DynamicCast(s)) {
      type = Visit(var_declaration, &block_bindings);
    } else {
      type = Visit(s);
    }
  }
  return type;
}

const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
#if defined(DEBUG)
  assembler().Emit(PrintConstantStringInstruction{"halting because of '" +
                                                  stmt->reason + "' at " +
                                                  PositionAsString(stmt->pos)});
#endif
  assembler().Emit(AbortInstruction{stmt->never_continues
                                        ? AbortInstruction::Kind::kUnreachable
                                        : AbortInstruction::Kind::kDebugBreak});
  if (stmt->never_continues) {
    return TypeOracle::GetNeverType();
  } else {
    return TypeOracle::GetVoidType();
  }
}

namespace {

std::string FormatAssertSource(const std::string& str) {
  // Replace all whitespace characters with a space character.
  std::string str_no_newlines = str;
  std::replace_if(
      str_no_newlines.begin(), str_no_newlines.end(),
      [](unsigned char c) { return isspace(c); }, ' ');

  // str might include indentation, squash multiple space characters into one.
  std::string result;
  std::unique_copy(str_no_newlines.begin(), str_no_newlines.end(),
                   std::back_inserter(result),
                   [](char a, char b) { return a == ' ' && b == ' '; });
  return result;
}

}  // namespace

const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
  if (stmt->kind == AssertStatement::AssertKind::kStaticAssert) {
    std::string message =
        "static_assert(" + stmt->source + ") at " + ToString(stmt->pos);
    GenerateCall(QualifiedName({"", TORQUE_INTERNAL_NAMESPACE_STRING},
                               STATIC_ASSERT_MACRO_STRING),
                 Arguments{{Visit(stmt->expression),
                            VisitResult(TypeOracle::GetConstexprStringType(),
                                        StringLiteralQuote(message))},
                           {}});
    return TypeOracle::GetVoidType();
  }
  bool do_check = stmt->kind != AssertStatement::AssertKind::kAssert ||
                  GlobalContext::force_assert_statements();
#if defined(DEBUG)
  do_check = true;
#endif
  Block* resume_block;

  if (!do_check) {
    Block* unreachable_block = assembler().NewBlock(assembler().CurrentStack());
    resume_block = assembler().NewBlock(assembler().CurrentStack());
    assembler().Goto(resume_block);
    assembler().Bind(unreachable_block);
  }

  // CSA_ASSERT & co. are not used here on purpose for two reasons. First,
  // Torque allows and handles two types of expressions in the if protocol
  // automagically, ones that return TNode<BoolT> and those that use the
  // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
  // handle this is embedded in the expression handling, deciding up-front
  // between CSA_ASSERT and CSA_ASSERT_BRANCH isn't trivial. Secondly, on
  // failure, the assert text should be the corresponding Torque code, not
  // the -gen.cc code, which would be the case when using CSA_ASSERT_XXX.
  Block* true_block = assembler().NewBlock(assembler().CurrentStack());
  Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
  GenerateExpressionBranch(stmt->expression, true_block, false_block);

  assembler().Bind(false_block);

  assembler().Emit(AbortInstruction{
      AbortInstruction::Kind::kAssertionFailure,
      "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});

  assembler().Bind(true_block);

  if (!do_check) {
    assembler().Bind(resume_block);
  }

  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(ExpressionStatement* stmt) {
  const Type* type = Visit(stmt->expression).type();
  return type->IsNever() ? type : TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
  Callable* current_callable = CurrentCallable::Get();
  if (current_callable->signature().return_type->IsNever()) {
    std::stringstream s;
    s << "cannot return from a function with return type never";
    ReportError(s.str());
  }
  LocalLabel* end =
      current_callable->IsMacro() ? LookupLabel(kMacroEndLabelName) : nullptr;
  if (current_callable->HasReturnValue()) {
    if (!stmt->value) {
      std::stringstream s;
      s << "return expression needs to be specified for a return type of "
        << *current_callable->signature().return_type;
      ReportError(s.str());
    }
    VisitResult expression_result = Visit(*stmt->value);
    VisitResult return_result = GenerateImplicitConvert(
        current_callable->signature().return_type, expression_result);
    if (current_callable->IsMacro()) {
      if (return_result.IsOnStack()) {
        StackRange return_value_range =
            GenerateLabelGoto(end, return_result.stack_range());
        SetReturnValue(VisitResult(return_result.type(), return_value_range));
      } else {
        GenerateLabelGoto(end);
        SetReturnValue(return_result);
      }
    } else if (current_callable->IsBuiltin()) {
      assembler().Emit(ReturnInstruction{
          LoweredSlotCount(current_callable->signature().return_type)});
    } else {
      UNREACHABLE();
    }
  } else {
    if (stmt->value) {
      std::stringstream s;
      s << "return expression can't be specified for a void or never return "
           "type";
      ReportError(s.str());
    }
    GenerateLabelGoto(end);
  }
  current_callable->IncrementReturns();
  return TypeOracle::GetNeverType();
}

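// Lowers 'try ... label L(...)' / 'otherwise': the label body becomes a
// separate block whose parameters are the label's arguments, and the try
// expression is visited with L bound so that 'goto L(...)' (or an otherwise
// transfer) jumps there. Fall-through of both paths merges in done_block.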
VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
  size_t parameter_count = expr->label_block->parameters.names.size();
  std::vector<VisitResult> parameters;

  Block* label_block = nullptr;
  Block* done_block = assembler().NewBlock();
  VisitResult try_result;

  {
    CurrentSourcePosition::Scope source_position(expr->label_block->pos);
    if (expr->label_block->parameters.has_varargs) {
      ReportError("cannot use ... for label parameters");
    }
    Stack<const Type*> label_input_stack = assembler().CurrentStack();
    TypeVector parameter_types;
    for (size_t i = 0; i < parameter_count; ++i) {
      const Type* type =
          TypeVisitor::ComputeType(expr->label_block->parameters.types[i]);
      parameter_types.push_back(type);
      if (type->IsConstexpr()) {
        ReportError("no constexpr type allowed for label arguments");
      }
      StackRange range = label_input_stack.PushMany(LowerType(type));
      parameters.push_back(VisitResult(type, range));
    }
    label_block = assembler().NewBlock(label_input_stack,
                                       IsDeferred(expr->label_block->body));

    Binding<LocalLabel> label_binding{&LabelBindingsManager::Get(),
                                      expr->label_block->label,
                                      LocalLabel{label_block, parameter_types}};

    // Visit try
    StackScope stack_scope(this);
    try_result = Visit(expr->try_expression);
    if (try_result.type() != TypeOracle::GetNeverType()) {
      try_result = stack_scope.Yield(try_result);
      assembler().Goto(done_block);
    }
  }

  // Visit and output the code for the label block. If the label block falls
  // through, then the try must not return a value. Also, if the try doesn't
  // fall through, but the label does, then overall the try-label block
  // returns type void.
  assembler().Bind(label_block);
  const Type* label_result;
  {
    BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
    for (size_t i = 0; i < parameter_count; ++i) {
      Identifier* name = expr->label_block->parameters.names[i];
      parameter_bindings.Add(name,
                             LocalValue{LocationReference::Temporary(
                                 parameters[i], "parameter " + name->value)});
    }

    label_result = Visit(expr->label_block->body);
  }
  if (!try_result.type()->IsVoidOrNever() && label_result->IsVoid()) {
    ReportError(
        "otherwise clauses cannot fall through in a non-void expression");
  }
  if (label_result != TypeOracle::GetNeverType()) {
    assembler().Goto(done_block);
  }
  if (label_result->IsVoid() && try_result.type()->IsNever()) {
    try_result =
        VisitResult(TypeOracle::GetVoidType(), try_result.stack_range());
  }

  if (!try_result.type()->IsNever()) {
    assembler().Bind(done_block);
  }
  return try_result;
}

VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
  return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
}

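// Evaluates the field initializers of a class literal such as (sketch)
//   new MyClass{map: m, length: n, elements: ...iterator}
// (names illustrative). Indexed fields must be initialized with a spread
// ("...") expression; non-indexed fields must not be.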
InitializerResults ImplementationVisitor::VisitInitializerResults(
    const ClassType* class_type,
    const std::vector<NameAndExpression>& initializers) {
  InitializerResults result;
  for (const NameAndExpression& initializer : initializers) {
    result.names.push_back(initializer.name);
    Expression* e = initializer.expression;
    const Field& field = class_type->LookupField(initializer.name->value);
    bool has_index = field.index.has_value();
    if (SpreadExpression* s = SpreadExpression::DynamicCast(e)) {
      if (!has_index) {
        ReportError(
            "spread expressions can only be used to initialize indexed class "
            "fields ('",
            initializer.name->value, "' is not)");
      }
      e = s->spreadee;
    } else if (has_index) {
      ReportError("the indexed class field '", initializer.name->value,
                  "' must be initialized with a spread operator");
    }
    result.field_value_map[field.name_and_type.name] = Visit(e);
  }
  return result;
}

LocationReference ImplementationVisitor::GenerateFieldReference(
    VisitResult object, const Field& field, const ClassType* class_type,
    bool treat_optional_as_indexed) {
  if (field.index.has_value()) {
    LocationReference slice = LocationReference::HeapSlice(
        GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
    if (field.index->optional && !treat_optional_as_indexed) {
      // This field was declared using optional syntax, so any reference to it
      // is implicitly a reference to the first item.
      return GenerateReferenceToItemInHeapSlice(
          slice, {TypeOracle::GetConstInt31Type(), "0"});
    } else {
      return slice;
    }
  }
  DCHECK(field.offset.has_value());
  StackRange result_range = assembler().TopRange(0);
  result_range.Extend(GenerateCopy(object).stack_range());
  VisitResult offset =
      VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
  offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
  result_range.Extend(offset.stack_range());
  const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
                                                  field.const_qualified);
  return LocationReference::HeapReference(VisitResult(type, result_range));
}

// This is used to generate field references during initialization, where we
// can re-use the offsets used for computing the allocation size.
LocationReference ImplementationVisitor::GenerateFieldReferenceForInit(
    VisitResult object, const Field& field,
    const LayoutForInitialization& layout) {
  StackRange result_range = assembler().TopRange(0);
  result_range.Extend(GenerateCopy(object).stack_range());
  VisitResult offset = GenerateImplicitConvert(
      TypeOracle::GetIntPtrType(), layout.offsets.at(field.name_and_type.name));
  result_range.Extend(offset.stack_range());
  if (field.index) {
    VisitResult length =
        GenerateCopy(layout.array_lengths.at(field.name_and_type.name));
    result_range.Extend(length.stack_range());
    const Type* slice_type =
        TypeOracle::GetMutableSliceType(field.name_and_type.type);
    return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
  } else {
    // Const fields are writable during initialization.
    VisitResult heap_reference(
        TypeOracle::GetMutableReferenceType(field.name_and_type.type),
        result_range);
    return LocationReference::HeapReference(heap_reference);
  }
}

void ImplementationVisitor::InitializeClass(
    const ClassType* class_type, VisitResult allocate_result,
    const InitializerResults& initializer_results,
    const LayoutForInitialization& layout) {
  if (const ClassType* super = class_type->GetSuperClass()) {
    InitializeClass(super, allocate_result, initializer_results, layout);
  }

  for (Field f : class_type->fields()) {
    VisitResult initializer_value =
        initializer_results.field_value_map.at(f.name_and_type.name);
    LocationReference field =
        GenerateFieldReferenceForInit(allocate_result, f, layout);
    if (f.index) {
      DCHECK(field.IsHeapSlice());
      VisitResult slice = field.GetVisitResult();
      GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                 "InitializeFieldsFromIterator"),
                   {{slice, initializer_value}, {}});
    } else {
      GenerateAssignToLocation(field, initializer_value);
    }
  }
}

VisitResult ImplementationVisitor::GenerateArrayLength(
    Expression* array_length, Namespace* nspace,
    const std::map<std::string, LocalValue>& bindings) {
  StackScope stack_scope(this);
  CurrentSourcePosition::Scope pos_scope(array_length->pos);
  // Switch to the namespace where the class was declared.
  CurrentScope::Scope current_scope_scope(nspace);
  // Reset local bindings and install local bindings for the preceding fields.
  BindingsManagersScope bindings_managers_scope;
  BlockBindings<LocalValue> field_bindings(&ValueBindingsManager::Get());
  for (auto& p : bindings) {
    field_bindings.Add(p.first, LocalValue{p.second}, true);
  }
  VisitResult length = Visit(array_length);
  VisitResult converted_length =
      GenerateCall("Convert", Arguments{{length}, {}},
                   {TypeOracle::GetIntPtrType(), length.type()}, false);
  return stack_scope.Yield(converted_length);
}

VisitResult ImplementationVisitor::GenerateArrayLength(VisitResult object,
                                                       const Field& field) {
  DCHECK(field.index);

  StackScope stack_scope(this);
  const ClassType* class_type = *object.type()->ClassSupertype();
  std::map<std::string, LocalValue> bindings;
  bool before_current = true;
  for (Field f : class_type->ComputeAllFields()) {
    if (field.name_and_type.name == f.name_and_type.name) {
      before_current = false;
    }
    // We can't generate field references eagerly here, because some preceding
    // fields might be optional, and attempting to get a reference to an
    // optional field can crash the program if the field isn't present.
    // Instead, we use the lazy form of LocalValue to only generate field
    // references if they are used in the length expression.
    bindings.insert(
        {f.name_and_type.name,
         f.const_qualified
             ? (before_current
                    ? LocalValue{[=]() {
                        return GenerateFieldReference(object, f, class_type);
                      }}
                    : LocalValue("Array lengths may only refer to fields "
                                 "defined earlier"))
             : LocalValue(
                   "Non-const fields cannot be used for array lengths.")});
  }
  return stack_scope.Yield(
      GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
}

VisitResult ImplementationVisitor::GenerateArrayLength(
    const ClassType* class_type, const InitializerResults& initializer_results,
    const Field& field) {
  DCHECK(field.index);

  StackScope stack_scope(this);
  std::map<std::string, LocalValue> bindings;
  for (Field f : class_type->ComputeAllFields()) {
    if (f.index) break;
    const std::string& fieldname = f.name_and_type.name;
    VisitResult value = initializer_results.field_value_map.at(fieldname);
    bindings.insert(
        {fieldname,
         f.const_qualified
             ? LocalValue{LocationReference::Temporary(
                   value, "initial field " + fieldname)}
             : LocalValue(
                   "Non-const fields cannot be used for array lengths.")});
  }
  return stack_scope.Yield(
      GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
}

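// Computes the offset of every field and the total allocation size. As an
// illustrative example: an indexed field of n 4-byte elements placed at
// offset 8 advances the running offset via
// AddIndexedFieldSizeToObjectSize(8, n, 4), and the final size is
// tag-aligned if the class's own alignment is smaller than the tagged size.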
LayoutForInitialization ImplementationVisitor::GenerateLayoutForInitialization(
    const ClassType* class_type,
    const InitializerResults& initializer_results) {
  LayoutForInitialization layout;
  VisitResult offset;
  for (Field f : class_type->ComputeAllFields()) {
    if (f.offset.has_value()) {
      offset =
          VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
    }
    layout.offsets[f.name_and_type.name] = offset;
    if (f.index) {
      size_t element_size;
      std::string element_size_string;
      std::tie(element_size, element_size_string) =
          *SizeOf(f.name_and_type.type);
      VisitResult array_element_size =
          VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
      VisitResult array_length =
          GenerateArrayLength(class_type, initializer_results, f);
      layout.array_lengths[f.name_and_type.name] = array_length;
      Arguments arguments;
      arguments.parameters = {offset, array_length, array_element_size};
      offset = GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                          "AddIndexedFieldSizeToObjectSize"),
                            arguments);
    } else {
      DCHECK(f.offset.has_value());
    }
  }
  if (class_type->size().SingleValue()) {
    layout.size = VisitResult(TypeOracle::GetConstInt31Type(),
                              ToString(*class_type->size().SingleValue()));
  } else {
    layout.size = offset;
  }
  if ((size_t{1} << class_type->size().AlignmentLog2()) <
      TargetArchitecture::TaggedSize()) {
    Arguments arguments;
    arguments.parameters = {layout.size};
    layout.size = GenerateCall(
        QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AlignTagged"),
        arguments);
  }
  return layout;
}

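// Lowers `new T{...}` in four steps: evaluate the initializers, compute the
// allocation layout, call AllocateFromNew, and then initialize the fields of
// the freshly allocated object. For non-extern classes the map is not
// written by the user but inserted automatically from the instance type.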
VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
  StackScope stack_scope(this);
  const Type* type = TypeVisitor::ComputeType(expr->type);
  const ClassType* class_type = ClassType::DynamicCast(type);
  if (class_type == nullptr) {
    ReportError("type for new expression must be a class, \"", *type,
                "\" is not");
  }

  if (!class_type->AllowInstantiation()) {
    // Classes that are only used for testing should never be instantiated.
    ReportError(*class_type,
                " cannot be allocated with new (it's used for testing)");
  }

  InitializerResults initializer_results =
      VisitInitializerResults(class_type, expr->initializers);

  const Field& map_field = class_type->LookupField("map");
  if (*map_field.offset != 0) {
    ReportError("class initializers must have a map as first parameter");
  }
  const std::map<std::string, VisitResult>& initializer_fields =
      initializer_results.field_value_map;
  auto it_object_map = initializer_fields.find(map_field.name_and_type.name);
  VisitResult object_map;
  if (class_type->IsExtern()) {
    if (it_object_map == initializer_fields.end()) {
      ReportError("Constructor for ", class_type->name(),
                  " needs Map argument!");
    }
    object_map = it_object_map->second;
  } else {
    if (it_object_map != initializer_fields.end()) {
      ReportError(
          "Constructor for ", class_type->name(),
          " must not specify Map argument; it is automatically inserted.");
    }
    Arguments get_struct_map_arguments;
    get_struct_map_arguments.parameters.push_back(
        VisitResult(TypeOracle::GetConstexprInstanceTypeType(),
                    CapifyStringWithUnderscores(class_type->name()) + "_TYPE"));
    object_map = GenerateCall(
        QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "GetInstanceTypeMap"),
        get_struct_map_arguments, {}, false);
    CurrentSourcePosition::Scope current_pos(expr->pos);
    initializer_results.names.insert(initializer_results.names.begin(),
                                     MakeNode<Identifier>("map"));
    initializer_results.field_value_map[map_field.name_and_type.name] =
        object_map;
  }

  CheckInitializersWellformed(class_type->name(),
                              class_type->ComputeAllFields(),
                              expr->initializers, !class_type->IsExtern());

  LayoutForInitialization layout =
      GenerateLayoutForInitialization(class_type, initializer_results);

  Arguments allocate_arguments;
  allocate_arguments.parameters.push_back(layout.size);
  allocate_arguments.parameters.push_back(object_map);
  allocate_arguments.parameters.push_back(
      GenerateBoolConstant(expr->pretenured));
  VisitResult allocate_result = GenerateCall(
      QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AllocateFromNew"),
      allocate_arguments, {class_type}, false);
  DCHECK(allocate_result.IsOnStack());

  InitializeClass(class_type, allocate_result, initializer_results, layout);

  return stack_scope.Yield(GenerateCall(
      "%RawDownCast", Arguments{{allocate_result}, {}}, {class_type}));
}

const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
  base::Optional<Binding<LocalLabel>*> break_label =
      TryLookupLabel(kBreakLabelName);
  if (!break_label) {
    ReportError("break used outside of loop");
  }
  assembler().Goto((*break_label)->block);
  return TypeOracle::GetNeverType();
}

const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
  base::Optional<Binding<LocalLabel>*> continue_label =
      TryLookupLabel(kContinueLabelName);
  if (!continue_label) {
    ReportError("continue used outside of loop");
  }
  assembler().Goto((*continue_label)->block);
  return TypeOracle::GetNeverType();
}

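// Generated control flow (sketch):
//   header: branch on the test to body or exit (or fall through to body if
//           there is no test)
//   body:   run the loop body; "continue" jumps to the action block if one
//           exists, otherwise back to the header
//   action: run the action expression, then jump back to the header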
const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
  BlockBindings<LocalValue> loop_bindings(&ValueBindingsManager::Get());

  if (stmt->var_declaration) Visit(*stmt->var_declaration, &loop_bindings);

  Block* body_block = assembler().NewBlock(assembler().CurrentStack());
  Block* exit_block = assembler().NewBlock(assembler().CurrentStack());

  Block* header_block = assembler().NewBlock();
  assembler().Goto(header_block);
  assembler().Bind(header_block);

  // The continue label is where "continue" statements jump to. If no action
  // expression is provided, we jump directly to the header.
  Block* continue_block = header_block;

  // The action label is only needed when an action expression was provided.
  Block* action_block = nullptr;
  if (stmt->action) {
    action_block = assembler().NewBlock();

    // The action expression needs to be executed on a continue.
    continue_block = action_block;
  }

  if (stmt->test) {
    GenerateExpressionBranch(*stmt->test, body_block, exit_block);
  } else {
    assembler().Goto(body_block);
  }

  assembler().Bind(body_block);
  {
    BreakContinueActivator activator(exit_block, continue_block);
    const Type* body_result = Visit(stmt->body);
    if (body_result != TypeOracle::GetNeverType()) {
      assembler().Goto(continue_block);
    }
  }

  if (stmt->action) {
    assembler().Bind(action_block);
    const Type* action_result = Visit(*stmt->action);
    if (action_result != TypeOracle::GetNeverType()) {
      assembler().Goto(header_block);
    }
  }

  assembler().Bind(exit_block);
  return TypeOracle::GetVoidType();
}

VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
  ReportError(
      "spread operators are only currently supported in indexed class field "
      "initialization expressions");
}

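// Writes all generated files for every Torque source file. The builtin
// includes collected during code generation are spliced into the CSA .cc
// stream in place of BuiltinIncludesMarker below.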
void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
  for (SourceId file : SourceFileMap::AllSources()) {
    std::string base_filename =
        dir + "/" + SourceFileMap::PathFromV8RootWithoutExtension(file);
    GlobalContext::PerFileStreams& streams =
        GlobalContext::GeneratedPerFile(file);

    std::string csa_cc = streams.csa_ccfile.str();
    // Insert missing builtin includes where the marker is.
    {
      auto pos = csa_cc.find(BuiltinIncludesMarker);
      CHECK_NE(pos, std::string::npos);
      std::string includes;
      for (const SourceId& include : streams.required_builtin_includes) {
        std::string include_file =
            SourceFileMap::PathFromV8RootWithoutExtension(include);
        includes += "#include \"torque-generated/";
        includes += include_file;
        includes += "-tq-csa.h\"\n";
      }
      csa_cc.replace(pos, strlen(BuiltinIncludesMarker), std::move(includes));
    }

    // TODO(torque-builder): Pass file directly.
    WriteFile(base_filename + "-tq-csa.cc", std::move(csa_cc));
    WriteFile(base_filename + "-tq-csa.h", streams.csa_headerfile.str());
    WriteFile(base_filename + "-tq.inc",
              streams.class_definition_headerfile.str());
    WriteFile(
        base_filename + "-tq-inl.inc",
        streams.class_definition_inline_headerfile_macro_declarations.str() +
            streams.class_definition_inline_headerfile_macro_definitions.str() +
            streams.class_definition_inline_headerfile.str());
    WriteFile(base_filename + "-tq.cc", streams.class_definition_ccfile.str());
  }

  WriteFile(dir + "/debug-macros.h", debug_macros_h_.str());
  WriteFile(dir + "/debug-macros.cc", debug_macros_cc_.str());
}

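// For a Torque macro like
//   macro LoadLength(o: JSArray): Smi
// the CSA variant generates (roughly) a C++ declaration of the shape
//   TNode<Smi> LoadLength(compiler::CodeAssemblerState* state_,
//                         TNode<JSArray> p_o);
// (illustrative sketch; the exact parameter naming comes from
// ExternalParameterName in GenerateFunction below).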
cpp::Function ImplementationVisitor::GenerateMacroFunctionDeclaration(
    Macro* macro) {
  return GenerateFunction(nullptr,
                          output_type_ == OutputType::kCC
                              ? macro->CCName()
                              : output_type_ == OutputType::kCCDebug
                                    ? macro->CCDebugName()
                                    : macro->ExternalName(),
                          macro->signature(), macro->parameter_names());
}

cpp::Function ImplementationVisitor::GenerateFunction(
    cpp::Class* owner, const std::string& name, const Signature& signature,
    const NameVector& parameter_names, bool pass_code_assembler_state,
    std::vector<std::string>* generated_parameter_names) {
  cpp::Function f(owner, name);
  f.SetInline(output_type_ == OutputType::kCC);

  // Set return type.
  // TODO(torque-builder): Consider an overload of SetReturnType that handles
  // this.
  if (signature.return_type->IsVoidOrNever()) {
    f.SetReturnType("void");
  } else if (output_type_ == OutputType::kCCDebug) {
    f.SetReturnType(std::string("Value<") +
                    signature.return_type->GetDebugType() + ">");
  } else if (output_type_ == OutputType::kCC) {
    f.SetReturnType(signature.return_type->GetRuntimeType());
  } else {
    DCHECK_EQ(output_type_, OutputType::kCSA);
    f.SetReturnType(signature.return_type->GetGeneratedTypeName());
  }

  bool ignore_first_parameter = true;
  if (output_type_ == OutputType::kCCDebug) {
    f.AddParameter("d::MemoryAccessor", "accessor");
  } else if (output_type_ == OutputType::kCSA && pass_code_assembler_state) {
    f.AddParameter("compiler::CodeAssemblerState*", "state_");
  } else {
    ignore_first_parameter = false;
  }

  // TODO(torque-builder): Consider an overload for AddParameter that handles
  // this.
  DCHECK_GE(signature.types().size(), parameter_names.size());
  for (std::size_t i = 0; i < signature.types().size(); ++i) {
    const Type* parameter_type = signature.types()[i];
    std::string type;
    if (output_type_ == OutputType::kCC) {
      type = parameter_type->GetRuntimeType();
    } else if (output_type_ == OutputType::kCCDebug) {
      type = parameter_type->GetDebugType();
    } else {
      DCHECK_EQ(output_type_, OutputType::kCSA);
      type = parameter_type->GetGeneratedTypeName();
    }
    f.AddParameter(std::move(type),
                   ExternalParameterName(i < parameter_names.size()
                                             ? parameter_names[i]->value
                                             : std::to_string(i)));
  }

  for (const LabelDeclaration& label_info : signature.labels) {
    if (output_type_ == OutputType::kCC ||
        output_type_ == OutputType::kCCDebug) {
      ReportError("Macros that generate runtime code can't have label exits");
    }
    f.AddParameter("compiler::CodeAssemblerLabel*",
                   ExternalLabelName(label_info.name->value));
    size_t i = 0;
    for (const Type* type : label_info.types) {
      std::string generated_type_name;
      if (type->StructSupertype()) {
        generated_type_name = "\n#error no structs allowed in labels\n";
      } else {
        generated_type_name = "compiler::TypedCodeAssemblerVariable<";
        generated_type_name += type->GetGeneratedTNodeTypeName();
        generated_type_name += ">*";
      }
      f.AddParameter(generated_type_name,
                     ExternalLabelParameterName(label_info.name->value, i));
      ++i;
    }
  }

  if (generated_parameter_names) {
    *generated_parameter_names = f.GetParameterNames();
    if (ignore_first_parameter) {
      DCHECK(!generated_parameter_names->empty());
      generated_parameter_names->erase(generated_parameter_names->begin());
    }
  }
  return f;
}

namespace {

void FailCallableLookup(
    const std::string& reason, const QualifiedName& name,
    const TypeVector& parameter_types,
    const std::vector<Binding<LocalLabel>*>& labels,
    const std::vector<Signature>& candidates,
    const std::vector<std::pair<GenericCallable*, std::string>>
        inapplicable_generics) {
  std::stringstream stream;
  stream << "\n" << reason << ": \n  " << name << "(" << parameter_types << ")";
  if (labels.size() != 0) {
    stream << " labels ";
    for (size_t i = 0; i < labels.size(); ++i) {
      stream << labels[i]->name() << "(" << labels[i]->parameter_types << ")";
    }
  }
  stream << "\ncandidates are:";
  for (const Signature& signature : candidates) {
    stream << "\n  " << name;
    PrintSignature(stream, signature, false);
  }
  if (inapplicable_generics.size() != 0) {
    stream << "\nfailed to instantiate all of these generic declarations:";
    for (auto& failure : inapplicable_generics) {
      GenericCallable* generic = failure.first;
      const std::string& reason = failure.second;
      stream << "\n  " << generic->name() << " defined at "
             << generic->Position() << ":\n    " << reason << "\n";
    }
  }
  ReportError(stream.str());
}

Callable* GetOrCreateSpecialization(
    const SpecializationKey<GenericCallable>& key) {
  if (base::Optional<Callable*> specialization =
          key.generic->GetSpecialization(key.specialized_types)) {
    return *specialization;
  }
  return DeclarationVisitor::SpecializeImplicit(key);
}

}  // namespace

base::Optional<Binding<LocalValue>*> ImplementationVisitor::TryLookupLocalValue(
    const std::string& name) {
  return ValueBindingsManager::Get().TryLookup(name);
}

base::Optional<Binding<LocalLabel>*> ImplementationVisitor::TryLookupLabel(
    const std::string& name) {
  return LabelBindingsManager::Get().TryLookup(name);
}

Binding<LocalLabel>* ImplementationVisitor::LookupLabel(
    const std::string& name) {
  base::Optional<Binding<LocalLabel>*> label = TryLookupLabel(name);
  if (!label) ReportError("cannot find label ", name);
  return *label;
}

Block* ImplementationVisitor::LookupSimpleLabel(const std::string& name) {
  LocalLabel* label = LookupLabel(name);
  if (!label->parameter_types.empty()) {
    ReportError("label ", name,
                "was expected to have no parameters, but has parameters (",
                label->parameter_types, ")");
  }
  return label->block;
}

// Try to look up a callable with the provided argument types. Do not report
// an error if no matching callable was found, but return false instead.
// This is used to test the presence of overloaded field accessors.
bool ImplementationVisitor::TestLookupCallable(
    const QualifiedName& name, const TypeVector& parameter_types) {
  return LookupCallable(name, Declarations::TryLookup(name), parameter_types,
                        {}, {}, true) != nullptr;
}

TypeArgumentInference ImplementationVisitor::InferSpecializationTypes(
    GenericCallable* generic, const TypeVector& explicit_specialization_types,
    const TypeVector& explicit_arguments) {
  std::vector<base::Optional<const Type*>> all_arguments;
  const ParameterList& parameters = generic->declaration()->parameters;
  for (size_t i = 0; i < parameters.implicit_count; ++i) {
    base::Optional<Binding<LocalValue>*> val =
        TryLookupLocalValue(parameters.names[i]->value);
    all_arguments.push_back(
        val ? (*val)->GetLocationReference(*val).ReferencedType()
            : base::nullopt);
  }
  for (const Type* explicit_argument : explicit_arguments) {
    all_arguments.push_back(explicit_argument);
  }
  return generic->InferSpecializationTypes(explicit_specialization_types,
                                           all_arguments);
}

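// Resolves an overloaded (and possibly generic) callable: generics are
// specialized via type-argument inference, compatible signatures become
// candidates, and a candidate is chosen only if its parameter types are a
// strictly better match than every other candidate's; otherwise the call is
// reported as ambiguous.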
template <class Container>
Callable* ImplementationVisitor::LookupCallable(
    const QualifiedName& name, const Container& declaration_container,
    const TypeVector& parameter_types,
    const std::vector<Binding<LocalLabel>*>& labels,
    const TypeVector& specialization_types, bool silence_errors) {
  Callable* result = nullptr;

  std::vector<Declarable*> overloads;
  std::vector<Signature> overload_signatures;
  std::vector<std::pair<GenericCallable*, std::string>> inapplicable_generics;
  for (auto* declarable : declaration_container) {
    if (GenericCallable* generic = GenericCallable::DynamicCast(declarable)) {
      TypeArgumentInference inference = InferSpecializationTypes(
          generic, specialization_types, parameter_types);
      if (inference.HasFailed()) {
        inapplicable_generics.push_back(
            std::make_pair(generic, inference.GetFailureReason()));
        continue;
      }
      overloads.push_back(generic);
      overload_signatures.push_back(
          DeclarationVisitor::MakeSpecializedSignature(
              SpecializationKey<GenericCallable>{generic,
                                                 inference.GetResult()}));
    } else if (Callable* callable = Callable::DynamicCast(declarable)) {
      overloads.push_back(callable);
      overload_signatures.push_back(callable->signature());
    }
  }
  // Indices of candidates in overloads/overload_signatures.
  std::vector<size_t> candidates;
  for (size_t i = 0; i < overloads.size(); ++i) {
    const Signature& signature = overload_signatures[i];
    if (IsCompatibleSignature(signature, parameter_types, labels.size())) {
      candidates.push_back(i);
    }
  }

  if (overloads.empty() && inapplicable_generics.empty()) {
    if (silence_errors) return nullptr;
    std::stringstream stream;
    stream << "no matching declaration found for " << name;
    ReportError(stream.str());
  } else if (candidates.empty()) {
    if (silence_errors) return nullptr;
    FailCallableLookup("cannot find suitable callable with name", name,
                       parameter_types, labels, overload_signatures,
                       inapplicable_generics);
  }

  auto is_better_candidate = [&](size_t a, size_t b) {
    return ParameterDifference(overload_signatures[a].GetExplicitTypes(),
                               parameter_types)
        .StrictlyBetterThan(ParameterDifference(
            overload_signatures[b].GetExplicitTypes(), parameter_types));
  };

  size_t best = *std::min_element(candidates.begin(), candidates.end(),
                                  is_better_candidate);
  // This check is contained in libstdc++'s std::min_element.
  DCHECK(!is_better_candidate(best, best));
  for (size_t candidate : candidates) {
    if (candidate != best && !is_better_candidate(best, candidate)) {
      std::vector<Signature> candidate_signatures;
      candidate_signatures.reserve(candidates.size());
      for (size_t i : candidates) {
        candidate_signatures.push_back(overload_signatures[i]);
      }
      FailCallableLookup("ambiguous callable ", name, parameter_types, labels,
                         candidate_signatures, inapplicable_generics);
    }
  }

  if (GenericCallable* generic =
          GenericCallable::DynamicCast(overloads[best])) {
    TypeArgumentInference inference = InferSpecializationTypes(
        generic, specialization_types, parameter_types);
    result = GetOrCreateSpecialization(
        SpecializationKey<GenericCallable>{generic, inference.GetResult()});
  } else {
    result = Callable::cast(overloads[best]);
  }

  size_t caller_size = parameter_types.size();
  size_t callee_size =
      result->signature().types().size() - result->signature().implicit_count;
  if (caller_size != callee_size &&
      !result->signature().parameter_types.var_args) {
    std::stringstream stream;
    stream << "parameter count mismatch calling " << *result << " - expected "
           << std::to_string(callee_size) << ", found "
           << std::to_string(caller_size);
    ReportError(stream.str());
  }

  return result;
}

template <class Container>
Callable* ImplementationVisitor::LookupCallable(
    const QualifiedName& name, const Container& declaration_container,
    const Arguments& arguments, const TypeVector& specialization_types) {
  return LookupCallable(name, declaration_container,
                        arguments.parameters.ComputeTypeVector(),
                        arguments.labels, specialization_types);
}

Method* ImplementationVisitor::LookupMethod(
    const std::string& name, const AggregateType* receiver_type,
    const Arguments& arguments, const TypeVector& specialization_types) {
  TypeVector types(arguments.parameters.ComputeTypeVector());
  types.insert(types.begin(), receiver_type);
  return Method::cast(LookupCallable({{}, name}, receiver_type->Methods(name),
                                     types, arguments.labels,
                                     specialization_types));
}

const Type* ImplementationVisitor::GetCommonType(const Type* left,
                                                 const Type* right) {
  const Type* common_type;
  if (IsAssignableFrom(left, right)) {
    common_type = left;
  } else if (IsAssignableFrom(right, left)) {
    common_type = right;
  } else {
    common_type = TypeOracle::GetUnionType(left, right);
  }
  common_type = common_type->NonConstexprVersion();
  return common_type;
}

VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
  if (to_copy.IsOnStack()) {
    return VisitResult(to_copy.type(),
                       assembler().Peek(to_copy.stack_range(), to_copy.type()));
  }
  return to_copy;
}

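// Materializes a struct literal such as MyStruct{a: x, b: y} (names
// illustrative). Ordinary structs are built up as adjacent stack slots;
// bitfield structs are built by starting from zero and storing each field's
// bits into a single word.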
VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
  StackScope stack_scope(this);

  auto& initializers = expr->initializers;
  std::vector<VisitResult> values;
  std::vector<const Type*> term_argument_types;
  values.reserve(initializers.size());
  term_argument_types.reserve(initializers.size());

  // Compute values and types of all initializer arguments
  for (const NameAndExpression& initializer : initializers) {
    VisitResult value = Visit(initializer.expression);
    values.push_back(value);
    term_argument_types.push_back(value.type());
  }

  // Compute and check struct type from given struct name and argument types
  const Type* type = TypeVisitor::ComputeTypeForStructExpression(
      expr->type, term_argument_types);
  if (const auto* struct_type = StructType::DynamicCast(type)) {
    CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
                                initializers);

    // Implicitly convert values and thereby build the struct on the stack
    StackRange struct_range = assembler().TopRange(0);
    auto& fields = struct_type->fields();
    for (size_t i = 0; i < values.size(); i++) {
      values[i] =
          GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
      struct_range.Extend(values[i].stack_range());
    }

    return stack_scope.Yield(VisitResult(struct_type, struct_range));
  } else {
    const auto* bitfield_struct_type = BitFieldStructType::cast(type);
    CheckInitializersWellformed(bitfield_struct_type->name(),
                                bitfield_struct_type->fields(), initializers);

    // Create a zero and cast it to the desired bitfield struct type.
    VisitResult result{TypeOracle::GetConstInt32Type(), "0"};
    result = GenerateImplicitConvert(TypeOracle::GetInt32Type(), result);
    result = GenerateCall("Unsigned", Arguments{{result}, {}}, {});
    result = GenerateCall("%RawDownCast", Arguments{{result}, {}},
                          {bitfield_struct_type});

    // Set each field in the result. If these fields are constexpr, then all of
    // this initialization will end up reduced to a single value during TurboFan
    // optimization.
    auto& fields = bitfield_struct_type->fields();
    for (size_t i = 0; i < values.size(); i++) {
      values[i] =
          GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
      result = GenerateSetBitField(bitfield_struct_type, fields[i], result,
                                   values[i], /*starts_as_zero=*/true);
    }

    return stack_scope.Yield(result);
  }
}

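// Emits a store of |value| into the bits described by |bitfield| inside
// |bitfield_struct|. When |starts_as_zero| is set, the surrounding bits are
// known to be zero, which lets the generated code skip masking out the
// field's previous value.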
VisitResult ImplementationVisitor::GenerateSetBitField(
    const Type* bitfield_struct_type, const BitField& bitfield,
    VisitResult bitfield_struct, VisitResult value, bool starts_as_zero) {
  GenerateCopy(bitfield_struct);
  GenerateCopy(value);
  assembler().Emit(
      StoreBitFieldInstruction{bitfield_struct_type, bitfield, starts_as_zero});
  return VisitResult(bitfield_struct_type, assembler().TopRange(1));
}

LocationReference ImplementationVisitor::GetLocationReference(
    Expression* location) {
  switch (location->kind) {
    case AstNode::Kind::kIdentifierExpression:
      return GetLocationReference(static_cast<IdentifierExpression*>(location));
    case AstNode::Kind::kFieldAccessExpression:
      return GetLocationReference(
          static_cast<FieldAccessExpression*>(location));
    case AstNode::Kind::kElementAccessExpression:
      return GetLocationReference(
          static_cast<ElementAccessExpression*>(location));
    case AstNode::Kind::kDereferenceExpression:
      return GetLocationReference(
          static_cast<DereferenceExpression*>(location));
    default:
      return LocationReference::Temporary(Visit(location), "expression");
  }
}

LocationReference ImplementationVisitor::GetLocationReference(
    FieldAccessExpression* expr) {
  return GenerateFieldAccess(GetLocationReference(expr->object),
                             expr->field->value, false, expr->field->pos);
}

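// Resolves a field access against the possible shapes of |reference|, in
// order: a struct stored in a local or temporary, a bitfield struct (plain
// or SmiTagged), a Reference<...> to a struct, and finally a class field or
// an overloaded ".field" accessor macro.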
LocationReference ImplementationVisitor::GenerateFieldAccess(
    LocationReference reference, const std::string& fieldname,
    bool ignore_struct_field_constness, base::Optional<SourcePosition> pos) {
  if (reference.IsVariableAccess() &&
      reference.variable().type()->StructSupertype()) {
    const StructType* type = *reference.variable().type()->StructSupertype();
    const Field& field = type->LookupField(fieldname);
    if (GlobalContext::collect_language_server_data() && pos.has_value()) {
      LanguageServerData::AddDefinition(*pos, field.pos);
    }
    if (field.const_qualified) {
      VisitResult t_value = ProjectStructField(reference.variable(), fieldname);
      return LocationReference::Temporary(
          t_value, "for constant field '" + field.name_and_type.name + "'");
    } else {
      return LocationReference::VariableAccess(
          ProjectStructField(reference.variable(), fieldname));
    }
  }
  if (reference.IsTemporary() &&
      reference.temporary().type()->StructSupertype()) {
    if (GlobalContext::collect_language_server_data() && pos.has_value()) {
      const StructType* type = *reference.temporary().type()->StructSupertype();
      const Field& field = type->LookupField(fieldname);
      LanguageServerData::AddDefinition(*pos, field.pos);
    }
    return LocationReference::Temporary(
        ProjectStructField(reference.temporary(), fieldname),
        reference.temporary_description());
  }
  if (base::Optional<const Type*> referenced_type =
          reference.ReferencedType()) {
    if ((*referenced_type)->IsBitFieldStructType()) {
      const BitFieldStructType* bitfield_struct =
          BitFieldStructType::cast(*referenced_type);
      const BitField& field = bitfield_struct->LookupField(fieldname);
      return LocationReference::BitFieldAccess(reference, field);
    }
    if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
            (*referenced_type), TypeOracle::GetSmiTaggedGeneric())) {
      const BitFieldStructType* bitfield_struct =
          BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
      if (bitfield_struct == nullptr) {
        ReportError(
            "When a value of type SmiTagged<T> is used in a field access "
            "expression, T is expected to be a bitfield struct type. Instead, "
            "T is ",
            **type_wrapped_in_smi);
      }
      const BitField& field = bitfield_struct->LookupField(fieldname);
      return LocationReference::BitFieldAccess(reference, field);
    }
  }
  if (reference.IsHeapReference()) {
    VisitResult ref = reference.heap_reference();
    bool is_const;
    auto generic_type =
        TypeOracle::MatchReferenceGeneric(ref.type(), &is_const);
    if (!generic_type) {
      ReportError(
          "Left-hand side of field access expression is marked as a reference "
          "but is not of type Reference<...>. Found type: ",
          ref.type()->ToString());
    }
    if (auto struct_type = (*generic_type)->StructSupertype()) {
      const Field& field = (*struct_type)->LookupField(fieldname);
      // Update the Reference's type to refer to the field type within the
      // struct.
      ref.SetType(TypeOracle::GetReferenceType(
          field.name_and_type.type,
          is_const ||
              (field.const_qualified && !ignore_struct_field_constness)));
      if (!field.offset.has_value()) {
        Error("accessing field with unknown offset").Throw();
      }
      if (*field.offset != 0) {
        // Copy the Reference struct up the stack and update the new copy's
        // |offset| value to point to the struct field.
        StackScope scope(this);
        ref = GenerateCopy(ref);
        VisitResult ref_offset = ProjectStructField(ref, "offset");
        VisitResult struct_offset{
            TypeOracle::GetIntPtrType()->ConstexprVersion(),
            std::to_string(*field.offset)};
        VisitResult updated_offset =
            GenerateCall("+", Arguments{{ref_offset, struct_offset}, {}});
        assembler().Poke(ref_offset.stack_range(), updated_offset.stack_range(),
                         ref_offset.type());
        ref = scope.Yield(ref);
      }
      return LocationReference::HeapReference(ref);
    }
  }
  VisitResult object_result = GenerateFetchFromLocation(reference);
  if (base::Optional<const ClassType*> class_type =
          object_result.type()->ClassSupertype()) {
    // This is a hack to distinguish the situation where we want to use
    // overloaded field accessors from when we want to create a reference.
    bool has_explicit_overloads = TestLookupCallable(
        QualifiedName{"." + fieldname}, {object_result.type()});
    if ((*class_type)->HasField(fieldname) && !has_explicit_overloads) {
      const Field& field = (*class_type)->LookupField(fieldname);
      if (GlobalContext::collect_language_server_data() && pos.has_value()) {
        LanguageServerData::AddDefinition(*pos, field.pos);
      }
      return GenerateFieldReference(object_result, field, *class_type);
    }
  }
  return LocationReference::FieldAccess(object_result, fieldname);
}

LocationReference ImplementationVisitor::GetLocationReference(
    ElementAccessExpression* expr) {
  LocationReference reference = GetLocationReference(expr->array);
  VisitResult index = Visit(expr->index);
  if (reference.IsHeapSlice()) {
    return GenerateReferenceToItemInHeapSlice(reference, index);
  } else {
    return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
                                          index);
  }
}

LocationReference ImplementationVisitor::GenerateReferenceToItemInHeapSlice(
    LocationReference slice, VisitResult index) {
  DCHECK(slice.IsHeapSlice());
  Arguments arguments{{index}, {}};
  const StructType* slice_type = *slice.heap_slice().type()->StructSupertype();
  Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
  // The reference has to be treated like a normal value when calling methods
  // on the underlying slice implementation.
  LocationReference slice_value =
      LocationReference::Temporary(slice.GetVisitResult(), "slice as value");
  return LocationReference::HeapReference(
      GenerateCall(method, std::move(slice_value), arguments, {}, false));
}

LocationReference ImplementationVisitor::GetLocationReference(
    IdentifierExpression* expr) {
  if (expr->namespace_qualification.empty()) {
    if (base::Optional<Binding<LocalValue>*> value =
            TryLookupLocalValue(expr->name->value)) {
      if (GlobalContext::collect_language_server_data()) {
        LanguageServerData::AddDefinition(expr->name->pos,
                                          (*value)->declaration_position());
      }
      if (expr->generic_arguments.size() != 0) {
        ReportError("cannot have generic parameters on local name ",
                    expr->name);
      }
      return (*value)->GetLocationReference(*value);
    }
  }

  if (expr->IsThis()) {
    ReportError("\"this\" cannot be qualified");
  }
  QualifiedName name =
      QualifiedName(expr->namespace_qualification, expr->name->value);
  if (base::Optional<Builtin*> builtin = Declarations::TryLookupBuiltin(name)) {
    if (GlobalContext::collect_language_server_data()) {
      LanguageServerData::AddDefinition(expr->name->pos,
                                        (*builtin)->Position());
    }
    return LocationReference::Temporary(GetBuiltinCode(*builtin),
                                        "builtin " + expr->name->value);
  }
  if (expr->generic_arguments.size() != 0) {
    GenericCallable* generic = Declarations::LookupUniqueGeneric(name);
    Callable* specialization =
        GetOrCreateSpecialization(SpecializationKey<GenericCallable>{
            generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
    if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
      DCHECK(!builtin->IsExternal());
      return LocationReference::Temporary(GetBuiltinCode(builtin),
                                          "builtin " + expr->name->value);
    } else {
      ReportError("cannot create function pointer for non-builtin ",
                  generic->name());
    }
  }
  Value* value = Declarations::LookupValue(name);
  CHECK(value->Position().source.IsValid());
  if (auto stream = CurrentFileStreams::Get()) {
    stream->required_builtin_includes.insert(value->Position().source);
  }
  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
  }
  if (auto* constant = NamespaceConstant::DynamicCast(value)) {
    if (constant->type()->IsConstexpr()) {
      return LocationReference::Temporary(
          VisitResult(constant->type(), constant->external_name() + "(state_)"),
          "namespace constant " + expr->name->value);
    }
    assembler().Emit(NamespaceConstantInstruction{constant});
    StackRange stack_range =
        assembler().TopRange(LoweredSlotCount(constant->type()));
    return LocationReference::Temporary(
        VisitResult(constant->type(), stack_range),
        "namespace constant " + expr->name->value);
  }
  ExternConstant* constant = ExternConstant::cast(value);
  return LocationReference::Temporary(constant->value(),
                                      "extern value " + expr->name->value);
}

LocationReference ImplementationVisitor::GetLocationReference(
    DereferenceExpression* expr) {
  VisitResult ref = Visit(expr->reference);
  if (!TypeOracle::MatchReferenceGeneric(ref.type())) {
    Error("Operator * expects a reference type but found a value of type ",
          *ref.type())
        .Throw();
  }
  return LocationReference::HeapReference(ref);
}

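// Loads the value a LocationReference denotes. Heap references to structs
// are fetched field by field; Float64OrHole uses a dedicated load macro;
// heap slices cannot be read as a whole and are rejected here.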
VisitResult ImplementationVisitor::GenerateFetchFromLocation(
    const LocationReference& reference) {
  if (reference.IsTemporary()) {
    return GenerateCopy(reference.temporary());
  } else if (reference.IsVariableAccess()) {
    return GenerateCopy(reference.variable());
  } else if (reference.IsHeapReference()) {
    const Type* referenced_type = *reference.ReferencedType();
    if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
      return GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                        "LoadFloat64OrHole"),
                          Arguments{{reference.heap_reference()}, {}});
    } else if (auto struct_type = referenced_type->StructSupertype()) {
      StackRange result_range = assembler().TopRange(0);
      for (const Field& field : (*struct_type)->fields()) {
        StackScope scope(this);
        const std::string& fieldname = field.name_and_type.name;
        VisitResult field_value = scope.Yield(GenerateFetchFromLocation(
            GenerateFieldAccess(reference, fieldname)));
        result_range.Extend(field_value.stack_range());
      }
      return VisitResult(referenced_type, result_range);
    } else {
      GenerateCopy(reference.heap_reference());
      assembler().Emit(LoadReferenceInstruction{referenced_type});
      DCHECK_EQ(1, LoweredSlotCount(referenced_type));
      return VisitResult(referenced_type, assembler().TopRange(1));
    }
  } else if (reference.IsBitFieldAccess()) {
    // First fetch the bitfield struct, then get the bits out of it.
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
    assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
                                             reference.bit_field()});
    return VisitResult(*reference.ReferencedType(), assembler().TopRange(1));
  } else {
    if (reference.IsHeapSlice()) {
      ReportError(
          "fetching a value directly from an indexed field isn't allowed");
    }
    DCHECK(reference.IsCallAccess());
    return GenerateCall(reference.eval_function(),
                        Arguments{reference.call_arguments(), {}});
  }
}

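// Stores |assignment_value| into the given location, mirroring
// GenerateFetchFromLocation: struct heap references are assigned field by
// field, float64 stores silence NaNs, and bitfield writes go through a
// read-modify-write of the enclosing bitfield struct.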
void ImplementationVisitor::GenerateAssignToLocation(
    const LocationReference& reference, const VisitResult& assignment_value) {
  if (reference.IsCallAccess()) {
    Arguments arguments{reference.call_arguments(), {}};
    arguments.parameters.push_back(assignment_value);
    GenerateCall(reference.assign_function(), arguments);
  } else if (reference.IsVariableAccess()) {
    VisitResult variable = reference.variable();
    VisitResult converted_value =
        GenerateImplicitConvert(variable.type(), assignment_value);
    assembler().Poke(variable.stack_range(), converted_value.stack_range(),
                     variable.type());

    // Local variables are detected by the existence of a binding. Assignment
    // to local variables is recorded to support lint errors.
    if (reference.binding()) {
      (*reference.binding())->SetWritten();
    }
  } else if (reference.IsHeapSlice()) {
    ReportError("assigning a value directly to an indexed field isn't allowed");
  } else if (reference.IsHeapReference()) {
    const Type* referenced_type = *reference.ReferencedType();
    if (reference.IsConst()) {
      Error("cannot assign to const value of type ", *referenced_type).Throw();
    }
    if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
      GenerateCall(
          QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                        "StoreFloat64OrHole"),
          Arguments{{reference.heap_reference(), assignment_value}, {}});
    } else if (auto struct_type = referenced_type->StructSupertype()) {
      if (!assignment_value.type()->IsSubtypeOf(referenced_type)) {
        ReportError("Cannot assign to ", *referenced_type,
                    " with value of type ", *assignment_value.type());
      }
      for (const Field& field : (*struct_type)->fields()) {
        const std::string& fieldname = field.name_and_type.name;
        // Allow assignment of structs even if they contain const fields.
        // Const on struct fields just disallows direct writes to them.
        bool ignore_struct_field_constness = true;
        GenerateAssignToLocation(
            GenerateFieldAccess(reference, fieldname,
                                ignore_struct_field_constness),
            ProjectStructField(assignment_value, fieldname));
      }
    } else {
      GenerateCopy(reference.heap_reference());
      VisitResult converted_assignment_value =
          GenerateImplicitConvert(referenced_type, assignment_value);
      if (referenced_type == TypeOracle::GetFloat64Type()) {
        VisitResult silenced_float_value = GenerateCall(
            "Float64SilenceNaN", Arguments{{assignment_value}, {}});
        assembler().Poke(converted_assignment_value.stack_range(),
                         silenced_float_value.stack_range(), referenced_type);
      }
      assembler().Emit(StoreReferenceInstruction{referenced_type});
    }
  } else if (reference.IsBitFieldAccess()) {
    // First fetch the bitfield struct, then set the updated bits, then store
    // it back to where we found it.
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
    VisitResult converted_value =
        GenerateImplicitConvert(*reference.ReferencedType(), assignment_value);
    VisitResult updated_bit_field_struct =
        GenerateSetBitField(bit_field_struct.type(), reference.bit_field(),
                            bit_field_struct, converted_value);
    GenerateAssignToLocation(reference.bit_field_struct_location(),
                             updated_bit_field_struct);
  } else {
    DCHECK(reference.IsTemporary());
    ReportError("cannot assign to const-bound or temporary ",
                reference.temporary_description());
  }
}

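// Generates an indirect call through a builtin function pointer. The callee
// must have a BuiltinPointerType; arity and argument types are checked
// against the pointer's signature before CallBuiltinPointerInstruction is
// emitted.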
VisitResult ImplementationVisitor::GeneratePointerCall(
    Expression* callee, const Arguments& arguments, bool is_tailcall) {
  StackScope scope(this);
  TypeVector parameter_types(arguments.parameters.ComputeTypeVector());
  VisitResult callee_result = Visit(callee);
  if (!callee_result.type()->IsBuiltinPointerType()) {
    std::stringstream stream;
    stream << "Expected a function pointer type but found "
           << *callee_result.type();
    ReportError(stream.str());
  }
  const BuiltinPointerType* type =
      BuiltinPointerType::cast(callee_result.type());

  if (type->parameter_types().size() != parameter_types.size()) {
    std::stringstream stream;
    stream << "parameter count mismatch calling function pointer with Type: "
           << *type << " - expected "
           << std::to_string(type->parameter_types().size()) << ", found "
           << std::to_string(parameter_types.size());
    ReportError(stream.str());
  }

  ParameterTypes types{type->parameter_types(), false};
  Signature sig;
  sig.parameter_types = types;
  if (!IsCompatibleSignature(sig, parameter_types, 0)) {
    std::stringstream stream;
    stream << "parameters do not match function pointer signature. Expected: ("
           << type->parameter_types() << ") but got: (" << parameter_types
           << ")";
    ReportError(stream.str());
  }

  callee_result = GenerateCopy(callee_result);
  StackRange arg_range = assembler().TopRange(0);
  for (size_t current = 0; current < arguments.parameters.size(); ++current) {
    const Type* to_type = type->parameter_types()[current];
    arg_range.Extend(
        GenerateImplicitConvert(to_type, arguments.parameters[current])
            .stack_range());
  }

  assembler().Emit(
      CallBuiltinPointerInstruction{is_tailcall, type, arg_range.Size()});

  if (is_tailcall) {
    return VisitResult::NeverResult();
  }
  DCHECK_EQ(1, LoweredSlotCount(type->return_type()));
  return scope.Yield(VisitResult(type->return_type(), assembler().TopRange(1)));
}

2613 2614 2615
void ImplementationVisitor::AddCallParameter(
    Callable* callable, VisitResult parameter, const Type* parameter_type,
    std::vector<VisitResult>* converted_arguments, StackRange* argument_range,
    std::vector<std::string>* constexpr_arguments, bool inline_macro) {
  VisitResult converted;
  if ((converted_arguments->size() < callable->signature().implicit_count) &&
      parameter.type()->IsTopType()) {
    converted = GenerateCopy(parameter);
  } else {
    converted = GenerateImplicitConvert(parameter_type, parameter);
  }
  converted_arguments->push_back(converted);
  if (!inline_macro) {
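    // Only values that live on the stack extend the argument range; constexpr
    // values travel separately as C++ expression strings.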
    if (converted.IsOnStack()) {
      argument_range->Extend(converted.stack_range());
    } else {
      constexpr_arguments->push_back(converted.constexpr_value());
    }
  }
}

namespace {
std::pair<std::string, std::string> GetClassInstanceTypeRange(
    const ClassType* class_type) {
  std::pair<std::string, std::string> result;
  if (class_type->InstanceTypeRange()) {
    auto instance_type_range = *class_type->InstanceTypeRange();
    std::string instance_type_string_first =
        "static_cast<InstanceType>(" +
        std::to_string(instance_type_range.first) + ")";
    std::string instance_type_string_second =
        "static_cast<InstanceType>(" +
        std::to_string(instance_type_range.second) + ")";
    result =
        std::make_pair(instance_type_string_first, instance_type_string_second);
  } else {
    ReportError(
        "%Min/MaxInstanceType must take a class type that is either a string "
        "or has a generated instance type range");
  }
  return result;
}
}  // namespace

VisitResult ImplementationVisitor::GenerateCall(
    Callable* callable, base::Optional<LocationReference> this_reference,
    Arguments arguments, const TypeVector& specialization_types,
    bool is_tailcall) {
  CHECK(callable->Position().source.IsValid());
  if (auto stream = CurrentFileStreams::Get()) {
    stream->required_builtin_includes.insert(callable->Position().source);
  }

  const Type* return_type = callable->signature().return_type;

  if (is_tailcall) {
    if (Builtin* builtin = Builtin::DynamicCast(CurrentCallable::Get())) {
      const Type* outer_return_type = builtin->signature().return_type;
      if (!return_type->IsSubtypeOf(outer_return_type)) {
        Error("Cannot tailcall, type of result is ", *return_type,
              " but should be a subtype of ", *outer_return_type, ".");
      }
    } else {
      Error("Tail calls are only allowed from builtins");
    }
  }

  bool inline_macro = callable->ShouldBeInlined(output_type_);
  std::vector<VisitResult> implicit_arguments;
  for (size_t i = 0; i < callable->signature().implicit_count; ++i) {
    std::string implicit_name = callable->signature().parameter_names[i]->value;
    base::Optional<Binding<LocalValue>*> val =
        TryLookupLocalValue(implicit_name);
    if (val) {
      implicit_arguments.push_back(
          GenerateFetchFromLocation((*val)->GetLocationReference(*val)));
    } else {
      VisitResult uninitialized = VisitResult::TopTypeResult(
          "implicit parameter '" + implicit_name +
              "' is not defined when invoking " + callable->ReadableName() +
              " at " + PositionAsString(CurrentSourcePosition::Get()),
          callable->signature().parameter_types.types[i]);
      implicit_arguments.push_back(uninitialized);
    }
    const Type* type = implicit_arguments.back().type();
    if (const TopType* top_type = TopType::DynamicCast(type)) {
      if (!callable->IsMacro() || callable->IsExternal()) {
        ReportError(
            "uninitialized implicit parameters can only be passed to "
            "Torque-defined macros: the ",
            top_type->reason());
      }
      inline_macro = true;
    }
  }

  std::vector<VisitResult> converted_arguments;
  StackRange argument_range = assembler().TopRange(0);
  std::vector<std::string> constexpr_arguments;

  size_t current = 0;
  for (; current < callable->signature().implicit_count; ++current) {
    AddCallParameter(callable, implicit_arguments[current],
                     callable->signature().parameter_types.types[current],
                     &converted_arguments, &argument_range,
                     &constexpr_arguments, inline_macro);
  }

  if (this_reference) {
    DCHECK(callable->IsMethod());
    Method* method = Method::cast(callable);
    // By now, the this reference should be a variable, a temporary, or a
    // Slice. In any case, fetching the VisitResult should succeed.
    VisitResult this_value = this_reference->GetVisitResult();
    if (inline_macro) {
      if (!this_value.type()->IsSubtypeOf(method->aggregate_type())) {
        ReportError("this parameter must be a subtype of ",
                    *method->aggregate_type(), " but it is of type ",
                    *this_value.type());
      }
    } else {
      AddCallParameter(callable, this_value, method->aggregate_type(),
                       &converted_arguments, &argument_range,
                       &constexpr_arguments, inline_macro);
    }
    ++current;
  }

  for (auto arg : arguments.parameters) {
    const Type* to_type = (current >= callable->signature().types().size())
                              ? TypeOracle::GetObjectType()
                              : callable->signature().types()[current++];
    AddCallParameter(callable, arg, to_type, &converted_arguments,
                     &argument_range, &constexpr_arguments, inline_macro);
  }

  size_t label_count = callable->signature().labels.size();
  if (label_count != arguments.labels.size()) {
    std::stringstream s;
    s << "unexpected number of otherwise labels for "
      << callable->ReadableName() << " (expected "
      << std::to_string(label_count) << ", found "
      << std::to_string(arguments.labels.size()) << ")";
    ReportError(s.str());
  }

  if (callable->IsTransitioning()) {
    if (!CurrentCallable::Get()->IsTransitioning()) {
      std::stringstream s;
      s << *CurrentCallable::Get()
        << " isn't marked transitioning but calls the transitioning "
        << *callable;
      ReportError(s.str());
    }
  }

  if (auto* builtin = Builtin::DynamicCast(callable)) {
    base::Optional<Block*> catch_block = GetCatchBlock();
    assembler().Emit(CallBuiltinInstruction{
        is_tailcall, builtin, argument_range.Size(), catch_block});
    GenerateCatchBlock(catch_block);
    if (is_tailcall) {
      return VisitResult::NeverResult();
    } else {
      size_t slot_count = LoweredSlotCount(return_type);
      if (builtin->IsStub()) {
        if (slot_count < 1 || slot_count > 2) {
          ReportError(
              "Builtin with stub linkage is expected to return one or two "
              "values but returns ",
              slot_count);
        }
      } else {
        if (slot_count != 1) {
          ReportError(
              "Builtin with JS linkage is expected to return one value but "
              "returns ",
              slot_count);
        }
      }
      return VisitResult(return_type, assembler().TopRange(slot_count));
    }
  } else if (auto* macro = Macro::DynamicCast(callable)) {
    if (is_tailcall) {
      ReportError("can't tail call a macro");
    }

    macro->SetUsed();

    // If we're currently generating a C++ macro and it's calling another macro,
    // then we need to make sure that we also generate C++ code for the called
    // macro within the same -inl.inc file.
    if (output_type_ == OutputType::kCC && !inline_macro) {
      if (auto* torque_macro = TorqueMacro::DynamicCast(macro)) {
        auto* streams = CurrentFileStreams::Get();
        SourceId file = streams ? streams->file : SourceId::Invalid();
        GlobalContext::EnsureInCCOutputList(torque_macro, file);
      }
    }

    // TODO(torque-builder): Consider a function builder here.
    if (return_type->IsConstexpr()) {
      DCHECK_EQ(0, arguments.labels.size());
      std::stringstream result;
      result << "(";
      bool first = true;
      if (auto* extern_macro = ExternMacro::DynamicCast(macro)) {
        result << extern_macro->external_assembler_name() << "(state_)."
               << extern_macro->ExternalName() << "(";
      } else {
        result << macro->ExternalName() << "(state_";
        first = false;
      }
      for (VisitResult arg : arguments.parameters) {
        DCHECK(!arg.IsOnStack());
        if (!first) {
          result << ", ";
        }
        first = false;
        result << arg.constexpr_value();
      }
      result << "))";
      return VisitResult(return_type, result.str());
    } else if (inline_macro) {
      std::vector<Block*> label_blocks;
      for (Binding<LocalLabel>* label : arguments.labels) {
        label_blocks.push_back(label->block);
      }
      return InlineMacro(macro, this_reference, converted_arguments,
                         label_blocks);
    } else if (arguments.labels.empty() &&
               return_type != TypeOracle::GetNeverType()) {
      base::Optional<Block*> catch_block = GetCatchBlock();
      assembler().Emit(
          CallCsaMacroInstruction{macro, constexpr_arguments, catch_block});
      GenerateCatchBlock(catch_block);
      size_t return_slot_count = LoweredSlotCount(return_type);
      return VisitResult(return_type, assembler().TopRange(return_slot_count));
    } else {
      base::Optional<Block*> return_continuation;
      if (return_type != TypeOracle::GetNeverType()) {
        return_continuation = assembler().NewBlock();
      }

      std::vector<Block*> label_blocks;

      for (size_t i = 0; i < label_count; ++i) {
        label_blocks.push_back(assembler().NewBlock());
      }
      base::Optional<Block*> catch_block = GetCatchBlock();
      assembler().Emit(CallCsaMacroAndBranchInstruction{
          macro, constexpr_arguments, return_continuation, label_blocks,
          catch_block});
      GenerateCatchBlock(catch_block);

      for (size_t i = 0; i < label_count; ++i) {
        Binding<LocalLabel>* label = arguments.labels[i];
        size_t callee_label_parameters =
            callable->signature().labels[i].types.size();
        if (label->parameter_types.size() != callee_label_parameters) {
          std::stringstream s;
          s << "label " << label->name()
            << " doesn't have the right number of parameters (found "
            << std::to_string(label->parameter_types.size()) << ", expected "
            << std::to_string(callee_label_parameters) << ")";
          ReportError(s.str());
        }
        assembler().Bind(label_blocks[i]);
        assembler().Goto(
            label->block,
            LowerParameterTypes(callable->signature().labels[i].types).size());

        size_t j = 0;
        for (auto t : callable->signature().labels[i].types) {
          const Type* parameter_type = label->parameter_types[j];
          if (!t->IsSubtypeOf(parameter_type)) {
            ReportError("mismatch of label parameters (label expects ",
                        *parameter_type, " but macro produces ", *t,
                        " for parameter ", i + 1, ")");
          }
          j++;
        }
      }

      if (return_continuation) {
        assembler().Bind(*return_continuation);
        size_t return_slot_count = LoweredSlotCount(return_type);
        return VisitResult(return_type,
                           assembler().TopRange(return_slot_count));
      } else {
        return VisitResult::NeverResult();
      }
    }
  } else if (auto* runtime_function = RuntimeFunction::DynamicCast(callable)) {
    base::Optional<Block*> catch_block = GetCatchBlock();
    assembler().Emit(CallRuntimeInstruction{
        is_tailcall, runtime_function, argument_range.Size(), catch_block});
    GenerateCatchBlock(catch_block);
    if (is_tailcall || return_type == TypeOracle::GetNeverType()) {
      return VisitResult::NeverResult();
    } else {
      size_t slot_count = LoweredSlotCount(return_type);
      DCHECK_LE(slot_count, 1);
      // TODO(turbofan): Actually, runtime functions have to return a value, so
      // we should assert slot_count == 1 here.
      return VisitResult(return_type, assembler().TopRange(slot_count));
    }
  } else if (auto* intrinsic = Intrinsic::DynamicCast(callable)) {
    if (intrinsic->ExternalName() == "%SizeOf") {
      if (specialization_types.size() != 1) {
        ReportError("%SizeOf must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      std::string size_string;
      if (base::Optional<std::tuple<size_t, std::string>> size = SizeOf(type)) {
        size_string = std::get<1>(*size);
      } else {
        Error("size of ", *type, " is not known.");
      }
      return VisitResult(return_type, size_string);
    } else if (intrinsic->ExternalName() == "%ClassHasMapConstant") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%ClassHasMapConstant must take a class type parameter");
      }
      // If the class isn't actually used as the parameter to a TNode,
      // then we can't rely on the class existing in C++ or being of the same
      // type (e.g. it could be a template), so don't use the template CSA
      // machinery for accessing the class' map.
      if (class_type->name() != class_type->GetGeneratedTNodeTypeName()) {
        return VisitResult(return_type, std::string("false"));
      } else {
        return VisitResult(
            return_type,
            std::string("CodeStubAssembler(state_).ClassHasMapConstant<") +
                class_type->name() + ">()");
      }
    } else if (intrinsic->ExternalName() == "%MinInstanceType") {
      if (specialization_types.size() != 1) {
        ReportError("%MinInstanceType must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%MinInstanceType must take a class type parameter");
      }
      std::pair<std::string, std::string> instance_types =
          GetClassInstanceTypeRange(class_type);
      return VisitResult(return_type, instance_types.first);
    } else if (intrinsic->ExternalName() == "%MaxInstanceType") {
      if (specialization_types.size() != 1) {
        ReportError("%MaxInstanceType must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%MaxInstanceType must take a class type parameter");
      }
      std::pair<std::string, std::string> instance_types =
          GetClassInstanceTypeRange(class_type);
      return VisitResult(return_type, instance_types.second);
    } else if (intrinsic->ExternalName() == "%RawConstexprCast") {
      if (intrinsic->signature().parameter_types.types.size() != 1 ||
          constexpr_arguments.size() != 1) {
        ReportError(
            "%RawConstexprCast must take a single parameter with constexpr "
            "type");
      }
      if (!return_type->IsConstexpr()) {
        std::stringstream s;
        s << *return_type
          << " return type for %RawConstexprCast is not constexpr";
        ReportError(s.str());
      }
      std::stringstream result;
      result << "static_cast<" << return_type->GetGeneratedTypeName() << ">(";
      result << constexpr_arguments[0];
      result << ")";
      return VisitResult(return_type, result.str());
    } else if (intrinsic->ExternalName() == "%IndexedFieldLength") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%IndexedFieldLength must take a class type parameter");
      }
      const Field& field =
          class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
      return GenerateArrayLength(VisitResult(type, argument_range), field);
    } else if (intrinsic->ExternalName() == "%MakeLazy") {
      if (specialization_types[0]->IsStructType()) {
        ReportError("%MakeLazy can't use macros that return structs");
      }
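      // Illustrative use (assuming a macro `GetValue(Smi): Number` exists):
      //   %MakeLazy<Number>("GetValue", x)
      // roughly yields a Lazy<Number> that calls GetValue(x) when evaluated.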
      std::string getter_name = StringLiteralUnquote(constexpr_arguments[0]);

      // Normally the parser would split namespace names for us, but we
      // sidestepped it by putting the macro name in a string literal.
      QualifiedName qualified_getter_name = QualifiedName::Parse(getter_name);

      // converted_arguments contains all of the arguments to %MakeLazy. We're
      // looking for a function that takes all but the first.
      Arguments arguments_to_getter;
      arguments_to_getter.parameters.insert(
          arguments_to_getter.parameters.begin(),
          converted_arguments.begin() + 1, converted_arguments.end());

      Callable* callable = LookupCallable(
          qualified_getter_name, Declarations::Lookup(qualified_getter_name),
          arguments_to_getter, {});
      Macro* getter = Macro::DynamicCast(callable);
      if (!getter || getter->IsMethod()) {
        ReportError(
            "%MakeLazy expects a macro, not builtin or other type of callable");
      }
      if (!getter->signature().labels.empty()) {
        ReportError("%MakeLazy requires a macro with no labels");
      }
      if (!getter->signature().return_type->IsSubtypeOf(
              specialization_types[0])) {
        ReportError("%MakeLazy expected return type ", *specialization_types[0],
                    " but found ", *getter->signature().return_type);
      }
      if (getter->signature().implicit_count > 0) {
        ReportError("Implicit parameters are not yet supported in %MakeLazy");
      }

      getter->SetUsed();  // Prevent warnings about unused macros.

      // Now that we've looked up the getter macro, we have to convert the
      // arguments again, so that, for example, constexpr arguments can be
      // coerced to non-constexpr types and put on the stack.

      std::vector<VisitResult> converted_arguments_for_getter;
      StackRange argument_range_for_getter = assembler().TopRange(0);
      std::vector<std::string> constexpr_arguments_for_getter;

      size_t current = 0;
      for (auto arg : arguments_to_getter.parameters) {
        DCHECK_LT(current, getter->signature().types().size());
        const Type* to_type = getter->signature().types()[current++];
        AddCallParameter(getter, arg, to_type, &converted_arguments_for_getter,
                         &argument_range_for_getter,
                         &constexpr_arguments_for_getter,
                         /*inline_macro=*/false);
      }

      // Now that the arguments are prepared, emit the instruction that consumes
      // them.
      assembler().Emit(MakeLazyNodeInstruction{getter, return_type,
                                               constexpr_arguments_for_getter});
      return VisitResult(return_type, assembler().TopRange(1));
    } else if (intrinsic->ExternalName() == "%FieldSlice") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%FieldSlice must take a class type parameter");
      }
      const Field& field =
          class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
      LocationReference ref = GenerateFieldReference(
          VisitResult(type, argument_range), field, class_type,
          /*treat_optional_as_indexed=*/true);
      if (!ref.IsHeapSlice()) {
        ReportError("%FieldSlice expected an indexed or optional field");
      }
      return ref.heap_slice();
    } else {
      assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
                                                constexpr_arguments});
      size_t return_slot_count =
          LoweredSlotCount(intrinsic->signature().return_type);
      return VisitResult(return_type, assembler().TopRange(return_slot_count));
    }
  } else {
    UNREACHABLE();
  }
}

VisitResult ImplementationVisitor::GenerateCall(
    const QualifiedName& callable_name, Arguments arguments,
    const TypeVector& specialization_types, bool is_tailcall) {
  Callable* callable =
      LookupCallable(callable_name, Declarations::Lookup(callable_name),
                     arguments, specialization_types);
  return GenerateCall(callable, base::nullopt, arguments, specialization_types,
                      is_tailcall);
}

VisitResult ImplementationVisitor::Visit(CallExpression* expr,
                                         bool is_tailcall) {
  StackScope scope(this);

  if (expr->callee->name->value == "&" && expr->arguments.size() == 1) {
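    // e.g. `&foo.bar` or `&foo.elements[i]`: taking the address of a field
    // yields the reference or slice itself instead of loading its value.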
    if (auto* loc_expr = LocationExpression::DynamicCast(expr->arguments[0])) {
      LocationReference ref = GetLocationReference(loc_expr);
      if (ref.IsHeapReference()) return scope.Yield(ref.heap_reference());
      if (ref.IsHeapSlice()) return scope.Yield(ref.heap_slice());
    }
    ReportError("Unable to create a heap reference.");
  }

  Arguments arguments;
  QualifiedName name = QualifiedName(expr->callee->namespace_qualification,
                                     expr->callee->name->value);
  TypeVector specialization_types =
      TypeVisitor::ComputeTypeVector(expr->callee->generic_arguments);
  bool has_template_arguments = !specialization_types.empty();
  for (Expression* arg : expr->arguments)
    arguments.parameters.push_back(Visit(arg));
  arguments.labels = LabelsFromIdentifiers(expr->labels);
  if (!has_template_arguments && name.namespace_qualification.empty() &&
      TryLookupLocalValue(name.name)) {
    return scope.Yield(
        GeneratePointerCall(expr->callee, arguments, is_tailcall));
  } else {
    if (GlobalContext::collect_language_server_data()) {
      Callable* callable = LookupCallable(name, Declarations::Lookup(name),
                                          arguments, specialization_types);
      LanguageServerData::AddDefinition(expr->callee->name->pos,
                                        callable->IdentifierPosition());
    }
    if (expr->callee->name->value == "!" && arguments.parameters.size() == 1) {
      PropagateBitfieldMark(expr->arguments[0], expr);
    }
    if (expr->callee->name->value == "==" && arguments.parameters.size() == 2) {
      if (arguments.parameters[0].type()->IsConstexpr()) {
        PropagateBitfieldMark(expr->arguments[1], expr);
      } else if (arguments.parameters[1].type()->IsConstexpr()) {
        PropagateBitfieldMark(expr->arguments[0], expr);
      }
    }
    return scope.Yield(
        GenerateCall(name, arguments, specialization_types, is_tailcall));
  }
}

VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
  StackScope scope(this);
  Arguments arguments;
  std::string method_name = expr->method->name->value;
  TypeVector specialization_types =
      TypeVisitor::ComputeTypeVector(expr->method->generic_arguments);
  LocationReference target = GetLocationReference(expr->target);
  if (!target.IsVariableAccess()) {
    VisitResult result = GenerateFetchFromLocation(target);
    target = LocationReference::Temporary(result, "this parameter");
  }
  const AggregateType* target_type =
      (*target.ReferencedType())->AggregateSupertype().value_or(nullptr);
  if (!target_type) {
    ReportError("target of method call not a struct or class type");
  }
  for (Expression* arg : expr->arguments) {
    arguments.parameters.push_back(Visit(arg));
  }
  arguments.labels = LabelsFromIdentifiers(expr->labels);
  TypeVector argument_types = arguments.parameters.ComputeTypeVector();
  DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
  QualifiedName qualified_name = QualifiedName(method_name);
  Callable* callable = LookupMethod(method_name, target_type, arguments, {});
  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(expr->method->name->pos,
                                      callable->IdentifierPosition());
  }
  return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
}

VisitResult ImplementationVisitor::Visit(IntrinsicCallExpression* expr) {
  StackScope scope(this);
  Arguments arguments;
  TypeVector specialization_types =
      TypeVisitor::ComputeTypeVector(expr->generic_arguments);
  for (Expression* arg : expr->arguments)
    arguments.parameters.push_back(Visit(arg));
  return scope.Yield(
      GenerateCall(expr->name->value, arguments, specialization_types, false));
}

void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
                                           Block* true_block,
                                           Block* false_block) {
  DCHECK_EQ(condition,
            VisitResult(TypeOracle::GetBoolType(), assembler().TopRange(1)));
  assembler().Branch(true_block, false_block);
}

VisitResult ImplementationVisitor::GenerateBoolConstant(bool constant) {
  return GenerateImplicitConvert(TypeOracle::GetBoolType(),
                                 VisitResult(TypeOracle::GetConstexprBoolType(),
                                             constant ? "true" : "false"));
}

void ImplementationVisitor::GenerateExpressionBranch(Expression* expression,
                                                     Block* true_block,
                                                     Block* false_block) {
  StackScope stack_scope(this);
  VisitResult expression_result = this->Visit(expression);
  expression_result = stack_scope.Yield(
      GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
  GenerateBranch(expression_result, true_block, false_block);
}

VisitResult ImplementationVisitor::GenerateImplicitConvert(
    const Type* destination_type, VisitResult source) {
  StackScope scope(this);
  if (source.type() == TypeOracle::GetNeverType()) {
    ReportError("it is not allowed to use a value of type never");
  }

  if (destination_type == source.type()) {
    return scope.Yield(GenerateCopy(source));
  }

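  // e.g. a constexpr literal like `5` (constexpr int31) reaches a Smi
  // destination by calling FromConstexpr<Smi, constexpr int31>(5) below.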
  if (auto from = TypeOracle::ImplicitlyConvertableFrom(destination_type,
                                                        source.type())) {
    return scope.Yield(GenerateCall(kFromConstexprMacroName,
                                    Arguments{{source}, {}},
                                    {destination_type, *from}, false));
  } else if (IsAssignableFrom(destination_type, source.type())) {
    source.SetType(destination_type);
    return scope.Yield(GenerateCopy(source));
  } else {
    std::stringstream s;
    if (const TopType* top_type = TopType::DynamicCast(source.type())) {
      s << "undefined expression of type " << *destination_type << ": the "
        << top_type->reason();
    } else {
      s << "cannot use expression of type " << *source.type()
        << " as a value of type " << *destination_type;
    }
    ReportError(s.str());
  }
}

StackRange ImplementationVisitor::GenerateLabelGoto(
    LocalLabel* label, base::Optional<StackRange> arguments) {
  return assembler().Goto(label->block, arguments ? arguments->Size() : 0);
}

std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
    const std::vector<Identifier*>& names) {
  std::vector<Binding<LocalLabel>*> result;
  result.reserve(names.size());
  for (const auto& name : names) {
    Binding<LocalLabel>* label = LookupLabel(name->value);
    result.push_back(label);

    // Link up labels in "otherwise" part of the call expression with
    // either the label in the signature of the calling macro or the label
    // block of a surrounding "try".
    if (GlobalContext::collect_language_server_data()) {
      LanguageServerData::AddDefinition(name->pos,
                                        label->declaration_position());
    }
  }
  return result;
}
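
// Struct-typed parameters are flattened recursively: e.g. a parameter `p` of
// a struct type {a: Smi, b: Smi} becomes two stack slots named "p.a" and
// "p.b".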

StackRange ImplementationVisitor::LowerParameter(
    const Type* type, const std::string& parameter_name,
    Stack<std::string>* lowered_parameters) {
  if (base::Optional<const StructType*> struct_type = type->StructSupertype()) {
    StackRange range = lowered_parameters->TopRange(0);
    for (auto& field : (*struct_type)->fields()) {
      StackRange parameter_range = LowerParameter(
          field.name_and_type.type,
          parameter_name + "." + field.name_and_type.name, lowered_parameters);
      range.Extend(parameter_range);
    }
    return range;
  } else {
    lowered_parameters->Push(parameter_name);
    return lowered_parameters->TopRange(1);
  }
}

void ImplementationVisitor::LowerLabelParameter(
    const Type* type, const std::string& parameter_name,
    std::vector<std::string>* lowered_parameters) {
  if (base::Optional<const StructType*> struct_type = type->StructSupertype()) {
    for (auto& field : (*struct_type)->fields()) {
      LowerLabelParameter(
          field.name_and_type.type,
          "&((*" + parameter_name + ")." + field.name_and_type.name + ")",
          lowered_parameters);
    }
  } else {
    lowered_parameters->push_back(parameter_name);
  }
}

std::string ImplementationVisitor::ExternalLabelName(
    const std::string& label_name) {
  return "label_" + label_name;
}

std::string ImplementationVisitor::ExternalLabelParameterName(
    const std::string& label_name, size_t i) {
  return "label_" + label_name + "_parameter_" + std::to_string(i);
}

std::string ImplementationVisitor::ExternalParameterName(
    const std::string& name) {
  return std::string("p_") + name;
}

DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentFileStreams)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue)

bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
                           size_t label_count) {
  auto i = sig.parameter_types.types.begin() + sig.implicit_count;
  if ((sig.parameter_types.types.size() - sig.implicit_count) > types.size())
    return false;
  if (sig.labels.size() != label_count) return false;
  for (auto current : types) {
    if (i == sig.parameter_types.types.end()) {
      if (!sig.parameter_types.var_args) return false;
      if (!IsAssignableFrom(TypeOracle::GetObjectType(), current)) return false;
    } else {
      if (!IsAssignableFrom(*i++, current)) return false;
    }
  }
  return true;
}

base::Optional<Block*> ImplementationVisitor::GetCatchBlock() {
  base::Optional<Block*> catch_block;
  if (base::Optional<Binding<LocalLabel>*> catch_handler =
          TryLookupLabel(kCatchLabelName)) {
    catch_block = assembler().NewBlock(base::nullopt, true);
  }
  return catch_block;
}

void ImplementationVisitor::GenerateCatchBlock(
    base::Optional<Block*> catch_block) {
  if (catch_block) {
    base::Optional<Binding<LocalLabel>*> catch_handler =
        TryLookupLabel(kCatchLabelName);
    if (assembler().CurrentBlockIsComplete()) {
      assembler().Bind(*catch_block);
      assembler().Goto((*catch_handler)->block, 1);
    } else {
      CfgAssemblerScopedTemporaryBlock temp(&assembler(), *catch_block);
      assembler().Goto((*catch_handler)->block, 1);
    }
  }
}
void ImplementationVisitor::VisitAllDeclarables() {
  CurrentCallable::Scope current_callable(nullptr);
  const std::vector<std::unique_ptr<Declarable>>& all_declarables =
      GlobalContext::AllDeclarables();

  // This has to be an index-based loop because all_declarables can be extended
  // during the loop.
  for (size_t i = 0; i < all_declarables.size(); ++i) {
    try {
      Visit(all_declarables[i].get());
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
  }

  // Do the same for macros which generate C++ code.
  output_type_ = OutputType::kCC;
  const std::vector<std::pair<TorqueMacro*, SourceId>>& cc_macros =
      GlobalContext::AllMacrosForCCOutput();
  for (size_t i = 0; i < cc_macros.size(); ++i) {
    try {
      Visit(static_cast<Declarable*>(cc_macros[i].first), cc_macros[i].second);
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
  }

  // Do the same for macros which generate C++ debug code.
  // The set of macros is the same as C++ macros.
  output_type_ = OutputType::kCCDebug;
  for (size_t i = 0; i < cc_macros.size(); ++i) {
    try {
      Visit(static_cast<Declarable*>(cc_macros[i].first), cc_macros[i].second);
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
  }
  output_type_ = OutputType::kCSA;
}

void ImplementationVisitor::Visit(Declarable* declarable,
                                  base::Optional<SourceId> file) {
  CurrentScope::Scope current_scope(declarable->ParentScope());
  CurrentSourcePosition::Scope current_source_position(declarable->Position());
  CurrentFileStreams::Scope current_file_streams(
      &GlobalContext::GeneratedPerFile(file ? *file
                                            : declarable->Position().source));
  if (Callable* callable = Callable::DynamicCast(declarable)) {
    if (!callable->ShouldGenerateExternalCode(output_type_))
      CurrentFileStreams::Get() = nullptr;
  }
  switch (declarable->kind()) {
    case Declarable::kExternMacro:
      return Visit(ExternMacro::cast(declarable));
    case Declarable::kTorqueMacro:
      return Visit(TorqueMacro::cast(declarable));
    case Declarable::kMethod:
      return Visit(Method::cast(declarable));
    case Declarable::kBuiltin:
      return Visit(Builtin::cast(declarable));
    case Declarable::kTypeAlias:
      return Visit(TypeAlias::cast(declarable));
    case Declarable::kNamespaceConstant:
      return Visit(NamespaceConstant::cast(declarable));
    case Declarable::kRuntimeFunction:
    case Declarable::kIntrinsic:
    case Declarable::kExternConstant:
    case Declarable::kNamespace:
    case Declarable::kGenericCallable:
    case Declarable::kGenericType:
      return;
  }
}

std::string MachineTypeString(const Type* type) {
  if (type->IsSubtypeOf(TypeOracle::GetSmiType())) {
    return "MachineType::TaggedSigned()";
  }
  if (type->IsSubtypeOf(TypeOracle::GetHeapObjectType())) {
    return "MachineType::TaggedPointer()";
  }
  if (type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    return "MachineType::AnyTagged()";
  }
  return "MachineTypeOf<" + type->GetGeneratedTNodeTypeName() + ">::value";
}
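// e.g. Smi lowers to MachineType::TaggedSigned() and HeapObject to
// MachineType::TaggedPointer(); an untagged type like int32 falls through to
// MachineTypeOf<Int32T>::value.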

void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
    const std::string& output_directory) {
  std::stringstream builtin_definitions;
  std::string builtin_definitions_file_name = "builtin-definitions.h";

  // This file contains plain interface descriptor definitions and has to be
  // included in the middle of interface-descriptors.h. Thus it is not a normal
  // header file and uses the .inc suffix instead of the .h suffix.
  std::stringstream interface_descriptors;
  std::string interface_descriptors_file_name = "interface-descriptors.inc";
  {
    IncludeGuardScope builtin_definitions_include_guard(
        builtin_definitions, builtin_definitions_file_name);

    builtin_definitions
        << "\n"
           "#define BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, "
           "ASM) "
           "\\\n";
    for (auto& declarable : GlobalContext::AllDeclarables()) {
      Builtin* builtin = Builtin::DynamicCast(declarable.get());
      if (!builtin || builtin->IsExternal()) continue;
      if (builtin->IsStub()) {
        builtin_definitions << "TFC(" << builtin->ExternalName() << ", "
                            << builtin->ExternalName();
        std::string descriptor_name = builtin->ExternalName() + "Descriptor";
        bool has_context_parameter = builtin->signature().HasContextParameter();
        size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
        TypeVector return_types = LowerType(builtin->signature().return_type);

        interface_descriptors << "class " << descriptor_name
                              << " : public StaticCallInterfaceDescriptor<"
                              << descriptor_name << "> {\n";

        interface_descriptors << " public:\n";

        if (has_context_parameter) {
          interface_descriptors << "  DEFINE_RESULT_AND_PARAMETERS(";
        } else {
          interface_descriptors << "  DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
        }
        interface_descriptors << return_types.size();
        for (size_t i = kFirstNonContextParameter;
             i < builtin->parameter_names().size(); ++i) {
          Identifier* parameter = builtin->parameter_names()[i];
          interface_descriptors << ", k" << CamelifyString(parameter->value);
        }
        interface_descriptors << ")\n";

        interface_descriptors << "  DEFINE_RESULT_AND_PARAMETER_TYPES(";
        PrintCommaSeparatedList(interface_descriptors, return_types,
                                MachineTypeString);
        for (size_t i = kFirstNonContextParameter;
             i < builtin->parameter_names().size(); ++i) {
          const Type* type = builtin->signature().parameter_types.types[i];
          interface_descriptors << ", " << MachineTypeString(type);
        }
        interface_descriptors << ")\n";

        interface_descriptors << "  DECLARE_DEFAULT_DESCRIPTOR("
                              << descriptor_name << ")\n";
        interface_descriptors << "};\n\n";
      } else {
        builtin_definitions << "TFJ(" << builtin->ExternalName();
        if (builtin->IsVarArgsJavaScript()) {
          builtin_definitions << ", kDontAdaptArgumentsSentinel";
        } else {
          DCHECK(builtin->IsFixedArgsJavaScript());
          // Fixed-arg JavaScript builtins need to declare their explicit
          // parameter count.
          int parameter_count =
              static_cast<int>(builtin->signature().ExplicitCount());
          builtin_definitions << ", " << parameter_count;
          // And the receiver is explicitly declared.
          builtin_definitions << ", kReceiver";
          for (size_t i = builtin->signature().implicit_count;
               i < builtin->parameter_names().size(); ++i) {
            Identifier* parameter = builtin->parameter_names()[i];
            builtin_definitions << ", k" << CamelifyString(parameter->value);
          }
        }
      }
      builtin_definitions << ") \\\n";
    }
    builtin_definitions << "\n";

    builtin_definitions
        << "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
    for (const BuiltinPointerType* type :
         TypeOracle::AllBuiltinPointerTypes()) {
      Builtin* example_builtin =
          Declarations::FindSomeInternalBuiltinWithType(type);
      if (!example_builtin) {
        CurrentSourcePosition::Scope current_source_position(
            SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
        ReportError("unable to find any builtin with type \"", *type, "\"");
      }
      builtin_definitions << "  V(" << type->function_pointer_type_id() << ","
                          << example_builtin->ExternalName() << ")\\\n";
    }
    builtin_definitions << "\n";
  }
  WriteFile(output_directory + "/" + builtin_definitions_file_name,
            builtin_definitions.str());
  WriteFile(output_directory + "/" + interface_descriptors_file_name,
            interface_descriptors.str());
}

namespace {

enum class FieldSectionType : uint32_t {
  kNoSection = 0,
  kWeakSection = 1 << 0,
  kStrongSection = 2 << 0,
  kScalarSection = 3 << 0
};

bool IsPointerSection(FieldSectionType type) {
  return type == FieldSectionType::kWeakSection ||
         type == FieldSectionType::kStrongSection;
}

using FieldSections = base::Flags<FieldSectionType>;

std::string ToString(FieldSectionType type) {
  switch (type) {
    case FieldSectionType::kNoSection:
      return "NoSection";
    case FieldSectionType::kWeakSection:
      return "WeakFields";
    case FieldSectionType::kStrongSection:
      return "StrongFields";
    case FieldSectionType::kScalarSection:
      return "ScalarFields";
  }
  UNREACHABLE();
}

class FieldOffsetsGenerator {
 public:
  explicit FieldOffsetsGenerator(const ClassType* type) : type_(type) {}

  virtual void WriteField(const Field& f, const std::string& size_string) = 0;
  virtual void WriteFieldOffsetGetter(const Field& f) = 0;
  virtual void WriteMarker(const std::string& marker) = 0;

  virtual ~FieldOffsetsGenerator() { CHECK(is_finished_); }

  void RecordOffsetFor(const Field& f) {
    CHECK(!is_finished_);
    UpdateSection(f);

    // Emit kHeaderSize before any indexed field.
    if (f.index.has_value() && !header_size_emitted_) {
      WriteMarker("kHeaderSize");
      header_size_emitted_ = true;
    }

    // We don't know statically how much space an indexed field takes, so report
    // it as zero.
    std::string size_string = "0";
    if (!f.index.has_value()) {
      size_t field_size;
      std::tie(field_size, size_string) = f.GetFieldSizeInformation();
    }
    if (f.offset.has_value()) {
      WriteField(f, size_string);
    } else {
      WriteFieldOffsetGetter(f);
    }
  }

  void Finish() {
    End(current_section_);
    if (!(completed_sections_ & FieldSectionType::kWeakSection)) {
      Begin(FieldSectionType::kWeakSection);
      End(FieldSectionType::kWeakSection);
    }
    if (!(completed_sections_ & FieldSectionType::kStrongSection)) {
      Begin(FieldSectionType::kStrongSection);
      End(FieldSectionType::kStrongSection);
    }
    is_finished_ = true;

    // In the presence of indexed fields, we already emitted kHeaderSize before
    // the indexed field.
    if (!type_->IsShape() && !header_size_emitted_) {
      WriteMarker("kHeaderSize");
    }
    if (!type_->IsAbstract() && type_->HasStaticSize()) {
      WriteMarker("kSize");
    }
  }

 protected:
  const ClassType* type_;

 private:
  FieldSectionType GetSectionFor(const Field& f) {
    const Type* field_type = f.name_and_type.type;
    if (field_type == TypeOracle::GetVoidType()) {
      // Allow void type for marker constants of size zero.
      return current_section_;
    }
    StructType::Classification struct_contents =
        StructType::ClassificationFlag::kEmpty;
    if (auto field_as_struct = field_type->StructSupertype()) {
      struct_contents = (*field_as_struct)->ClassifyContents();
    }
    if (struct_contents == StructType::ClassificationFlag::kMixed) {
      // We can't declare what section a struct goes in if it has multiple
      // categories of data within.
      Error(
          "Classes do not support fields which are structs containing both "
          "tagged and untagged data.")
          .Position(f.pos);
    }
    // Currently struct-valued fields are only allowed to have tagged data; see
    // TypeVisitor::VisitClassFieldsAndMethods.
    if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
        struct_contents == StructType::ClassificationFlag::kTagged) {
      if (f.is_weak) {
        return FieldSectionType::kWeakSection;
      } else {
        return FieldSectionType::kStrongSection;
      }
    } else {
      return FieldSectionType::kScalarSection;
    }
  }
  void UpdateSection(const Field& f) {
    FieldSectionType type = GetSectionFor(f);
    if (current_section_ == type) return;
    if (IsPointerSection(type)) {
      if (completed_sections_ & type) {
        std::stringstream s;
        s << "cannot declare field " << f.name_and_type.name << " in class "
          << type_->name() << ", because section " << ToString(type)
          << " to which it belongs has already been finished.";
        Error(s.str()).Position(f.pos);
      }
    }
    End(current_section_);
    current_section_ = type;
    Begin(current_section_);
  }
  void Begin(FieldSectionType type) {
    DCHECK(type != FieldSectionType::kNoSection);
    if (!IsPointerSection(type)) return;
    WriteMarker("kStartOf" + ToString(type) + "Offset");
  }
  void End(FieldSectionType type) {
    if (!IsPointerSection(type)) return;
    completed_sections_ |= type;
    WriteMarker("kEndOf" + ToString(type) + "Offset");
  }

  FieldSectionType current_section_ = FieldSectionType::kNoSection;
  FieldSections completed_sections_ = FieldSectionType::kNoSection;
  bool is_finished_ = false;
  bool header_size_emitted_ = false;
};

class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
 public:
  MacroFieldOffsetsGenerator(std::ostream& out, const ClassType* type)
      : FieldOffsetsGenerator(type), out_(out) {
    out_ << "#define ";
    out_ << "TORQUE_GENERATED_" << CapifyStringWithUnderscores(type_->name())
         << "_FIELDS(V) \\\n";
  }
  void WriteField(const Field& f, const std::string& size_string) override {
    out_ << "V(k" << CamelifyString(f.name_and_type.name) << "Offset, "
         << size_string << ") \\\n";
  }
  void WriteFieldOffsetGetter(const Field& f) override {
    // Can't do anything here.
  }
  void WriteMarker(const std::string& marker) override {
    out_ << "V(" << marker << ", 0) \\\n";
  }

 private:
  std::ostream& out_;
};
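
// For a class Foo with a single strong tagged field `bar`, the generator
// above emits roughly:
//   #define TORQUE_GENERATED_FOO_FIELDS(V) \
//     V(kStartOfStrongFieldsOffset, 0) \
//     V(kBarOffset, kTaggedSize) \
//     V(kEndOfStrongFieldsOffset, 0) \
//     V(kStartOfWeakFieldsOffset, 0) \
//     V(kEndOfWeakFieldsOffset, 0) \
//     V(kHeaderSize, 0) \
//     V(kSize, 0) \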

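// e.g. for a class Foo whose superclass is HeapObject, the function below
// emits roughly:
//   class Foo : public TorqueGeneratedFoo<Foo, HeapObject> {
//    public:
//     TQ_OBJECT_CONSTRUCTORS(Foo)
//   };
// with a matching TQ_OBJECT_CONSTRUCTORS_IMPL(Foo) in the -inl header.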
void GenerateClassExport(const ClassType* type, std::ostream& header,
                         std::ostream& inl_header) {
  const ClassType* super = type->GetSuperClass();
  std::string parent = "TorqueGenerated" + type->name() + "<" + type->name() +
                       ", " + super->name() + ">";
  header << "class " << type->name() << " : public " << parent << " {\n";
  header << " public:\n";
  if (type->ShouldGenerateBodyDescriptor()) {
    header << "  class BodyDescriptor;\n";
  }
  header << "  TQ_OBJECT_CONSTRUCTORS(" << type->name() << ")\n";
  header << "};\n\n";
  inl_header << "TQ_OBJECT_CONSTRUCTORS_IMPL(" << type->name() << ")\n";
}

}  // namespace

void ImplementationVisitor::GenerateClassFieldOffsets(
    const std::string& output_directory) {
  std::stringstream header;
  std::string file_name = "field-offsets.h";
  {
    IncludeGuardScope include_guard(header, file_name);

    for (const ClassType* type : TypeOracle::GetClasses()) {
      // TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
      // to generate field offsets without the use of macros.
      if (!type->GenerateCppClassDefinitions() && !type->HasUndefinedLayout()) {
        MacroFieldOffsetsGenerator g(header, type);
        for (auto f : type->fields()) {
          CurrentSourcePosition::Scope scope(f.pos);
          g.RecordOffsetFor(f);
        }
        g.Finish();
        header << "\n";
      }
    }

    header << "#define TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(V)\\\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (type->ShouldGenerateBodyDescriptor() && type->OwnInstanceType()) {
        std::string type_name =
            CapifyStringWithUnderscores(type->name()) + "_TYPE";
        header << "V(" << type_name << "," << type->name() << ")\\\n";
      }
    }
    header << "\n";

    header << "#define TORQUE_DATA_ONLY_VISITOR_ID_LIST(V)\\\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (type->ShouldGenerateBodyDescriptor() && type->HasNoPointerSlots()) {
        header << "V(" << type->name() << ")\\\n";
      }
    }
    header << "\n";

    header << "#define TORQUE_POINTER_VISITOR_ID_LIST(V)\\\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (type->ShouldGenerateBodyDescriptor() && !type->HasNoPointerSlots()) {
        header << "V(" << type->name() << ")\\\n";
      }
    }
    header << "\n";
  }
  const std::string output_header_path = output_directory + "/" + file_name;
  WriteFile(output_header_path, header.str());
}

void ImplementationVisitor::GenerateBitFields(
    const std::string& output_directory) {
  std::stringstream header;
  std::string file_name = "bit-fields.h";
  {
    IncludeGuardScope include_guard(header, file_name);
    header << "#include \"src/base/bit-field.h\"\n\n";
    NamespaceScope namespaces(header, {"v8", "internal"});

    for (const auto& type : TypeOracle::GetBitFieldStructTypes()) {
      bool all_single_bits = true;  // Track whether every field is one bit.

      header << "#define DEFINE_TORQUE_GENERATED_"
             << CapifyStringWithUnderscores(type->name()) << "() \\\n";
      std::string type_name = type->GetConstexprGeneratedTypeName();
      for (const auto& field : type->fields()) {
        const char* suffix = field.num_bits == 1 ? "Bit" : "Bits";
3823
        all_single_bits = all_single_bits && field.num_bits == 1;
3824 3825 3826 3827 3828
        std::string field_type_name =
            field.name_and_type.type->GetConstexprGeneratedTypeName();
        header << "  using " << CamelifyString(field.name_and_type.name)
               << suffix << " = base::BitField<" << field_type_name << ", "
               << field.offset << ", " << field.num_bits << ", " << type_name
               << ">; \\\n";
      }

      // If every field is one bit, we can also generate a convenient enum.
      if (all_single_bits) {
        header << "  enum Flag { \\\n";
        header << "    kNone = 0, \\\n";
        for (const auto& field : type->fields()) {
          header << "    k" << CamelifyString(field.name_and_type.name)
                 << " = 1 << " << field.offset << ", \\\n";
        }
        header << "  }; \\\n";
        header << "  using Flags = base::Flags<Flag>; \\\n";
        header << "  static constexpr int kFlagCount = "
               << type->fields().size() << "; \\\n";
      }

      header << "\n";
    }
  }
  const std::string output_header_path = output_directory + "/" + file_name;
  WriteFile(output_header_path, header.str());
}
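// Example (sketch, for a hypothetical bitfield struct MyBitFields with two
// one-bit fields "a" and "b" over a uint32 backing store): the macro emitted
// above would expand to roughly
//   using ABit = base::BitField<bool, 0, 1, uint32_t>;
//   using BBit = base::BitField<bool, 1, 1, uint32_t>;
//   enum Flag { kNone = 0, kA = 1 << 0, kB = 1 << 1, };
//   using Flags = base::Flags<Flag>;
//   static constexpr int kFlagCount = 2;
// The Flag enum and kFlagCount are only emitted because every field is a
// single bit.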

namespace {

class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
 public:
  ClassFieldOffsetGenerator(std::ostream& header, std::ostream& inline_header,
                            const ClassType* type, std::string gen_name)
      : FieldOffsetsGenerator(type),
        hdr_(header),
        inl_(inline_header),
        previous_field_end_("P::kHeaderSize"),
        gen_name_(gen_name) {}
  void WriteField(const Field& f, const std::string& size_string) override {
    std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
    std::string field_end = field + "End";
    hdr_ << "  static constexpr int " << field << " = " << previous_field_end_
         << ";\n";
    hdr_ << "  static constexpr int " << field_end << " = " << field << " + "
         << size_string << " - 1;\n";
    previous_field_end_ = field_end + " + 1";
  }
  void WriteFieldOffsetGetter(const Field& f) override {
    // A static constexpr int is more convenient than a getter if the offset is
    // known.
    DCHECK(!f.offset.has_value());

    std::string function_name = CamelifyString(f.name_and_type.name) + "Offset";

    std::vector<cpp::TemplateParameter> params = {cpp::TemplateParameter("D"),
                                                  cpp::TemplateParameter("P")};
    cpp::Class owner(std::move(params), gen_name_);

    auto getter = cpp::Function::DefaultGetter("int", &owner, function_name);
    getter.PrintDeclaration(hdr_);
    getter.PrintDefinition(inl_, [&](std::ostream& stream) {
      // Item 1 in a flattened slice is the offset.
      stream << "  return static_cast<int>(std::get<1>("
             << Callable::PrefixNameForCCOutput(type_->GetSliceMacroName(f))
             << "(*static_cast<const D*>(this))));\n";
    });
  }
  void WriteMarker(const std::string& marker) override {
    hdr_ << "  static constexpr int " << marker << " = " << previous_field_end_
         << ";\n";
  }

 private:
  std::ostream& hdr_;
  std::ostream& inl_;
  std::string previous_field_end_;
  std::string gen_name_;
};
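// Example (sketch): for a class whose first field after the header is a
// tagged field "bar", WriteField chains the offsets off P::kHeaderSize:
//   static constexpr int kBarOffset = P::kHeaderSize;
//   static constexpr int kBarOffsetEnd = kBarOffset + kTaggedSize - 1;
// and the next field (or end marker) then starts at kBarOffsetEnd + 1.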

class CppClassGenerator {
 public:
  CppClassGenerator(const ClassType* type, std::ostream& header,
                    std::ostream& inl_header, std::ostream& impl)
      : type_(type),
        super_(type->GetSuperClass()),
        name_(type->name()),
        gen_name_("TorqueGenerated" + name_),
        gen_name_T_(gen_name_ + "<D, P>"),
        gen_name_I_(gen_name_ + "<" + name_ + ", " + super_->name() + ">"),
        hdr_(header),
        inl_(inl_header),
        impl_(impl) {}
  const std::string template_decl() const {
    return "template <class D, class P>";
  }

  void GenerateClass();

 private:
  void GenerateClassConstructors();

  // Generates getter and setter runtime member functions for the given class
  // field. Traverses depth-first through any nested struct fields to generate
  // accessors for them also; struct_fields represents the stack of currently
  // active struct fields.
  void GenerateFieldAccessors(const Field& class_field,
                              std::vector<const Field*>& struct_fields);
  void EmitLoadFieldStatement(std::ostream& stream, const Field& class_field,
                              std::vector<const Field*>& struct_fields);
  void EmitStoreFieldStatement(std::ostream& stream, const Field& class_field,
                               std::vector<const Field*>& struct_fields);

  void GenerateClassCasts();

  std::string GetFieldOffsetForAccessor(const Field& f);

  // Gets the C++ type name that should be used in accessors for referring to
  // the value of a class field.
  std::string GetTypeNameForAccessor(const Field& f);

  bool CanContainHeapObjects(const Type* t);

  const ClassType* type_;
  const ClassType* super_;
  const std::string name_;
  const std::string gen_name_;
  const std::string gen_name_T_;
  const std::string gen_name_I_;
  std::ostream& hdr_;
  std::ostream& inl_;
  std::ostream& impl_;
};

base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
    const ClassType& type) {
  std::vector<Field> result;
  std::set<std::string> index_names;
  for (const Field& field : type.ComputeAllFields()) {
    if (field.index) {
      auto name_and_type = ExtractSimpleFieldArraySize(type, field.index->expr);
      if (!name_and_type) {
        return base::nullopt;
      }
      index_names.insert(name_and_type->name);
    }
  }

  for (const Field& field : type.ComputeAllFields()) {
    if (index_names.count(field.name_and_type.name) != 0) {
      result.push_back(field);
    }
  }

  return result;
}

void CppClassGenerator::GenerateClass() {
  // Is<name>_NonInline(HeapObject)
  {
    cpp::Function f("Is"s + name_ + "_NonInline");
    f.SetDescription("Alias for HeapObject::Is"s + name_ +
                     "() that avoids inlining.");
    f.SetExport(true);
    f.SetReturnType("bool");
    f.AddParameter("HeapObject", "o");

    f.PrintDeclaration(hdr_);
    f.PrintDefinition(impl_, [&](std::ostream& stream) {
      stream << "  return o.Is" << name_ << "();";
    });
  }

  hdr_ << template_decl() << "\n";
  hdr_ << "class " << gen_name_ << " : public P {\n";
  hdr_ << "  static_assert(std::is_same<" << name_ << ", D>::value,\n"
       << "    \"Use this class as direct base for " << name_ << ".\");\n";
  hdr_ << "  static_assert(std::is_same<" << super_->name() << ", P>::value,\n"
       << "    \"Pass in " << super_->name()
       << " as second template parameter for " << gen_name_ << ".\");\n";
  hdr_ << " public:\n";
  hdr_ << "  using Super = P;\n";
  hdr_ << "  using TorqueGeneratedClass = " << gen_name_ << "<D,P>;\n\n";
  if (!type_->ShouldExport() && !type_->IsExtern()) {
    hdr_ << " protected: // not extern or @export\n";
  }
  for (const Field& f : type_->fields()) {
    std::vector<const Field*> struct_fields;
    GenerateFieldAccessors(f, struct_fields);
  }
  if (!type_->ShouldExport() && !type_->IsExtern()) {
    hdr_ << " public:\n";
  }

  GenerateClassCasts();

  std::vector<cpp::TemplateParameter> templateArgs = {
      cpp::TemplateParameter("D"), cpp::TemplateParameter("P")};
  cpp::Class c(std::move(templateArgs), gen_name_);

  if (type_->ShouldGeneratePrint()) {
    hdr_ << "\n  DECL_PRINTER(" << name_ << ")\n";
  }

  if (type_->ShouldGenerateVerify()) {
    IfDefScope hdr_scope(hdr_, "VERIFY_HEAP");
    // V8_EXPORT_PRIVATE void Verify(Isolate*);
    cpp::Function f(&c, name_ + "Verify");
    f.SetExport();
    f.SetReturnType("void");
    f.AddParameter("Isolate*", "isolate");
    f.PrintDeclaration(hdr_);

    IfDefScope impl_scope(impl_, "VERIFY_HEAP");
    impl_ << "\ntemplate <>\n";
    impl_ << "void " << gen_name_I_ << "::" << name_
          << "Verify(Isolate* isolate) {\n";
    impl_ << "  TorqueGeneratedClassVerifiers::" << name_ << "Verify(" << name_
          << "::cast(*this), "
             "isolate);\n";
    impl_ << "}\n";
  }

  hdr_ << "\n";
  ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_);
  for (auto f : type_->fields()) {
    CurrentSourcePosition::Scope scope(f.pos);
    g.RecordOffsetFor(f);
  }
  g.Finish();
  hdr_ << "\n";

  auto index_fields = GetOrderedUniqueIndexFields(*type_);

  if (!index_fields.has_value()) {
    hdr_ << "  // SizeFor implementations not generated due to complex array "
            "lengths\n\n";

    const Field& last_field = type_->LastField();
    std::string last_field_item_size =
        std::get<1>(*SizeOf(last_field.name_and_type.type));

    // int AllocatedSize() const
    {
      cpp::Function f =
          cpp::Function::DefaultGetter("int", &c, "AllocatedSize");
      f.PrintDeclaration(hdr_, 2);

      f.PrintDefinition(inl_, [&](std::ostream& stream) {
        stream << "  auto slice = "
               << Callable::PrefixNameForCCOutput(
                      type_->GetSliceMacroName(last_field))
               << "(*static_cast<const D*>(this));\n";
        stream << "  return static_cast<int>(std::get<1>(slice)) + "
               << last_field_item_size
               << " * static_cast<int>(std::get<2>(slice));\n";
      });
    }
  } else if (type_->ShouldGenerateBodyDescriptor() ||
             (!type_->IsAbstract() &&
              !type_->IsSubtypeOf(TypeOracle::GetJSObjectType()))) {
    cpp::Function f(&c, "SizeFor");
    f.SetReturnType("int32_t");
    f.SetFlags(cpp::Function::kStatic | cpp::Function::kConstexpr |
               cpp::Function::kV8Inline);
    for (const Field& field : *index_fields) {
      f.AddParameter("int", field.name_and_type.name);
    }
    f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
      if (index_fields->empty()) {
        stream << "    DCHECK(kHeaderSize == kSize && kHeaderSize == "
               << *type_->size().SingleValue() << ");\n";
      }
      stream << "    int32_t size = kHeaderSize;\n";
      for (const Field& field : type_->ComputeAllFields()) {
        if (field.index) {
          auto index_name_and_type =
              *ExtractSimpleFieldArraySize(*type_, field.index->expr);
          stream << "    size += " << index_name_and_type.name << " * "
                 << std::get<0>(field.GetFieldSizeInformation()) << ";\n";
        }
      }
      if (type_->size().Alignment() < TargetArchitecture::TaggedSize()) {
        stream << "    size = OBJECT_POINTER_ALIGN(size);\n";
      }
      stream << "    return size;\n";
    });

    // V8_INLINE int32_t AllocatedSize() const
    {
      cpp::Function f =
          cpp::Function::DefaultGetter("int32_t", &c, "AllocatedSize");
      f.SetFlag(cpp::Function::kV8Inline);
      f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
        stream << "    return SizeFor(";
        bool first = true;
        for (auto field : *index_fields) {
          if (!first) stream << ", ";
          stream << "this->" << field.name_and_type.name << "()";
          first = false;
        }
        stream << ");\n";
      });
    }
  }

  hdr_ << "  friend class Factory;\n\n";

  GenerateClassConstructors();

  hdr_ << "};\n\n";

  if (type_->ShouldGenerateFullClassDefinition()) {
    // If this class extends from another class which is defined in the same tq
    // file, and that other class doesn't generate a full class definition, then
    // the resulting .inc file would be uncompilable due to ordering
    // requirements: the generated file must go before the hand-written
    // definition of the base class, but it must also go after that same
    // hand-written definition.
    base::Optional<const ClassType*> parent = type_->parent()->ClassSupertype();
    while (parent) {
      if ((*parent)->GenerateCppClassDefinitions() &&
          !(*parent)->ShouldGenerateFullClassDefinition() &&
          (*parent)->AttributedToFile() == type_->AttributedToFile()) {
        Error("Exported ", *type_,
              " cannot be in the same file as its parent extern ", **parent);
      }
      parent = (*parent)->parent()->ClassSupertype();
    }

    GenerateClassExport(type_, hdr_, inl_);
  }
}

void CppClassGenerator::GenerateClassCasts() {
  cpp::Function f("cast");
  f.SetFlags(cpp::Function::kV8Inline | cpp::Function::kStatic);
  f.SetReturnType("D");
  f.AddParameter("Object", "object");

  // V8_INLINE static D cast(Object)
  f.PrintInlineDefinition(hdr_, [](std::ostream& stream) {
    stream << "  return D(object.ptr());\n";
  });
  // V8_INLINE static D unchecked_cast(Object)
  f.SetName("unchecked_cast");
  f.PrintInlineDefinition(hdr_, [](std::ostream& stream) {
    stream << "  return bit_cast<D>(object);\n";
  });
}
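// Example (sketch): within TorqueGeneratedFoo<Foo, HeapObject>, the two
// definitions printed above amount to:
//   V8_INLINE static D cast(Object object) { return D(object.ptr()); }
//   V8_INLINE static D unchecked_cast(Object object) {
//     return bit_cast<D>(object);
//   }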

void CppClassGenerator::GenerateClassConstructors() {
  hdr_ << " public:\n";
  hdr_ << "  template <class DAlias = D>\n";
  hdr_ << "  constexpr " << gen_name_ << "() : P() {\n";
  hdr_ << "    static_assert(std::is_base_of<" << gen_name_ << ", \n";
  hdr_ << "      DAlias>::value,\n";
  hdr_ << "      \"class " << gen_name_ << " should be used as direct base for "
       << name_ << ".\");\n";
  hdr_ << "  }\n";

  hdr_ << " protected:\n";
  hdr_ << "  inline explicit " << gen_name_ << "(Address ptr);\n";
  hdr_ << "  // Special-purpose constructor for subclasses that have fast "
          "paths where\n";
  hdr_ << "  // their ptr() is a Smi.\n";
  hdr_ << "  inline explicit " << gen_name_
       << "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi);\n";

  inl_ << "template<class D, class P>\n";
  inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
  inl_ << "  : P(ptr) {\n";
  inl_ << "  SLOW_DCHECK(Is" << name_ << "_NonInline(*this));\n";
  inl_ << "}\n";

  inl_ << "template<class D, class P>\n";
  inl_ << "inline " << gen_name_T_ << "::" << gen_name_
       << "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)\n";
  inl_ << "  : P(ptr, allow_smi) {\n";
  inl_ << "  SLOW_DCHECK("
       << "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
          " && this->IsSmi()) || Is"
       << name_ << "_NonInline(*this));\n";
  inl_ << "}\n";
}

namespace {
std::string GenerateRuntimeTypeCheck(const Type* type,
                                     const std::string& value) {
  bool maybe_object = !type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
  std::stringstream type_check;
  bool at_start = true;
  // If weak pointers are allowed, then start by checking for a cleared value.
  if (maybe_object) {
    type_check << value << ".IsCleared()";
    at_start = false;
  }
  for (const TypeChecker& runtime_type : type->GetTypeCheckers()) {
    if (!at_start) type_check << " || ";
    at_start = false;
    if (maybe_object) {
      bool strong = runtime_type.weak_ref_to.empty();
      if (strong && runtime_type.type == WEAK_HEAP_OBJECT) {
        // Rather than a generic Weak<T>, this is the basic type WeakHeapObject.
        // We can't validate anything more about the type of the object pointed
        // to, so just check that it's weak.
        type_check << value << ".IsWeak()";
      } else {
        type_check << "(" << (strong ? "!" : "") << value << ".IsWeak() && "
                   << value << ".GetHeapObjectOrSmi().Is"
                   << (strong ? runtime_type.type : runtime_type.weak_ref_to)
                   << "())";
      }
    } else {
      type_check << value << ".Is" << runtime_type.type << "()";
    }
  }
  return type_check.str();
}
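// Example (sketch): for a field of Torque type "Weak<Map>|Undefined" (a
// MaybeObject value), the expression built above looks roughly like
//   value.IsCleared() ||
//   (!value.IsWeak() && value.GetHeapObjectOrSmi().IsUndefined()) ||
//   (value.IsWeak() && value.GetHeapObjectOrSmi().IsMap())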

void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
                          const ClassType* type, const Field& f) {
  os << "  DCHECK_GE(" << index << ", 0);\n";
  std::string length_expression;
  if (base::Optional<NameAndType> array_length =
          ExtractSimpleFieldArraySize(*type, f.index->expr)) {
    length_expression = "this->" + array_length->name + "()";
  } else {
    // The length is element 2 in the flattened field slice.
    length_expression =
        "static_cast<int>(std::get<2>(" +
        Callable::PrefixNameForCCOutput(type->GetSliceMacroName(f)) +
        "(*static_cast<const D*>(this))))";
  }
  os << "  DCHECK_LT(" << index << ", " << length_expression << ");\n";
}
}  // namespace

// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
void CppClassGenerator::GenerateFieldAccessors(
    const Field& class_field, std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  if (field_type == TypeOracle::GetVoidType()) return;

  // float64_or_hole should be treated like float64. For now, we don't need it.
  if (field_type == TypeOracle::GetFloat64OrHoleType()) {
    return;
  }

  if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
    struct_fields.resize(struct_fields.size() + 1);
    for (const Field& struct_field : struct_type->fields()) {
      struct_fields[struct_fields.size() - 1] = &struct_field;
      GenerateFieldAccessors(class_field, struct_fields);
    }
    struct_fields.resize(struct_fields.size() - 1);
    return;
  }

  // TODO(v8:10391) Generate accessors for external pointers
  if (field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType())) {
    return;
  }

  bool indexed = class_field.index && !class_field.index->optional;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  bool can_contain_heap_objects = CanContainHeapObjects(field_type);

  // Assemble an accessor name by accumulating together all of the nested field
  // names.
  std::string name = class_field.name_and_type.name;
  for (const Field* nested_struct_field : struct_fields) {
    name += "_" + nested_struct_field->name_and_type.name;
  }

  // Generate declarations in header.
  if (can_contain_heap_objects && !field_type->IsClassType() &&
      !field_type->IsStructType() &&
      field_type != TypeOracle::GetObjectType()) {
    hdr_ << "  // Torque type: " << field_type->ToString() << "\n";
  }

  std::vector<cpp::TemplateParameter> templateParameters = {
      cpp::TemplateParameter("D"), cpp::TemplateParameter("P")};
  cpp::Class owner(std::move(templateParameters), gen_name_);

  // getter
  {
    auto getter = cpp::Function::DefaultGetter(type_name, &owner, name);
    if (indexed) {
      getter.AddParameter("int", "i");
    }
    const char* tag_argument;
    switch (class_field.read_synchronization) {
      case FieldSynchronization::kNone:
        tag_argument = "";
        break;
      case FieldSynchronization::kRelaxed:
        getter.AddParameter("RelaxedLoadTag");
        tag_argument = ", kRelaxedLoad";
        break;
      case FieldSynchronization::kAcquireRelease:
        getter.AddParameter("AcquireLoadTag");
        tag_argument = ", kAcquireLoad";
        break;
    }

    getter.PrintDeclaration(hdr_);

    // For tagged data, generate the extra getter that derives a
    // PtrComprCageBase from the current object's pointer.
    if (can_contain_heap_objects) {
      getter.PrintDefinition(inl_, [&](auto& stream) {
        stream
            << "  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);\n";
        stream << "  return " << gen_name_ << "::" << name << "(cage_base"
               << (indexed ? ", i" : "") << tag_argument << ");\n";
      });

      getter.InsertParameter(0, "PtrComprCageBase", "cage_base");
      getter.PrintDeclaration(hdr_);
    }

    getter.PrintDefinition(inl_, [&](auto& stream) {
      stream << "  " << type_name << " value;\n";
      EmitLoadFieldStatement(stream, class_field, struct_fields);
      stream << "  return value;\n";
    });
  }

  // setter
  {
    auto setter = cpp::Function::DefaultSetter(
        &owner, std::string("set_") + name, type_name, "value");
    if (indexed) {
      setter.InsertParameter(0, "int", "i");
    }
    switch (class_field.write_synchronization) {
      case FieldSynchronization::kNone:
        break;
      case FieldSynchronization::kRelaxed:
        setter.AddParameter("RelaxedStoreTag");
        break;
      case FieldSynchronization::kAcquireRelease:
        setter.AddParameter("ReleaseStoreTag");
        break;
    }
    if (can_contain_heap_objects) {
      setter.AddParameter("WriteBarrierMode", "mode", "UPDATE_WRITE_BARRIER");
    }
    setter.PrintDeclaration(hdr_);

    setter.PrintDefinition(inl_, [&](auto& stream) {
      EmitStoreFieldStatement(stream, class_field, struct_fields);
    });
  }

  hdr_ << "\n";
}
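// Example (sketch): for a non-indexed tagged field "bar" of a hypothetical
// class type Bar, the declarations emitted into the header look roughly like
//   inline Bar bar() const;
//   inline Bar bar(PtrComprCageBase cage_base) const;
//   inline void set_bar(Bar value,
//                       WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// with the definitions in the -inl header; the cage_base overload exists
// because the field can contain heap objects.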

std::string CppClassGenerator::GetFieldOffsetForAccessor(const Field& f) {
  if (f.offset.has_value()) {
    return "k" + CamelifyString(f.name_and_type.name) + "Offset";
  }
  return CamelifyString(f.name_and_type.name) + "Offset()";
}

std::string CppClassGenerator::GetTypeNameForAccessor(const Field& f) {
  const Type* field_type = f.name_and_type.type;
  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    const Type* constexpr_version = field_type->ConstexprVersion();
    if (!constexpr_version) {
      Error("Field accessor for ", type_->name(), "::", f.name_and_type.name,
            " cannot be generated because its type ", *field_type,
            " is neither a subclass of Object nor does the type have a "
            "constexpr "
            "version.")
          .Position(f.pos)
          .Throw();
    }
    return constexpr_version->GetGeneratedTypeName();
  }
  if (field_type->IsSubtypeOf(TypeOracle::GetSmiType())) {
    // Follow the convention to create Smi accessors with type int.
    return "int";
  }
  return field_type->UnhandlifiedCppTypeName();
}

bool CppClassGenerator::CanContainHeapObjects(const Type* t) {
  return t->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
         !t->IsSubtypeOf(TypeOracle::GetSmiType());
}

void CppClassGenerator::EmitLoadFieldStatement(
    std::ostream& stream, const Field& class_field,
    std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  const std::string class_field_size =
      std::get<1>(class_field.GetFieldSizeInformation());

  // field_offset contains both the offset from the beginning of the object to
  // the class field and the combined offsets of any nested struct fields
  // within, but not the index adjustment.
  std::string field_offset = GetFieldOffsetForAccessor(class_field);
  for (const Field* nested_struct_field : struct_fields) {
    field_offset += " + " + std::to_string(*nested_struct_field->offset);
  }

  std::string offset = field_offset;
  if (class_field.index) {
    const char* index = class_field.index->optional ? "0" : "i";
    GenerateBoundsDCheck(stream, index, type_, class_field);
    stream << "  int offset = " << field_offset << " + " << index << " * "
           << class_field_size << ";\n";
    offset = "offset";
  }

  stream << "  value = ";

  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    if (class_field.read_synchronization ==
        FieldSynchronization::kAcquireRelease) {
      ReportError("Torque doesn't support @cppAcquireRead on untagged data");
    } else if (class_field.read_synchronization ==
               FieldSynchronization::kRelaxed) {
      ReportError("Torque doesn't support @cppRelaxedRead on untagged data");
    }
    stream << "this->template ReadField<" << type_name << ">(" << offset
           << ");\n";
  } else {
    const char* load;
    switch (class_field.read_synchronization) {
      case FieldSynchronization::kNone:
        load = "load";
        break;
      case FieldSynchronization::kRelaxed:
        load = "Relaxed_Load";
        break;
      case FieldSynchronization::kAcquireRelease:
        load = "Acquire_Load";
        break;
    }
    bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
    const std::string load_type = is_smi ? "Smi" : type_name;
    const char* postfix = is_smi ? ".value()" : "";
    const char* optional_cage_base = is_smi ? "" : "cage_base, ";

    stream << "TaggedField<" << load_type << ">::" << load << "("
           << optional_cage_base << "*this, " << offset << ")" << postfix
           << ";\n";
  }

  if (CanContainHeapObjects(field_type)) {
    stream << "  DCHECK(" << GenerateRuntimeTypeCheck(field_type, "value")
           << ");\n";
  }
}
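// Example (sketch): for a Smi field at a known offset the statement emitted
// above reads the raw Smi and unwraps it to int,
//   value = TaggedField<Smi>::load(*this, kBarOffset).value();
// while a non-Smi tagged field threads the cage_base through instead:
//   value = TaggedField<Bar>::load(cage_base, *this, kBarOffset);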

void CppClassGenerator::EmitStoreFieldStatement(
    std::ostream& stream, const Field& class_field,
    std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  const std::string class_field_size =
      std::get<1>(class_field.GetFieldSizeInformation());

  // field_offset contains both the offset from the beginning of the object to
  // the class field and the combined offsets of any nested struct fields
  // within, but not the index adjustment.
  std::string field_offset = GetFieldOffsetForAccessor(class_field);
  for (const Field* nested_struct_field : struct_fields) {
    field_offset += " + " + std::to_string(*nested_struct_field->offset);
  }

  std::string offset = field_offset;
  if (class_field.index) {
    const char* index = class_field.index->optional ? "0" : "i";
    GenerateBoundsDCheck(stream, index, type_, class_field);
    stream << "  int offset = " << field_offset << " + " << index << " * "
           << class_field_size << ";\n";
    offset = "offset";
  }

  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    stream << "  this->template WriteField<" << type_name << ">(" << offset
           << ", value);\n";
  } else {
    bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
    bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
    const char* write_macro;
    if (!strong_pointer) {
      if (class_field.write_synchronization ==
          FieldSynchronization::kAcquireRelease) {
        ReportError("Torque doesn't support @releaseWrite on weak fields");
      }
      write_macro = "RELAXED_WRITE_WEAK_FIELD";
    } else {
      switch (class_field.write_synchronization) {
        case FieldSynchronization::kNone:
          write_macro = "WRITE_FIELD";
          break;
        case FieldSynchronization::kRelaxed:
          write_macro = "RELAXED_WRITE_FIELD";
          break;
        case FieldSynchronization::kAcquireRelease:
          write_macro = "RELEASE_WRITE_FIELD";
          break;
      }
    }
    const std::string value_to_write = is_smi ? "Smi::FromInt(value)" : "value";

    if (!is_smi) {
      stream << "  SLOW_DCHECK("
             << GenerateRuntimeTypeCheck(field_type, "value") << ");\n";
    }
    stream << "  " << write_macro << "(*this, " << offset << ", "
           << value_to_write << ");\n";
    if (!is_smi) {
      const char* write_barrier = strong_pointer
                                      ? "CONDITIONAL_WRITE_BARRIER"
                                      : "CONDITIONAL_WEAK_WRITE_BARRIER";
      stream << "  " << write_barrier << "(*this, " << offset
             << ", value, mode);\n";
    }
  }
}
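// Example (sketch): for a strong, non-Smi tagged field "bar" the emitted
// store is a type-checked write followed by a conditional write barrier:
//   SLOW_DCHECK(value.IsBar());
//   WRITE_FIELD(*this, kBarOffset, value);
//   CONDITIONAL_WRITE_BARRIER(*this, kBarOffset, value, mode);
// A Smi field instead writes Smi::FromInt(value) and needs no barrier.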

void GenerateStructLayoutDescription(std::ostream& header,
                                     const StructType* type) {
  header << "struct TorqueGenerated" << CamelifyString(type->name())
         << "Offsets {\n";
  for (const Field& field : type->fields()) {
    header << "  static constexpr int k"
           << CamelifyString(field.name_and_type.name)
           << "Offset = " << *field.offset << ";\n";
  }
  header << "  static constexpr int kSize = " << type->PackedSize() << ";\n";
  header << "};\n\n";
}
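// Example (sketch, assuming a 4-byte tagged size): for a hypothetical struct
// with two tagged fields "first" and "second", this prints
//   struct TorqueGeneratedPairOffsets {
//     static constexpr int kFirstOffset = 0;
//     static constexpr int kSecondOffset = 4;
//     static constexpr int kSize = 8;
//   };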

}  // namespace

void ImplementationVisitor::GenerateClassDefinitions(
    const std::string& output_directory) {
  std::stringstream factory_header;
  std::stringstream factory_impl;
  std::string factory_basename = "factory";

  std::stringstream forward_declarations;
  std::string forward_declarations_filename = "class-forward-declarations.h";

  {
    factory_impl << "#include \"src/heap/factory-base.h\"\n";
    factory_impl << "#include \"src/heap/factory-base-inl.h\"\n";
    factory_impl << "#include \"src/heap/heap.h\"\n";
    factory_impl << "#include \"src/heap/heap-inl.h\"\n";
    factory_impl << "#include \"src/execution/isolate.h\"\n";
    factory_impl << "#include "
                    "\"src/objects/all-objects-inl.h\"\n\n";
    NamespaceScope factory_impl_namespaces(factory_impl, {"v8", "internal"});
    factory_impl << "\n";

    IncludeGuardScope include_guard(forward_declarations,
                                    forward_declarations_filename);
    NamespaceScope forward_declarations_namespaces(forward_declarations,
                                                   {"v8", "internal"});

    std::set<const StructType*, TypeLess> structs_used_in_classes;

    // Emit forward declarations.
    for (const ClassType* type : TypeOracle::GetClasses()) {
      auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
      std::ostream& header = streams.class_definition_headerfile;
      header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
      forward_declarations << "class " << type->GetGeneratedTNodeTypeName()
                           << ";\n";
    }

    for (const ClassType* type : TypeOracle::GetClasses()) {
      auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
      std::ostream& header = streams.class_definition_headerfile;
      std::ostream& inline_header = streams.class_definition_inline_headerfile;
      std::ostream& implementation = streams.class_definition_ccfile;

      if (type->GenerateCppClassDefinitions()) {
        CppClassGenerator g(type, header, inline_header, implementation);
        g.GenerateClass();
      }
      for (const Field& f : type->fields()) {
        const Type* field_type = f.name_and_type.type;
        if (auto field_as_struct = field_type->StructSupertype()) {
          structs_used_in_classes.insert(*field_as_struct);
        }
      }
      if (type->ShouldExport() && !type->IsAbstract() &&
          !type->HasCustomMap()) {
        std::string return_type = type->HandlifiedCppTypeName();
        std::string function_name = "New" + type->name();
        std::stringstream parameters;
        for (const Field& f : type->ComputeAllFields()) {
          if (f.name_and_type.name == "map") continue;
          if (!f.index) {
            std::string type_string =
                f.name_and_type.type->HandlifiedCppTypeName();
            parameters << type_string << " " << f.name_and_type.name << ", ";
          }
        }
        parameters << "AllocationType allocation_type";

        factory_header << return_type << " " << function_name << "("
                       << parameters.str() << ");\n";
        factory_impl << "template <typename Impl>\n";
        factory_impl << return_type
                     << " TorqueGeneratedFactory<Impl>::" << function_name
                     << "(" << parameters.str() << ") {\n";

        factory_impl << "  int size = ";
        const ClassType* super = type->GetSuperClass();
        std::string gen_name = "TorqueGenerated" + type->name();
        std::string gen_name_T =
            gen_name + "<" + type->name() + ", " + super->name() + ">";
        factory_impl << gen_name_T << "::SizeFor(";

        bool first = true;
        auto index_fields = GetOrderedUniqueIndexFields(*type);
        CHECK(index_fields.has_value());
        for (auto index_field : *index_fields) {
          if (!first) {
            factory_impl << ", ";
          }
          factory_impl << index_field.name_and_type.name;
          first = false;
        }

        factory_impl << ");\n";
        factory_impl << "  Map map = factory()->read_only_roots()."
                     << SnakeifyString(type->name()) << "_map();\n";
        factory_impl << "  HeapObject result =\n";
        factory_impl << "    factory()->AllocateRawWithImmortalMap(size, "
                        "allocation_type, map);\n";
        factory_impl << "  WriteBarrierMode write_barrier_mode =\n"
                     << "      allocation_type == AllocationType::kYoung\n"
                     << "      ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;\n";
        factory_impl << "  " << type->HandlifiedCppTypeName()
                     << " result_handle(" << type->name()
                     << "::cast(result), factory()->isolate());\n";

        for (const Field& f : type->ComputeAllFields()) {
          if (f.name_and_type.name == "map") continue;
          if (!f.index) {
            factory_impl << "  result_handle->TorqueGeneratedClass::set_"
                         << SnakeifyString(f.name_and_type.name) << "(";
            if (f.name_and_type.type->IsSubtypeOf(
                    TypeOracle::GetTaggedType()) &&
                !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
              factory_impl << "*" << f.name_and_type.name
                           << ", write_barrier_mode";
            } else {
              factory_impl << f.name_and_type.name;
            }
            factory_impl << ");\n";
          }
        }

        factory_impl << "  return result_handle;\n";
        factory_impl << "}\n\n";

        factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
                     << return_type
                     << " TorqueGeneratedFactory<Factory>::" << function_name
                     << "(" << parameters.str() << ");\n";
        factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
                     << return_type
                     << " TorqueGeneratedFactory<LocalFactory>::"
                     << function_name << "(" << parameters.str() << ");\n";

        factory_impl << "\n\n";
      }
    }

    for (const StructType* type : structs_used_in_classes) {
      std::ostream& header =
          GlobalContext::GeneratedPerFile(type->GetPosition().source)
              .class_definition_headerfile;
      if (type != TypeOracle::GetFloat64OrHoleType()) {
        GenerateStructLayoutDescription(header, type);
      }
    }
  }
  WriteFile(output_directory + "/" + factory_basename + ".inc",
            factory_header.str());
  WriteFile(output_directory + "/" + factory_basename + ".cc",
            factory_impl.str());
  WriteFile(output_directory + "/" + forward_declarations_filename,
            forward_declarations.str());
}
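// Example (sketch): for an exported class Foo with one tagged field "bar",
// the factory method generated above looks roughly like
//   template <typename Impl>
//   Handle<Foo> TorqueGeneratedFactory<Impl>::NewFoo(
//       Handle<Bar> bar, AllocationType allocation_type) {
//     int size = TorqueGeneratedFoo<Foo, HeapObject>::SizeFor();
//     Map map = factory()->read_only_roots().foo_map();
//     HeapObject result =
//       factory()->AllocateRawWithImmortalMap(size, allocation_type, map);
//     ...
//     return result_handle;
//   }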

namespace {
void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
                                      const std::string& gen_name,
                                      const std::string& gen_name_T,
                                      const std::string template_params) {
  impl << template_params << "\n";
  impl << "void " << gen_name_T << "::" << type->name()
       << "Print(std::ostream& os) {\n";
  impl << "  this->PrintHeader(os, \"" << type->name() << "\");\n";
  auto hierarchy = type->GetHierarchy();
  std::map<std::string, const AggregateType*> field_names;
  for (const AggregateType* aggregate_type : hierarchy) {
    for (const Field& f : aggregate_type->fields()) {
      if (f.name_and_type.name == "map") continue;
      if (!f.index.has_value()) {
        if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
            !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
          impl << "  os << \"\\n - " << f.name_and_type.name << ": \" << ";
          if (f.name_and_type.type->StructSupertype()) {
            // TODO(turbofan): Print struct fields too.
            impl << "\" <struct field printing still unimplemented>\";\n";
          } else {
            impl << "this->" << f.name_and_type.name;
            switch (f.read_synchronization) {
              case FieldSynchronization::kNone:
                impl << "();\n";
                break;
              case FieldSynchronization::kRelaxed:
                impl << "(kRelaxedLoad);\n";
                break;
              case FieldSynchronization::kAcquireRelease:
                impl << "(kAcquireLoad);\n";
                break;
            }
          }
        } else {
          impl << "  os << \"\\n - " << f.name_and_type.name << ": \" << "
               << "Brief(this->" << f.name_and_type.name;
          switch (f.read_synchronization) {
            case FieldSynchronization::kNone:
              impl << "());\n";
              break;
            case FieldSynchronization::kRelaxed:
              impl << "(kRelaxedLoad));\n";
              break;
            case FieldSynchronization::kAcquireRelease:
              impl << "(kAcquireLoad));\n";
              break;
          }
        }
      }
    }
  }
  impl << "  os << '\\n';\n";
  impl << "}\n\n";
}
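// Example (sketch): for a hypothetical class Foo with a Smi field "bar" and a
// tagged field "baz", the printer generated above looks roughly like
//   template <>
//   void TorqueGeneratedFoo<Foo, HeapObject>::FooPrint(std::ostream& os) {
//     this->PrintHeader(os, "Foo");
//     os << "\n - bar: " << this->bar();
//     os << "\n - baz: " << Brief(this->baz());
//     os << '\n';
//   }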
}  // namespace

void ImplementationVisitor::GeneratePrintDefinitions(
    const std::string& output_directory) {
  std::stringstream impl;
  std::string file_name = "objects-printer.cc";
  {
    IfDefScope object_print(impl, "OBJECT_PRINT");

    impl << "#include <iosfwd>\n\n";
    impl << "#include \"src/objects/all-objects-inl.h\"\n\n";

    NamespaceScope impl_namespaces(impl, {"v8", "internal"});

    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (!type->ShouldGeneratePrint()) continue;

      if (type->GenerateCppClassDefinitions()) {
        const ClassType* super = type->GetSuperClass();
        std::string gen_name = "TorqueGenerated" + type->name();
        std::string gen_name_T =
            gen_name + "<" + type->name() + ", " + super->name() + ">";
        std::string template_decl = "template <>";
        GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
                                         template_decl);
      } else {
        GeneratePrintDefinitionsForClass(impl, type, type->name(), type->name(),
                                         "");
      }
    }
  }

  std::string new_contents(impl.str());
  WriteFile(output_directory + "/" + file_name, new_contents);
}

base::Optional<std::string> MatchSimpleBodyDescriptor(const ClassType* type) {
  std::vector<ObjectSlotKind> slots = type->ComputeHeaderSlotKinds();
  if (!type->HasStaticSize()) {
    slots.push_back(*type->ComputeArraySlotKind());
  }

  // Skip the map slot.
  size_t i = 1;
  while (i < slots.size() && slots[i] == ObjectSlotKind::kNoPointer) ++i;
  if (i == slots.size()) return "DataOnlyBodyDescriptor";
  bool has_weak_pointers = false;
  size_t start_index = i;
  for (; i < slots.size(); ++i) {
    if (slots[i] == ObjectSlotKind::kStrongPointer) {
      continue;
    } else if (slots[i] == ObjectSlotKind::kMaybeObjectPointer) {
      has_weak_pointers = true;
    } else if (slots[i] == ObjectSlotKind::kNoPointer) {
      break;
    } else {
      return base::nullopt;
    }
  }
  size_t end_index = i;
  for (; i < slots.size(); ++i) {
    if (slots[i] != ObjectSlotKind::kNoPointer) return base::nullopt;
  }
  size_t start_offset = start_index * TargetArchitecture::TaggedSize();
  size_t end_offset = end_index * TargetArchitecture::TaggedSize();
  // We pick a suffix-range body descriptor even in cases where the object size
  // is fixed, to reduce the amount of code executed for object visitation.
  if (end_index == slots.size()) {
    return ToString("SuffixRange", has_weak_pointers ? "Weak" : "",
                    "BodyDescriptor<", start_offset, ">");
  }
  if (!has_weak_pointers) {
    return ToString("FixedRangeBodyDescriptor<", start_offset, ", ", end_offset,
                    ">");
  }
  return base::nullopt;
}
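// Example (sketch): a class whose post-map slots are all strong pointers up
// to the end of the object matches the suffix form, yielding a base class
// such as
//   SuffixRangeBodyDescriptor<kTaggedSize>
// whereas a tagged section followed by raw data yields
// FixedRangeBodyDescriptor<start, end>, and a class with no pointer slots at
// all maps to DataOnlyBodyDescriptor.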

void ImplementationVisitor::GenerateBodyDescriptors(
    const std::string& output_directory) {
  std::string file_name = "objects-body-descriptors-inl.inc";
  std::stringstream h_contents;

    for (const ClassType* type : TypeOracle::GetClasses()) {
      std::string name = type->name();
      if (!type->ShouldGenerateBodyDescriptor()) continue;

      bool has_array_fields = !type->HasStaticSize();
      std::vector<ObjectSlotKind> header_slot_kinds =
          type->ComputeHeaderSlotKinds();
      base::Optional<ObjectSlotKind> array_slot_kind =
          type->ComputeArraySlotKind();
      DCHECK_EQ(has_array_fields, array_slot_kind.has_value());

      h_contents << "class " << name << "::BodyDescriptor final : public ";
      if (auto descriptor_name = MatchSimpleBodyDescriptor(type)) {
        h_contents << *descriptor_name << " {\n";
        h_contents << " public:\n";
      } else {
        h_contents << "BodyDescriptorBase {\n";
        h_contents << " public:\n";

        h_contents << "  static bool IsValidSlot(Map map, HeapObject obj, int "
                      "offset) {\n";
        if (has_array_fields) {
          h_contents << "    if (offset < kHeaderSize) {\n";
        }
        h_contents << "      bool valid_slots[] = {";
        for (ObjectSlotKind slot : header_slot_kinds) {
          h_contents << (slot != ObjectSlotKind::kNoPointer ? "1" : "0") << ",";
        }
        h_contents << "};\n"
                   << "      return valid_slots[static_cast<unsigned "
                      "int>(offset)/kTaggedSize];\n";
        if (has_array_fields) {
          h_contents << "    }\n";
          bool array_is_tagged = *array_slot_kind != ObjectSlotKind::kNoPointer;
          h_contents << "    return " << (array_is_tagged ? "true" : "false")
                     << ";\n";
        }
        h_contents << "  }\n\n";

        h_contents << "  template <typename ObjectVisitor>\n";
        h_contents
            << "  static inline void IterateBody(Map map, HeapObject obj, "
               "int object_size, ObjectVisitor* v) {\n";

        std::vector<ObjectSlotKind> slots = std::move(header_slot_kinds);
        if (has_array_fields) slots.push_back(*array_slot_kind);

        // Skip the map slot.
        slots.erase(slots.begin());
        size_t start_offset = TargetArchitecture::TaggedSize();

        size_t end_offset = start_offset;
        ObjectSlotKind section_kind;
        for (size_t i = 0; i <= slots.size(); ++i) {
          base::Optional<ObjectSlotKind> next_section_kind;
          bool finished_section = false;
          if (i == 0) {
            next_section_kind = slots[i];
          } else if (i < slots.size()) {
            if (auto combined = Combine(section_kind, slots[i])) {
              next_section_kind = *combined;
            } else {
              next_section_kind = slots[i];
              finished_section = true;
            }
          } else {
            finished_section = true;
          }
          if (finished_section) {
            bool is_array_slot = i == slots.size() && has_array_fields;
            bool multiple_slots =
                is_array_slot ||
                (end_offset - start_offset > TargetArchitecture::TaggedSize());
            base::Optional<std::string> iterate_command;
            switch (section_kind) {
              case ObjectSlotKind::kStrongPointer:
                iterate_command = "IteratePointer";
                break;
              case ObjectSlotKind::kMaybeObjectPointer:
                iterate_command = "IterateMaybeWeakPointer";
                break;
              case ObjectSlotKind::kCustomWeakPointer:
                iterate_command = "IterateCustomWeakPointer";
                break;
              case ObjectSlotKind::kNoPointer:
                break;
            }
            if (iterate_command) {
              if (multiple_slots) *iterate_command += "s";
              h_contents << "    " << *iterate_command << "(obj, "
                         << start_offset;
              if (multiple_slots) {
                h_contents << ", "
                           << (i == slots.size() ? "object_size"
                                                 : std::to_string(end_offset));
              }
              h_contents << ", v);\n";
            }
            start_offset = end_offset;
          }
          if (i < slots.size()) section_kind = *next_section_kind;
          end_offset += TargetArchitecture::TaggedSize();
        }

        h_contents << "  }\n\n";
      }

      h_contents
          << "  static inline int SizeOf(Map map, HeapObject raw_object) {\n";
      if (type->size().SingleValue()) {
        h_contents << "    return " << *type->size().SingleValue() << ";\n";
      } else {
        // We use an unchecked_cast here because this is used for concurrent
        // marking, where we shouldn't re-read the map.
        h_contents << "    return " << name
                   << "::unchecked_cast(raw_object).AllocatedSize();\n";
      }
      h_contents << "  }\n\n";

      h_contents << "};\n";
    }

    WriteFile(output_directory + "/" + file_name, h_contents.str());
}
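// Example (sketch): for a class whose header is a strong-pointer section
// followed by raw data, the IterateBody generated above might contain
//   IteratePointers(obj, kTaggedSize, kRawDataOffset, v);
// (kRawDataOffset standing in for the section's end offset), with SizeOf
// returning either the static size or AllocatedSize() for variable-size
// objects.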

namespace {

// Generate verification code for a single piece of class data, which might be
// nested within a struct or might be a single element in an indexed field (or
// both).
void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
                                std::string offset, const Field& leaf_field,
                                std::string indexed_field_size,
                                std::ostream& cc_contents, bool is_map) {
  const Type* field_type = leaf_field.name_and_type.type;

  bool maybe_object =
      !field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
  const char* object_type = maybe_object ? "MaybeObject" : "Object";
  const char* verify_fn =
      maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer";
  if (indexed) {
    offset += " + i * " + indexed_field_size;
  }
  // Name the local var based on the field name for nicer CHECK output.
  const std::string value = leaf_field.name_and_type.name + "__value";

  // Read the field.
  if (is_map) {
    cc_contents << "    " << object_type << " " << value << " = o.map();\n";
  } else {
    cc_contents << "    " << object_type << " " << value << " = TaggedField<"
                << object_type << ">::load(o, " << offset << ");\n";
  }

  // Call VerifyPointer or VerifyMaybeObjectPointer on it.
  cc_contents << "    " << object_type << "::" << verify_fn << "(isolate, "
              << value << ");\n";

  // Check that the value is of an appropriate type. We can skip this part for
  // the Object type because it would not check anything beyond what we already
  // checked with VerifyPointer.
  if (field_type != TypeOracle::GetObjectType()) {
    cc_contents << "    CHECK(" << GenerateRuntimeTypeCheck(field_type, value)
                << ");\n";
  }
}

void GenerateClassFieldVerifier(const std::string& class_name,
                                const ClassType& class_type, const Field& f,
                                std::ostream& h_contents,
                                std::ostream& cc_contents) {
  if (!f.generate_verify) return;
  const Type* field_type = f.name_and_type.type;

  // We only verify tagged types, not raw numbers or pointers. Structs
  // consisting of tagged types are also included.
  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
      !field_type->StructSupertype())
    return;
  if (field_type == TypeOracle::GetFloat64OrHoleType()) return;
  // Do not verify if the field may be uninitialized.
  if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;

  std::string field_start_offset;
  if (f.index) {
    field_start_offset = f.name_and_type.name + "__offset";
    std::string length = f.name_and_type.name + "__length";
    cc_contents << "  intptr_t " << field_start_offset << ", " << length
                << ";\n";
    cc_contents << "  std::tie(std::ignore, " << field_start_offset << ", "
                << length << ") = "
                << Callable::PrefixNameForCCOutput(
                       class_type.GetSliceMacroName(f))
                << "(o);\n";

    // Slices use intptr, but TaggedField<T>.load() uses int, so verify that
    // such a cast is valid.
    cc_contents << "  CHECK_EQ(" << field_start_offset << ", static_cast<int>("
                << field_start_offset << "));\n";
    cc_contents << "  CHECK_EQ(" << length << ", static_cast<int>(" << length
                << "));\n";
    field_start_offset = "static_cast<int>(" + field_start_offset + ")";
    length = "static_cast<int>(" + length + ")";

    cc_contents << "  for (int i = 0; i < " << length << "; ++i) {\n";
  } else {
    // Non-indexed fields have known offsets.
    field_start_offset = std::to_string(*f.offset);
    cc_contents << "  {\n";
  }

  if (auto struct_type = field_type->StructSupertype()) {
    for (const Field& struct_field : (*struct_type)->fields()) {
      if (struct_field.name_and_type.type->IsSubtypeOf(
              TypeOracle::GetTaggedType())) {
        GenerateFieldValueVerifier(
            class_name, f.index.has_value(),
            field_start_offset + " + " + std::to_string(*struct_field.offset),
            struct_field, std::to_string((*struct_type)->PackedSize()),
            cc_contents, f.name_and_type.name == "map");
      }
    }
  } else {
    GenerateFieldValueVerifier(class_name, f.index.has_value(),
                               field_start_offset, f, "kTaggedSize",
                               cc_contents, f.name_and_type.name == "map");
  }

  cc_contents << "  }\n";
}
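// Example (sketch): for a strong tagged field "bar" at fixed offset 8, the
// verifier body emitted above is roughly
//   {
//     Object bar__value = TaggedField<Object>::load(o, 8);
//     Object::VerifyPointer(isolate, bar__value);
//     CHECK(bar__value.IsBar());
//   }
// with the surrounding for-loop variant used for indexed fields.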

}  // namespace

void ImplementationVisitor::GenerateClassVerifiers(
    const std::string& output_directory) {
  std::string file_name = "class-verifiers";
  std::stringstream h_contents;
  std::stringstream cc_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");
    IfDefScope verify_heap_h(h_contents, "VERIFY_HEAP");
    IfDefScope verify_heap_cc(cc_contents, "VERIFY_HEAP");

    h_contents << "#include \"src/base/macros.h\"\n\n";

    cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n\n";
    cc_contents << "#include \"src/objects/all-objects-inl.h\"\n";

    IncludeObjectMacrosScope object_macros(cc_contents);

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
    NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});

    cc_contents
        << "#include \"torque-generated/test/torque/test-torque-tq-inl.inc\"\n";

5125 5126
    // Generate forward declarations to avoid including any headers.
    h_contents << "class Isolate;\n";
5127
    for (const ClassType* type : TypeOracle::GetClasses()) {
5128
      if (!type->ShouldGenerateVerify()) continue;
5129
      h_contents << "class " << type->name() << ";\n";
5130 5131
    }

5132 5133
    const char* verifier_class = "TorqueGeneratedClassVerifiers";

5134
    h_contents << "class V8_EXPORT_PRIVATE " << verifier_class << "{\n";
5135 5136
    h_contents << " public:\n";
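
    // Every Torque class that opts into verification gets one static method.
    // Its emitted definition looks roughly like this (a sketch for a
    // hypothetical class Foo whose nearest verifying superclass is Bar):
    //
    //   void TorqueGeneratedClassVerifiers::FooVerify(Foo o,
    //                                                 Isolate* isolate) {
    //     o.BarVerify(isolate);  // superclass checks first
    //     CHECK(o.IsFoo());      // then the instance type
    //     ...per-field checks emitted by GenerateClassFieldVerifier...
    //   }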

    for (const ClassType* type : TypeOracle::GetClasses()) {
      std::string name = type->name();
      if (!type->ShouldGenerateVerify()) continue;

      std::string method_name = name + "Verify";

      h_contents << "  static void " << method_name << "(" << name
                 << " o, Isolate* isolate);\n";

      cc_contents << "void " << verifier_class << "::" << method_name << "("
                  << name << " o, Isolate* isolate) {\n";

      // First, do any verification for the super class. Not all classes have
      // verifiers, so skip to the nearest super class that has one.
      const ClassType* super_type = type->GetSuperClass();
      while (super_type && !super_type->ShouldGenerateVerify()) {
        super_type = super_type->GetSuperClass();
      }
      if (super_type) {
        std::string super_name = super_type->name();
        if (super_name == "HeapObject") {
          // Special case: HeapObjectVerify checks the Map type and dispatches
          // to more specific types, so calling it here would cause infinite
          // recursion. We could consider moving that behavior into a
          // different method to make the contract of *Verify methods more
          // consistent, but for now we'll just avoid the bad case.
          cc_contents << "  " << super_name << "Verify(o, isolate);\n";
        } else {
          cc_contents << "  o." << super_name << "Verify(isolate);\n";
        }
      }

      // Second, verify that this object is what it claims to be.
      cc_contents << "  CHECK(o.Is" << name << "());\n";

      // Third, verify its properties.
      for (auto f : type->fields()) {
        GenerateClassFieldVerifier(name, *type, f, h_contents, cc_contents);
      }

      cc_contents << "}\n";
    }

    h_contents << "};\n";
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}

void ImplementationVisitor::GenerateEnumVerifiers(
    const std::string& output_directory) {
  std::string file_name = "enum-verifiers";
  std::stringstream cc_contents;
  {
    cc_contents << "#include \"src/compiler/code-assembler.h\"\n";
    for (const std::string& include_path : GlobalContext::CppIncludes()) {
      cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
    }
    cc_contents << "\n";

    NamespaceScope cc_namespaces(cc_contents, {"v8", "internal", ""});
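    // Note: the trailing "" above opens an extra anonymous namespace in the
    // generated file, keeping the EnumVerifier class file-local.

    // Each enum gets a member function containing a switch over all of its
    // declared entries. A sketch of the output for a hypothetical enum with
    // constexpr type "SomeEnum" and entries kFoo and kBar:
    //
    //   void VerifyEnum_SomeEnum(SomeEnum x) {
    //     switch (x) {
    //       case kFoo: break;
    //       case kBar: break;
    //       // a 'default: break;' case is emitted only for open enums
    //     }
    //   }
    //
    // A stale entry in the Torque definition then fails to compile, and for
    // closed enums a missing entry triggers the compiler's exhaustiveness
    // warning.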

    cc_contents << "class EnumVerifier {\n";
    for (const auto& desc : GlobalContext::Get().ast()->EnumDescriptions()) {
      cc_contents << "  // " << desc.name << " (" << desc.pos << ")\n";
      cc_contents << "  void VerifyEnum_" << desc.name << "("
                  << desc.constexpr_generates
                  << " x) {\n"
                     "    switch (x) {\n";
      for (const auto& entry : desc.entries) {
        cc_contents << "      case " << entry << ": break;\n";
      }
      if (desc.is_open) cc_contents << "      default: break;\n";
      cc_contents << "    }\n  }\n\n";
    }
    cc_contents << "};\n";
  }

  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}

void ImplementationVisitor::GenerateExportedMacrosAssembler(
    const std::string& output_directory) {
  std::string file_name = "exported-macros-assembler";
  std::stringstream h_contents;
  std::stringstream cc_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");

    h_contents << "#include \"src/compiler/code-assembler.h\"\n";
    h_contents << "#include \"src/execution/frames.h\"\n";
    h_contents << "#include \"torque-generated/csa-types.h\"\n";
    cc_contents << "#include \"src/objects/fixed-array-inl.h\"\n";
    cc_contents << "#include \"src/objects/free-space.h\"\n";
    cc_contents << "#include \"src/objects/js-regexp-string-iterator.h\"\n";
    cc_contents << "#include \"src/objects/js-weak-refs.h\"\n";
    cc_contents << "#include \"src/objects/ordered-hash-table.h\"\n";
    cc_contents << "#include \"src/objects/property-descriptor-object.h\"\n";
    cc_contents << "#include \"src/objects/stack-frame-info.h\"\n";
    cc_contents << "#include \"src/objects/swiss-name-dictionary.h\"\n";
    cc_contents << "#include \"src/objects/synthetic-module.h\"\n";
    cc_contents << "#include \"src/objects/template-objects.h\"\n";
    cc_contents << "#include \"src/objects/torque-defined-classes.h\"\n";
    {
      IfDefScope intl_scope(cc_contents, "V8_INTL_SUPPORT");
      cc_contents << "#include \"src/objects/js-break-iterator.h\"\n";
      cc_contents << "#include \"src/objects/js-collator.h\"\n";
      cc_contents << "#include \"src/objects/js-date-time-format.h\"\n";
      cc_contents << "#include \"src/objects/js-display-names.h\"\n";
      cc_contents << "#include \"src/objects/js-list-format.h\"\n";
      cc_contents << "#include \"src/objects/js-locale.h\"\n";
      cc_contents << "#include \"src/objects/js-number-format.h\"\n";
      cc_contents << "#include \"src/objects/js-plural-rules.h\"\n";
      cc_contents << "#include \"src/objects/js-relative-time-format.h\"\n";
      cc_contents << "#include \"src/objects/js-segment-iterator.h\"\n";
      cc_contents << "#include \"src/objects/js-segmenter.h\"\n";
      cc_contents << "#include \"src/objects/js-segments.h\"\n";
    }
    cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";

    for (SourceId file : SourceFileMap::AllSources()) {
      cc_contents << "#include \"torque-generated/" +
                         SourceFileMap::PathFromV8RootWithoutExtension(file) +
                         "-tq-csa.h\"\n";
    }

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
    NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});

    h_contents << "class V8_EXPORT_PRIVATE "
                  "TorqueGeneratedExportedMacrosAssembler {\n"
               << " public:\n"
               << "  explicit TorqueGeneratedExportedMacrosAssembler"
                  "(compiler::CodeAssemblerState* state) : state_(state) {\n"
               << "    USE(state_);\n"
               << "  }\n";
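
    // Each exported Torque macro becomes a thin member function that
    // forwards to the file-level CSA function Torque generated for it.
    // Roughly (the signature and the "_0" suffix are illustrative; the real
    // symbol comes from macro->ExternalName()):
    //
    //   TNode<Object> FooBar(TNode<Object> a) {
    //     return FooBar_0(state_, a);
    //   }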

    for (auto& declarable : GlobalContext::AllDeclarables()) {
      TorqueMacro* macro = TorqueMacro::DynamicCast(declarable.get());
      if (!(macro && macro->IsExportedToCSA())) continue;

      cpp::Class assembler("TorqueGeneratedExportedMacrosAssembler");
      std::vector<std::string> generated_parameter_names;
      cpp::Function f = GenerateFunction(
          &assembler, macro->ReadableName(), macro->signature(),
          macro->parameter_names(), false, &generated_parameter_names);

      f.PrintDeclaration(h_contents);
      f.PrintDefinition(cc_contents, [&](std::ostream& stream) {
        stream << "return " << macro->ExternalName() << "(state_";
        for (const auto& name : generated_parameter_names) {
          stream << ", " << name;
        }
        stream << ");";
      });
    }

    h_contents << " private:\n"
               << "  compiler::CodeAssemblerState* state_;\n"
               << "};\n";
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}

namespace {

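// Flattens a (possibly nested) struct field into dotted paths: a
// hypothetical field "pair" of a struct with members "first" and "second"
// contributes "pair.first" and "pair.second" to |result|.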
void CollectAllFields(const std::string& path, const Field& field,
                      std::vector<std::string>& result) {
  if (field.name_and_type.type->StructSupertype()) {
    std::string next_path = path + field.name_and_type.name + ".";
    const StructType* struct_type =
        StructType::DynamicCast(field.name_and_type.type);
    for (const auto& inner_field : struct_type->fields()) {
      CollectAllFields(next_path, inner_field, result);
    }
  } else {
    result.push_back(path + field.name_and_type.name);
  }
}

}  // namespace

void ImplementationVisitor::GenerateCSATypes(
    const std::string& output_directory) {
  std::string file_name = "csa-types";
  std::stringstream h_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");
    h_contents << "#include \"src/compiler/code-assembler.h\"\n\n";

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
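    // A sketch of the emitted definition for a hypothetical Torque struct
    // with two Smi members "a" and "b" (exact names come from the generated
    // type helpers):
    //
    //   struct TorqueStructPair {
    //     TNode<Smi> a;
    //     TNode<Smi> b;
    //     std::tuple<TNode<Smi>, TNode<Smi>> Flatten() const {
    //       return std::make_tuple(a, b);
    //     }
    //   };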

    // Generates headers for all structs in a topologically-sorted order,
    // since TypeOracle keeps them in the order of their resolution.
    for (const auto& type : TypeOracle::GetAggregateTypes()) {
      const StructType* struct_type = StructType::DynamicCast(type.get());
      if (!struct_type) continue;
      h_contents << "struct " << struct_type->GetGeneratedTypeNameImpl()
                 << " {\n";
      for (auto& field : struct_type->fields()) {
        h_contents << "  " << field.name_and_type.type->GetGeneratedTypeName();
        h_contents << " " << field.name_and_type.name << ";\n";
      }
      h_contents << "\n  std::tuple<";
      bool first = true;
      for (const Type* type : LowerType(struct_type)) {
        if (!first) {
          h_contents << ", ";
        }
        first = false;
        h_contents << type->GetGeneratedTypeName();
      }
      std::vector<std::string> all_fields;
      for (auto& field : struct_type->fields()) {
        CollectAllFields("", field, all_fields);
      }
      h_contents << "> Flatten() const {\n"
                    "    return std::make_tuple(";
      PrintCommaSeparatedList(h_contents, all_fields);
      h_contents << ");\n";
      h_contents << "  }\n";
      h_contents << "};\n";
    }
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
}

void ReportAllUnusedMacros() {
  for (const auto& declarable : GlobalContext::AllDeclarables()) {
    if (!declarable->IsMacro() || declarable->IsExternMacro()) continue;

    Macro* macro = Macro::cast(declarable.get());
    if (macro->IsUsed()) continue;

    if (macro->IsTorqueMacro() && TorqueMacro::cast(macro)->IsExportedToCSA()) {
      continue;
    }
    // TODO(gsps): Mark methods of generic structs as used if they are used
    // in any instantiation.
    if (Method* method = Method::DynamicCast(macro)) {
      if (StructType* struct_type =
              StructType::DynamicCast(method->aggregate_type())) {
        if (struct_type->GetSpecializedFrom().has_value()) {
          continue;
        }
      }
    }

    std::vector<std::string> ignored_prefixes = {"Convert<", "Cast<",
                                                 "FromConstexpr<"};
    const std::string name = macro->ReadableName();
    const bool ignore =
        StartsWithSingleUnderscore(name) ||
        std::any_of(ignored_prefixes.begin(), ignored_prefixes.end(),
                    [&name](const std::string& prefix) {
                      return StringStartsWith(name, prefix);
                    });
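    // e.g., implicitly-instantiated generics such as "Convert<A, B>" and
    // helpers intentionally prefixed with a single underscore are exempt.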

    if (!ignore) {
      Lint("Macro '", macro->ReadableName(), "' is never used.")
          .Position(macro->IdentifierPosition());
    }
  }
}

}  // namespace torque
}  // namespace internal
}  // namespace v8