// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/pipeline.h"

#include <fstream>  // NOLINT(readability/streams)
#include <iostream>
#include <memory>
#include <sstream>

#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/builtins/profile-data-reader.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
#include "src/compiler/backend/mid-tier-register-allocator.h"
#include "src/compiler/backend/move-optimizer.h"
#include "src/compiler/backend/register-allocator-verifier.h"
#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/csa-load-elimination.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/decompression-optimizer.h"
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduled-machine-lowering.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/serializer-for-background-compilation.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/disassembler.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/shared-function-info.h"
#include "src/parsing/parse-info.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {
namespace compiler {

static constexpr char kCodegenZoneName[] = "codegen-zone";
static constexpr char kGraphZoneName[] = "graph-zone";
static constexpr char kInstructionZoneName[] = "instruction-zone";
static constexpr char kMachineGraphVerifierZoneName[] =
    "machine-graph-verifier-zone";
static constexpr char kPipelineCompilationJobZoneName[] =
    "pipeline-compilation-job-zone";
static constexpr char kRegisterAllocationZoneName[] =
    "register-allocation-zone";
static constexpr char kRegisterAllocatorVerifierZoneName[] =
    "register-allocator-verifier-zone";
namespace {

Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
  Context current = closure->context();
  size_t distance = 0;
  while (!current.IsNativeContext()) {
    if (current.IsModuleContext()) {
      return Just(
          OuterContext(handle(current, current.GetIsolate()), distance));
    }
    current = current.previous();
    distance++;
  }
  return Nothing<OuterContext>();
}

}  // anonymous namespace

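// PipelineData owns the zones and the intermediate artifacts (graph, schedule,
// instruction sequence, register allocation data, ...) that are shared by the
// compilation phases. The constructors below correspond to the different
// pipeline entry points (JavaScript, WebAssembly, CSA/machine-graph testing,
// and register allocation testing).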
class PipelineData {
 public:
  // For main entry point.
  PipelineData(ZoneStats* zone_stats, Isolate* isolate,
               OptimizedCompilationInfo* info,
               PipelineStatistics* pipeline_statistics,
               bool is_concurrent_inlining)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        roots_relative_addressing_enabled_(
            !isolate->serializer_enabled() &&
            !isolate->IsGeneratingEmbeddedBuiltins()),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        broker_(new JSHeapBroker(isolate_, info_->zone(),
                                 info_->trace_heap_broker(),
                                 is_concurrent_inlining, info->code_kind())),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {
    PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
    graph_ = graph_zone_->New<Graph>(graph_zone_);
    source_positions_ = graph_zone_->New<SourcePositionTable>(graph_);
    node_origins_ = info->trace_turbo_json()
                        ? graph_zone_->New<NodeOriginTable>(graph_)
                        : nullptr;
    simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
    machine_ = graph_zone_->New<MachineOperatorBuilder>(
        graph_zone_, MachineType::PointerRepresentation(),
        InstructionSelector::SupportedMachineOperatorFlags(),
        InstructionSelector::AlignmentRequirements());
    common_ = graph_zone_->New<CommonOperatorBuilder>(graph_zone_);
    javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
    jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
                                         simplified_, machine_);
    dependencies_ =
        info_->zone()->New<CompilationDependencies>(broker_, info_->zone());
  }

  // For WebAssembly compile entry point.
  PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine,
               OptimizedCompilationInfo* info, MachineGraph* mcgraph,
               PipelineStatistics* pipeline_statistics,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins,
               const AssemblerOptions& assembler_options)
      : isolate_(nullptr),
        wasm_engine_(wasm_engine),
        allocator_(wasm_engine->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(mcgraph->graph()),
        source_positions_(source_positions),
        node_origins_(node_origins),
        machine_(mcgraph->machine()),
        common_(mcgraph->common()),
        mcgraph_(mcgraph),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(assembler_options) {}

  // For CodeStubAssembler and machine graph testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, AccountingAllocator* allocator, Graph* graph,
               JSGraph* jsgraph, Schedule* schedule,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
               const AssemblerOptions& assembler_options,
               const ProfileDataFromFile* profile_data)
      : isolate_(isolate),
        wasm_engine_(isolate_->wasm_engine()),
        allocator_(allocator),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(graph),
        source_positions_(source_positions),
        node_origins_(node_origins),
        schedule_(schedule),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        jump_optimization_info_(jump_opt),
        assembler_options_(assembler_options),
        profile_data_(profile_data) {
    if (jsgraph) {
      jsgraph_ = jsgraph;
      simplified_ = jsgraph->simplified();
      machine_ = jsgraph->machine();
      common_ = jsgraph->common();
      javascript_ = jsgraph->javascript();
    } else {
      simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
      machine_ = graph_zone_->New<MachineOperatorBuilder>(
          graph_zone_, MachineType::PointerRepresentation(),
          InstructionSelector::SupportedMachineOperatorFlags(),
          InstructionSelector::AlignmentRequirements());
      common_ = graph_zone_->New<CommonOperatorBuilder>(graph_zone_);
      javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
      jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_,
                                           javascript_, simplified_, machine_);
    }
  }

  // For register allocation testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, InstructionSequence* sequence)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(sequence->zone()),
        sequence_(sequence),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {}

  ~PipelineData() {
    // Must happen before zones are destroyed.
    delete code_generator_;
    code_generator_ = nullptr;
    DeleteTyper();
    DeleteRegisterAllocationZone();
    DeleteInstructionZone();
    DeleteCodegenZone();
    DeleteGraphZone();
  }

  PipelineData(const PipelineData&) = delete;
  PipelineData& operator=(const PipelineData&) = delete;

  Isolate* isolate() const { return isolate_; }
  AccountingAllocator* allocator() const { return allocator_; }
  OptimizedCompilationInfo* info() const { return info_; }
  ZoneStats* zone_stats() const { return zone_stats_; }
  CompilationDependencies* dependencies() const { return dependencies_; }
  PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }
  bool compilation_failed() const { return compilation_failed_; }
  void set_compilation_failed() { compilation_failed_ = true; }

  bool verify_graph() const { return verify_graph_; }
  void set_verify_graph(bool value) { verify_graph_ = value; }

  MaybeHandle<Code> code() { return code_; }
  void set_code(MaybeHandle<Code> code) {
    DCHECK(code_.is_null());
    code_ = code;
  }

  CodeGenerator* code_generator() const { return code_generator_; }

  // RawMachineAssembler generally produces graphs which cannot be verified.
  bool MayHaveUnverifiableGraph() const { return may_have_unverifiable_graph_; }

  Zone* graph_zone() const { return graph_zone_; }
  Graph* graph() const { return graph_; }
  SourcePositionTable* source_positions() const { return source_positions_; }
  NodeOriginTable* node_origins() const { return node_origins_; }
  MachineOperatorBuilder* machine() const { return machine_; }
  CommonOperatorBuilder* common() const { return common_; }
  JSOperatorBuilder* javascript() const { return javascript_; }
  JSGraph* jsgraph() const { return jsgraph_; }
  MachineGraph* mcgraph() const { return mcgraph_; }
  Handle<NativeContext> native_context() const {
    return handle(info()->native_context(), isolate());
  }
  Handle<JSGlobalObject> global_object() const {
    return handle(info()->global_object(), isolate());
  }

  JSHeapBroker* broker() const { return broker_; }
  std::unique_ptr<JSHeapBroker> ReleaseBroker() {
    std::unique_ptr<JSHeapBroker> broker(broker_);
    broker_ = nullptr;
    return broker;
  }

  Schedule* schedule() const { return schedule_; }
  void set_schedule(Schedule* schedule) {
    DCHECK(!schedule_);
    schedule_ = schedule;
  }
  void reset_schedule() { schedule_ = nullptr; }

  Zone* instruction_zone() const { return instruction_zone_; }
  Zone* codegen_zone() const { return codegen_zone_; }
  InstructionSequence* sequence() const { return sequence_; }
  Frame* frame() const { return frame_; }

  Zone* register_allocation_zone() const { return register_allocation_zone_; }

  RegisterAllocationData* register_allocation_data() const {
    return register_allocation_data_;
  }
  TopTierRegisterAllocationData* top_tier_register_allocation_data() const {
    return TopTierRegisterAllocationData::cast(register_allocation_data_);
  }
  MidTierRegisterAllocationData* mid_tier_register_allocator_data() const {
    return MidTierRegisterAllocationData::cast(register_allocation_data_);
  }

  std::string const& source_position_output() const {
    return source_position_output_;
  }
  void set_source_position_output(std::string const& source_position_output) {
    source_position_output_ = source_position_output;
  }

  JumpOptimizationInfo* jump_optimization_info() const {
    return jump_optimization_info_;
  }

  const AssemblerOptions& assembler_options() const {
    return assembler_options_;
  }

  void ChooseSpecializationContext() {
    if (info()->function_context_specializing()) {
      DCHECK(info()->has_context());
      specialization_context_ =
          Just(OuterContext(handle(info()->context(), isolate()), 0));
    } else {
      specialization_context_ = GetModuleContext(info()->closure());
    }
  }

  Maybe<OuterContext> specialization_context() const {
    return specialization_context_;
  }

  size_t* address_of_max_unoptimized_frame_height() {
    return &max_unoptimized_frame_height_;
  }
  size_t max_unoptimized_frame_height() const {
    return max_unoptimized_frame_height_;
  }
  size_t* address_of_max_pushed_argument_count() {
    return &max_pushed_argument_count_;
  }
  size_t max_pushed_argument_count() const {
    return max_pushed_argument_count_;
  }

  CodeTracer* GetCodeTracer() const {
    return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
                                   : wasm_engine_->GetCodeTracer();
  }

  Typer* CreateTyper() {
    DCHECK_NULL(typer_);
    typer_ =
        new Typer(broker(), typer_flags_, graph(), &info()->tick_counter());
    return typer_;
  }

  void AddTyperFlag(Typer::Flag flag) {
    DCHECK_NULL(typer_);
    typer_flags_ |= flag;
  }

  void DeleteTyper() {
    delete typer_;
    typer_ = nullptr;
  }

  void DeleteGraphZone() {
    if (graph_zone_ == nullptr) return;
    graph_zone_scope_.Destroy();
    graph_zone_ = nullptr;
    graph_ = nullptr;
    source_positions_ = nullptr;
    node_origins_ = nullptr;
    simplified_ = nullptr;
    machine_ = nullptr;
    common_ = nullptr;
    javascript_ = nullptr;
    jsgraph_ = nullptr;
    mcgraph_ = nullptr;
    schedule_ = nullptr;
  }

  void DeleteInstructionZone() {
    if (instruction_zone_ == nullptr) return;
    instruction_zone_scope_.Destroy();
    instruction_zone_ = nullptr;
    sequence_ = nullptr;
  }

  void DeleteCodegenZone() {
    if (codegen_zone_ == nullptr) return;
    codegen_zone_scope_.Destroy();
    codegen_zone_ = nullptr;
    dependencies_ = nullptr;
    delete broker_;
    broker_ = nullptr;
    frame_ = nullptr;
  }

  void DeleteRegisterAllocationZone() {
    if (register_allocation_zone_ == nullptr) return;
    register_allocation_zone_scope_.Destroy();
    register_allocation_zone_ = nullptr;
    register_allocation_data_ = nullptr;
  }

  void InitializeInstructionSequence(const CallDescriptor* call_descriptor) {
    DCHECK_NULL(sequence_);
    InstructionBlocks* instruction_blocks =
        InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                  schedule());
    sequence_ = instruction_zone()->New<InstructionSequence>(
        isolate(), instruction_zone(), instruction_blocks);
    if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
      sequence_->instruction_blocks()[0]->mark_needs_frame();
    } else {
      DCHECK_EQ(0u, call_descriptor->CalleeSavedFPRegisters());
      DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters());
    }
  }

  void InitializeFrameData(CallDescriptor* call_descriptor) {
    DCHECK_NULL(frame_);
    int fixed_frame_size = 0;
    if (call_descriptor != nullptr) {
      fixed_frame_size =
          call_descriptor->CalculateFixedFrameSize(info()->code_kind());
    }
    frame_ = codegen_zone()->New<Frame>(fixed_frame_size);
    if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame());
  }

  void InitializeTopTierRegisterAllocationData(
      const RegisterConfiguration* config, CallDescriptor* call_descriptor,
      RegisterAllocationFlags flags) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ =
        register_allocation_zone()->New<TopTierRegisterAllocationData>(
            config, register_allocation_zone(), frame(), sequence(), flags,
            &info()->tick_counter(), debug_name());
  }

  void InitializeMidTierRegisterAllocationData(
      const RegisterConfiguration* config, CallDescriptor* call_descriptor) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ =
        register_allocation_zone()->New<MidTierRegisterAllocationData>(
            config, register_allocation_zone(), frame(), sequence(),
            &info()->tick_counter(), debug_name());
  }

  void InitializeOsrHelper() {
    DCHECK(!osr_helper_.has_value());
    osr_helper_.emplace(info());
  }

  void set_start_source_position(int position) {
    DCHECK_EQ(start_source_position_, kNoSourcePosition);
    start_source_position_ = position;
  }

  void InitializeCodeGenerator(Linkage* linkage,
                               std::unique_ptr<AssemblerBuffer> buffer) {
    DCHECK_NULL(code_generator_);
    code_generator_ = new CodeGenerator(
        codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
        osr_helper_, start_source_position_, jump_optimization_info_,
        info()->GetPoisoningMitigationLevel(), assembler_options_,
        info_->builtin_index(), max_unoptimized_frame_height(),
        max_pushed_argument_count(), std::move(buffer),
        FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
  }

  void BeginPhaseKind(const char* phase_kind_name) {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->BeginPhaseKind(phase_kind_name);
    }
  }

  void EndPhaseKind() {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->EndPhaseKind();
    }
  }

  const char* debug_name() const { return debug_name_.get(); }

  bool roots_relative_addressing_enabled() {
    return roots_relative_addressing_enabled_;
  }

  const ProfileDataFromFile* profile_data() const { return profile_data_; }
  void set_profile_data(const ProfileDataFromFile* profile_data) {
    profile_data_ = profile_data;
  }

  // RuntimeCallStats that is only available during job execution but not
  // finalization.
  // TODO(delphick): Currently even during execution this can be nullptr, due to
  // JSToWasmWrapperCompilationUnit::Execute. Once a table can be extracted
  // there, this method can DCHECK that it is never nullptr.
  RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
  void set_runtime_call_stats(RuntimeCallStats* stats) {
    runtime_call_stats_ = stats;
  }

 private:
  Isolate* const isolate_;
  wasm::WasmEngine* const wasm_engine_ = nullptr;
  AccountingAllocator* const allocator_;
  OptimizedCompilationInfo* const info_;
  std::unique_ptr<char[]> debug_name_;
  bool may_have_unverifiable_graph_ = true;
  ZoneStats* const zone_stats_;
  PipelineStatistics* pipeline_statistics_ = nullptr;
  bool compilation_failed_ = false;
  bool verify_graph_ = false;
  int start_source_position_ = kNoSourcePosition;
  base::Optional<OsrHelper> osr_helper_;
  MaybeHandle<Code> code_;
  CodeGenerator* code_generator_ = nullptr;
  Typer* typer_ = nullptr;
  Typer::Flags typer_flags_ = Typer::kNoFlags;
  bool roots_relative_addressing_enabled_ = false;

  // All objects in the following group of fields are allocated in graph_zone_.
  // They are all set to nullptr when the graph_zone_ is destroyed.
  ZoneStats::Scope graph_zone_scope_;
  Zone* graph_zone_ = nullptr;
  Graph* graph_ = nullptr;
  SourcePositionTable* source_positions_ = nullptr;
  NodeOriginTable* node_origins_ = nullptr;
  SimplifiedOperatorBuilder* simplified_ = nullptr;
  MachineOperatorBuilder* machine_ = nullptr;
  CommonOperatorBuilder* common_ = nullptr;
  JSOperatorBuilder* javascript_ = nullptr;
  JSGraph* jsgraph_ = nullptr;
  MachineGraph* mcgraph_ = nullptr;
  Schedule* schedule_ = nullptr;

  // All objects in the following group of fields are allocated in
  // instruction_zone_. They are all set to nullptr when the instruction_zone_
  // is destroyed.
  ZoneStats::Scope instruction_zone_scope_;
  Zone* instruction_zone_;
  InstructionSequence* sequence_ = nullptr;

  // All objects in the following group of fields are allocated in
  // codegen_zone_. They are all set to nullptr when the codegen_zone_
  // is destroyed.
  ZoneStats::Scope codegen_zone_scope_;
  Zone* codegen_zone_;
  CompilationDependencies* dependencies_ = nullptr;
  JSHeapBroker* broker_ = nullptr;
  Frame* frame_ = nullptr;

  // All objects in the following group of fields are allocated in
  // register_allocation_zone_. They are all set to nullptr when the zone is
  // destroyed.
  ZoneStats::Scope register_allocation_zone_scope_;
  Zone* register_allocation_zone_;
  RegisterAllocationData* register_allocation_data_ = nullptr;

  // Source position output for --trace-turbo.
  std::string source_position_output_;

  JumpOptimizationInfo* jump_optimization_info_ = nullptr;
  AssemblerOptions assembler_options_;
  Maybe<OuterContext> specialization_context_ = Nothing<OuterContext>();

  // The maximal combined height of all inlined frames in their unoptimized
  // state, and the maximal number of arguments pushed during function calls.
  // Calculated during instruction selection, applied during code generation.
  size_t max_unoptimized_frame_height_ = 0;
  size_t max_pushed_argument_count_ = 0;

  RuntimeCallStats* runtime_call_stats_ = nullptr;
  const ProfileDataFromFile* profile_data_ = nullptr;
};

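// PipelineImpl drives the individual compilation steps (graph creation,
// optimization, instruction selection, code assembly and finalization) over a
// shared PipelineData instance.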
class PipelineImpl final {
 public:
  explicit PipelineImpl(PipelineData* data) : data_(data) {}

  // Helpers for executing pipeline phases.
  template <typename Phase, typename... Args>
  void Run(Args&&... args);

  // Step A.1. Serialize the data needed for the compilation front-end.
  void Serialize();

  // Step A.2. Run the graph creation and initial optimization passes.
  bool CreateGraph();

  // Step B. Run the concurrent optimization passes.
  bool OptimizeGraph(Linkage* linkage);

  // Alternative step B. Run minimal concurrent optimization passes for
  // mid-tier.
  bool OptimizeGraphForMidTier(Linkage* linkage);

  // Substep B.1. Produce a scheduled graph.
  void ComputeScheduledGraph();

  // Substep B.2. Select instructions from a scheduled graph.
  bool SelectInstructions(Linkage* linkage);

  // Step C. Run the code assembly pass.
  void AssembleCode(Linkage* linkage,
                    std::unique_ptr<AssemblerBuffer> buffer = {});

  // Step D. Run the code finalization pass.
  MaybeHandle<Code> FinalizeCode(bool retire_broker = true);

  // Step E. Install any code dependencies.
  bool CommitDependencies(Handle<Code> code);

  void VerifyGeneratedCodeIsIdempotent();
  void RunPrintAndVerify(const char* phase, bool untyped = false);
  bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
  MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
  void AllocateRegistersForTopTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);
  void AllocateRegistersForMidTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);

  OptimizedCompilationInfo* info() const;
  Isolate* isolate() const;
  CodeGenerator* code_generator() const;

 private:
  PipelineData* const data_;
};

namespace {

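// Reducer adapter that establishes the wrapped node's source position before
// delegating to the underlying reducer, so that nodes created during the
// reduction inherit the correct position.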
class SourcePositionWrapper final : public Reducer {
 public:
  SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
      : reducer_(reducer), table_(table) {}
  ~SourcePositionWrapper() final = default;
  SourcePositionWrapper(const SourcePositionWrapper&) = delete;
  SourcePositionWrapper& operator=(const SourcePositionWrapper&) = delete;

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    SourcePosition const pos = table_->GetSourcePosition(node);
    SourcePositionTable::Scope position(table_, pos);
    return reducer_->Reduce(node);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  SourcePositionTable* const table_;
};

class NodeOriginsWrapper final : public Reducer {
 public:
  NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
      : reducer_(reducer), table_(table) {}
  ~NodeOriginsWrapper() final = default;
  NodeOriginsWrapper(const NodeOriginsWrapper&) = delete;
  NodeOriginsWrapper& operator=(const NodeOriginsWrapper&) = delete;

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    NodeOriginTable::Scope position(table_, reducer_name(), node);
    return reducer_->Reduce(node);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  NodeOriginTable* const table_;
};

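// Scope object that brackets a single pipeline phase: it opens the per-phase
// statistics, zone, node-origin and runtime-call-timer scopes and provides the
// temporary zone the phase runs in.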
class V8_NODISCARD PipelineRunScope {
 public:
  PipelineRunScope(
      PipelineData* data, const char* phase_name,
      RuntimeCallCounterId runtime_call_counter_id,
      RuntimeCallStats::CounterMode counter_mode = RuntimeCallStats::kExact)
      : phase_scope_(data->pipeline_statistics(), phase_name),
        zone_scope_(data->zone_stats(), phase_name),
        origin_scope_(data->node_origins(), phase_name),
        runtime_call_timer_scope(data->runtime_call_stats(),
                                 runtime_call_counter_id, counter_mode) {
    DCHECK_NOT_NULL(phase_name);
  }

  Zone* zone() { return zone_scope_.zone(); }

 private:
  PhaseScope phase_scope_;
  ZoneStats::Scope zone_scope_;
  NodeOriginTable::PhaseScope origin_scope_;
  RuntimeCallTimerScope runtime_call_timer_scope;
};

// LocalIsolateScope encapsulates the phase where persistent handles are
// attached to the LocalHeap inside {local_isolate}.
class V8_NODISCARD LocalIsolateScope {
 public:
  explicit LocalIsolateScope(JSHeapBroker* broker,
                             OptimizedCompilationInfo* info,
                             LocalIsolate* local_isolate)
      : broker_(broker), info_(info) {
    broker_->AttachLocalIsolate(info_, local_isolate);
    info_->tick_counter().AttachLocalHeap(local_isolate->heap());
  }

  ~LocalIsolateScope() {
    info_->tick_counter().DetachLocalHeap();
    broker_->DetachLocalIsolate(info_);
  }

 private:
  JSHeapBroker* broker_;
  OptimizedCompilationInfo* info_;
};

void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
                         int source_id, Handle<SharedFunctionInfo> shared) {
  if (!shared->script().IsUndefined(isolate)) {
    Handle<Script> script(Script::cast(shared->script()), isolate);

    if (!script->source().IsUndefined(isolate)) {
      CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
      Object source_name = script->name();
      auto& os = tracing_scope.stream();
      os << "--- FUNCTION SOURCE (";
      if (source_name.IsString()) {
        os << String::cast(source_name).ToCString().get() << ":";
      }
      os << shared->DebugNameCStr().get() << ") id{";
      os << info->optimization_id() << "," << source_id << "} start{";
      os << shared->StartPosition() << "} ---\n";
      {
        DisallowGarbageCollection no_gc;
        int start = shared->StartPosition();
        int len = shared->EndPosition() - start;
        SubStringRange source(String::cast(script->source()), no_gc, start,
                              len);
        for (const auto& c : source) {
          os << AsReversiblyEscapedUC16(c);
        }
      }

      os << "\n--- END ---\n";
    }
  }
}

// Print information for the given inlining: which function was inlined and
// where the inlining occurred.
void PrintInlinedFunctionInfo(
    OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
    int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
  CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
  auto& os = tracing_scope.stream();
  os << "INLINE (" << h.shared_info->DebugNameCStr().get() << ") id{"
     << info->optimization_id() << "," << source_id << "} AS " << inlining_id
     << " AT ";
  const SourcePosition position = h.position.position;
  if (position.IsKnown()) {
    os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
  } else {
    os << "<?>";
  }
  os << std::endl;
}

// Print the source of all functions that participated in this optimizing
// compilation. For inlined functions print source position of their inlining.
void PrintParticipatingSource(OptimizedCompilationInfo* info,
                              Isolate* isolate) {
  SourceIdAssigner id_assigner(info->inlined_functions().size());
  PrintFunctionSource(info, isolate, -1, info->shared_info());
  const auto& inlined = info->inlined_functions();
  for (unsigned id = 0; id < inlined.size(); id++) {
    const int source_id = id_assigner.GetIdFor(inlined[id].shared_info);
    PrintFunctionSource(info, isolate, source_id, inlined[id].shared_info);
    PrintInlinedFunctionInfo(info, isolate, source_id, id, inlined[id]);
  }
}

// Print the code after compiling it.
void PrintCode(Isolate* isolate, Handle<Code> code,
               OptimizedCompilationInfo* info) {
  if (FLAG_print_opt_source && info->IsOptimizing()) {
    PrintParticipatingSource(info, isolate);
  }

#ifdef ENABLE_DISASSEMBLER
  const bool print_code =
      FLAG_print_code ||
      (info->IsOptimizing() && FLAG_print_opt_code &&
       info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
      (info->IsNativeContextIndependent() && FLAG_print_nci_code);
  if (print_code) {
    std::unique_ptr<char[]> debug_name = info->GetDebugName();
    CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
    auto& os = tracing_scope.stream();

    // Print the source code if available.
    const bool print_source = info->IsOptimizing();
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      if (shared->script().IsScript() &&
          !Script::cast(shared->script()).source().IsUndefined(isolate)) {
        os << "--- Raw source ---\n";
        StringCharacterStream stream(
            String::cast(Script::cast(shared->script()).source()),
            shared->StartPosition());
        // fun->end_position() points to the last character in the stream. We
        // need to compensate by adding one to calculate the length.
        int source_len = shared->EndPosition() - shared->StartPosition() + 1;
        for (int i = 0; i < source_len; i++) {
          if (stream.HasMore()) {
            os << AsReversiblyEscapedUC16(stream.GetNext());
          }
        }
        os << "\n\n";
      }
    }
    if (info->IsOptimizing()) {
      os << "--- Optimized code ---\n"
         << "optimization_id = " << info->optimization_id() << "\n";
    } else {
      os << "--- Code ---\n";
    }
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      os << "source_position = " << shared->StartPosition() << "\n";
    }
    code->Disassemble(debug_name.get(), os, isolate);
    os << "--- End code ---\n";
  }
#endif  // ENABLE_DISASSEMBLER
}

void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
                            Schedule* schedule, const char* phase_name) {
  if (info->trace_turbo_json()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
            << ",\"data\":\"";
    std::stringstream schedule_stream;
    schedule_stream << *schedule;
    std::string schedule_string(schedule_stream.str());
    for (const auto& c : schedule_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\"},\n";
  }
  if (info->trace_turbo_graph() || FLAG_trace_turbo_scheduler) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream()
        << "-- Schedule --------------------------------------\n"
        << *schedule;
  }

  if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
}

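// Registers {reducer} with {graph_reducer}, wrapping it so that source
// positions and node origins are tracked while it runs (when those features
// are enabled).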
void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
                Reducer* reducer) {
  if (data->info()->source_positions()) {
    SourcePositionWrapper* const wrapper =
        data->graph_zone()->New<SourcePositionWrapper>(
            reducer, data->source_positions());
    reducer = wrapper;
  }
  if (data->info()->trace_turbo_json()) {
    NodeOriginsWrapper* const wrapper =
        data->graph_zone()->New<NodeOriginsWrapper>(reducer,
                                                    data->node_origins());
    reducer = wrapper;
  }

  graph_reducer->AddReducer(reducer);
}

PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
                                             OptimizedCompilationInfo* info,
                                             Isolate* isolate,
                                             ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.turbofan"),
                                     &tracing_enabled);
  if (tracing_enabled || FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics =
        new PipelineStatistics(info, isolate->GetTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("V8.TFInitializing");
  }

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\" : ";
    JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
                            info->shared_info());
    json_of << ",\n\"phases\":[";
  }

  return pipeline_statistics;
}

PipelineStatistics* CreatePipelineStatistics(
    wasm::WasmEngine* wasm_engine, wasm::FunctionBody function_body,
    const wasm::WasmModule* wasm_module, OptimizedCompilationInfo* info,
    ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"), &tracing_enabled);
  if (tracing_enabled || FLAG_turbo_stats_wasm) {
    pipeline_statistics = new PipelineStatistics(
        info, wasm_engine->GetOrCreateTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("V8.WasmInitializing");
  }

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    std::unique_ptr<char[]> function_name = info->GetDebugName();
    json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
    AccountingAllocator allocator;
    std::ostringstream disassembly;
    std::vector<int> source_positions;
    wasm::PrintRawWasmCode(&allocator, function_body, wasm_module,
                           wasm::kPrintLocals, disassembly, &source_positions);
    for (const auto& c : disassembly.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\",\n\"sourceLineToBytecodePosition\" : [";
    bool insert_comma = false;
    for (auto val : source_positions) {
      if (insert_comma) {
        json_of << ", ";
      }
      json_of << val;
      insert_comma = true;
    }
    json_of << "],\n\"phases\":[";
  }

  return pipeline_statistics;
}

}  // namespace

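// The compilation job used for regular TurboFan compilation of JavaScript
// functions. PrepareJobImpl and FinalizeJobImpl run on the main thread, while
// ExecuteJobImpl may run concurrently on a background thread.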
class PipelineCompilationJob final : public OptimizedCompilationJob {
 public:
  PipelineCompilationJob(Isolate* isolate,
                         Handle<SharedFunctionInfo> shared_info,
                         Handle<JSFunction> function, BailoutId osr_offset,
                         JavaScriptFrame* osr_frame, CodeKind code_kind);
  ~PipelineCompilationJob() final;
  PipelineCompilationJob(const PipelineCompilationJob&) = delete;
  PipelineCompilationJob& operator=(const PipelineCompilationJob&) = delete;

 protected:
  Status PrepareJobImpl(Isolate* isolate) final;
  Status ExecuteJobImpl(RuntimeCallStats* stats,
                        LocalIsolate* local_isolate) final;
  Status FinalizeJobImpl(Isolate* isolate) final;

  // Registers weak object to optimized code dependencies.
  void RegisterWeakObjectsInOptimizedCode(Isolate* isolate,
                                          Handle<NativeContext> context,
                                          Handle<Code> code);

 private:
  Zone zone_;
  ZoneStats zone_stats_;
  OptimizedCompilationInfo compilation_info_;
  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage* linkage_;
};

namespace {

bool ShouldUseConcurrentInlining(CodeKind code_kind, bool is_osr) {
  if (is_osr) return false;
  return code_kind == CodeKind::TURBOPROP || FLAG_concurrent_inlining;
}

}  // namespace

PipelineCompilationJob::PipelineCompilationJob(
    Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
    Handle<JSFunction> function, BailoutId osr_offset,
    JavaScriptFrame* osr_frame, CodeKind code_kind)
    // Note that the OptimizedCompilationInfo is not initialized at the time
    // we pass it to the CompilationJob constructor, but it is not
    // dereferenced there.
    : OptimizedCompilationJob(&compilation_info_, "TurboFan"),
      zone_(function->GetIsolate()->allocator(),
            kPipelineCompilationJobZoneName),
      zone_stats_(function->GetIsolate()->allocator()),
      compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
                        code_kind),
      pipeline_statistics_(CreatePipelineStatistics(
          handle(Script::cast(shared_info->script()), isolate),
          compilation_info(), function->GetIsolate(), &zone_stats_)),
      data_(&zone_stats_, function->GetIsolate(), compilation_info(),
            pipeline_statistics_.get(),
            ShouldUseConcurrentInlining(code_kind, !osr_offset.IsNone())),
      pipeline_(&data_),
      linkage_(nullptr) {
  compilation_info_.SetOptimizingForOsr(osr_offset, osr_frame);
}

PipelineCompilationJob::~PipelineCompilationJob() = default;

namespace {
// Ensure that the RuntimeStats table is set on the PipelineData for
// duration of the job phase and unset immediately afterwards. Each job
// needs to set the correct RuntimeCallStats table depending on whether it
// is running on a background or foreground thread.
class V8_NODISCARD PipelineJobScope {
 public:
  PipelineJobScope(PipelineData* data, RuntimeCallStats* stats) : data_(data) {
    data_->set_runtime_call_stats(stats);
  }

  ~PipelineJobScope() { data_->set_runtime_call_stats(nullptr); }

 private:
  PipelineData* data_;
};
}  // namespace

PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
    Isolate* isolate) {
  // Ensure that the RuntimeCallStats table of main thread is available for
  // phases happening during PrepareJob.
  PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());

  if (compilation_info()->bytecode_array()->length() >
      FLAG_max_optimized_bytecode_size) {
    return AbortOptimization(BailoutReason::kFunctionTooBig);
  }

  if (!FLAG_always_opt && !compilation_info()->IsNativeContextIndependent()) {
    compilation_info()->set_bailout_on_uninitialized();
  }
  if (FLAG_turbo_loop_peeling) {
    compilation_info()->set_loop_peeling();
  }
  if (FLAG_turbo_inlining && !compilation_info()->IsTurboprop() &&
      !compilation_info()->IsNativeContextIndependent()) {
    compilation_info()->set_inlining();
  }

  // This is the bottleneck for computing and setting poisoning level in the
  // optimizing compiler.
  PoisoningMitigationLevel load_poisoning =
      PoisoningMitigationLevel::kDontPoison;
  if (FLAG_untrusted_code_mitigations) {
    // For full mitigations, this can be changed to
    // PoisoningMitigationLevel::kPoisonAll.
    load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
  }
  compilation_info()->SetPoisoningMitigationLevel(load_poisoning);

  if (FLAG_turbo_allocation_folding) {
    compilation_info()->set_allocation_folding();
  }

  // Determine whether to specialize the code for the function's context.
  // We can't do this in the case of OSR, because we want to cache the
  // generated code on the native context keyed on SharedFunctionInfo.
  // We also can't do this for native context independent code (yet).
  // TODO(mythria): Check if it is better to key the OSR cache on JSFunction and
  // allow context specialization for OSR code.
  if (compilation_info()->closure()->raw_feedback_cell().map() ==
          ReadOnlyRoots(isolate).one_closure_cell_map() &&
      !compilation_info()->is_osr() &&
      !compilation_info()->IsNativeContextIndependent() &&
      !compilation_info()->IsTurboprop()) {
    compilation_info()->set_function_context_specializing();
    data_.ChooseSpecializationContext();
  }

  if (compilation_info()->source_positions()) {
    SharedFunctionInfo::EnsureSourcePositionsAvailable(
        isolate, compilation_info()->shared_info());
  }

  data_.set_start_source_position(
      compilation_info()->shared_info()->StartPosition());

  linkage_ = compilation_info()->zone()->New<Linkage>(
      Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info()));

  if (compilation_info()->is_osr()) data_.InitializeOsrHelper();

  pipeline_.Serialize();

  if (!data_.broker()->is_concurrent_inlining()) {
    if (!pipeline_.CreateGraph()) {
      CHECK(!isolate->has_pending_exception());
      return AbortOptimization(BailoutReason::kGraphBuildingFailed);
    }
  }

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
    RuntimeCallStats* stats, LocalIsolate* local_isolate) {
  // Ensure that the RuntimeCallStats table is only available during execution
  // and not during finalization as that might be on a different thread.
  PipelineJobScope scope(&data_, stats);
  LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
                                        local_isolate);

  if (data_.broker()->is_concurrent_inlining()) {
    if (!pipeline_.CreateGraph()) {
      return AbortOptimization(BailoutReason::kGraphBuildingFailed);
    }
  }

  // We selectively Unpark inside OptimizeGraph*.
  bool success;
  if (compilation_info_.code_kind() == CodeKind::TURBOPROP) {
    success = pipeline_.OptimizeGraphForMidTier(linkage_);
  } else {
    success = pipeline_.OptimizeGraph(linkage_);
  }
  if (!success) return FAILED;

  pipeline_.AssembleCode(linkage_);

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
    Isolate* isolate) {
  // Ensure that the RuntimeCallStats table of main thread is available for
  // phases happening during PrepareJob.
  PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
  RuntimeCallTimerScope runtimeTimer(
      isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
  MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
      return AbortOptimization(BailoutReason::kCodeGenerationFailed);
    }
    return FAILED;
  }
  if (!pipeline_.CommitDependencies(code)) {
    return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
  }

  compilation_info()->SetCode(code);
  Handle<NativeContext> context(compilation_info()->native_context(), isolate);
  if (CodeKindCanDeoptimize(code->kind())) context->AddOptimizedCode(*code);
  RegisterWeakObjectsInOptimizedCode(isolate, context, code);
  return SUCCEEDED;
}

void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
    Isolate* isolate, Handle<NativeContext> context, Handle<Code> code) {
  std::vector<Handle<Map>> maps;
  DCHECK(code->is_optimized_code());
  {
    DisallowGarbageCollection no_gc;
    int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
    for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
      DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
      if (code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
        Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
                                  isolate);
        if (object->IsMap()) {
          maps.push_back(Handle<Map>::cast(object));
        }
      }
    }
  }
  for (Handle<Map> map : maps) {
    isolate->heap()->AddRetainedMap(context, map);
  }
  code->set_can_have_weak_objects(true);
}

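// Compilation job for wasm heap stubs. The graph is built by the caller, so
// the job starts in the kReadyToExecute state and PrepareJobImpl is never
// called (it is UNREACHABLE below).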
class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
 public:
  WasmHeapStubCompilationJob(Isolate* isolate, wasm::WasmEngine* wasm_engine,
                             CallDescriptor* call_descriptor,
                             std::unique_ptr<Zone> zone, Graph* graph,
                             CodeKind kind, std::unique_ptr<char[]> debug_name,
                             const AssemblerOptions& options,
                             SourcePositionTable* source_positions)
      // Note that the OptimizedCompilationInfo is not initialized at the time
      // we pass it to the CompilationJob constructor, but it is not
      // dereferenced there.
      : OptimizedCompilationJob(&info_, "TurboFan",
                                CompilationJob::State::kReadyToExecute),
        debug_name_(std::move(debug_name)),
        info_(CStrVector(debug_name_.get()), graph->zone(), kind),
        call_descriptor_(call_descriptor),
        zone_stats_(zone->allocator()),
        zone_(std::move(zone)),
        graph_(graph),
        data_(&zone_stats_, &info_, isolate, wasm_engine->allocator(), graph_,
              nullptr, nullptr, source_positions,
              zone_->New<NodeOriginTable>(graph_), nullptr, options, nullptr),
        pipeline_(&data_),
        wasm_engine_(wasm_engine) {}

  WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
  WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
      delete;

 protected:
  Status PrepareJobImpl(Isolate* isolate) final;
  Status ExecuteJobImpl(RuntimeCallStats* stats,
                        LocalIsolate* local_isolate) final;
  Status FinalizeJobImpl(Isolate* isolate) final;

 private:
  std::unique_ptr<char[]> debug_name_;
  OptimizedCompilationInfo info_;
  CallDescriptor* call_descriptor_;
  ZoneStats zone_stats_;
  std::unique_ptr<Zone> zone_;
  Graph* graph_;
  PipelineData data_;
  PipelineImpl pipeline_;
  wasm::WasmEngine* wasm_engine_;
};

// static
std::unique_ptr<OptimizedCompilationJob>
Pipeline::NewWasmHeapStubCompilationJob(
    Isolate* isolate, wasm::WasmEngine* wasm_engine,
    CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
    CodeKind kind, std::unique_ptr<char[]> debug_name,
    const AssemblerOptions& options, SourcePositionTable* source_positions) {
  return std::make_unique<WasmHeapStubCompilationJob>(
      isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind,
      std::move(debug_name), options, source_positions);
}

CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
    Isolate* isolate) {
  UNREACHABLE();
}

CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
    RuntimeCallStats* stats, LocalIsolate* local_isolate) {
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        &info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
    pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
  }
  if (info_.trace_turbo_json() || info_.trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Begin compiling method " << info_.GetDebugName().get()
        << " using TurboFan" << std::endl;
  }
  if (info_.trace_turbo_graph()) {  // Simple textual RPO.
    StdoutStream{} << "-- wasm stub " << CodeKindToString(info_.code_kind())
                   << " graph -- " << std::endl
                   << AsRPO(*data_.graph());
  }

  if (info_.trace_turbo_json()) {
    TurboJsonFile json_of(&info_, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info_.GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }
  pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
  pipeline_.ComputeScheduledGraph();
  if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
    return CompilationJob::SUCCEEDED;
  }
  return CompilationJob::FAILED;
}

CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
    Isolate* isolate) {
  Handle<Code> code;
  if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
    V8::FatalProcessOutOfMemory(isolate,
                                "WasmHeapStubCompilationJob::FinalizeJobImpl");
  }
  if (pipeline_.CommitDependencies(code)) {
    info_.SetCode(code);
#ifdef ENABLE_DISASSEMBLER
    if (FLAG_print_opt_code) {
      CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
      code->Disassemble(compilation_info()->GetDebugName().get(),
                        tracing_scope.stream(), isolate);
    }
#endif
    return SUCCEEDED;
  }
  return FAILED;
}

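// Runs a single pipeline phase: PipelineRunScope provides the phase-local
// zone and runtime-call-stats scope, and the remaining arguments are
// forwarded to Phase::Run.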
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
  PipelineRunScope scope(this->data_, Phase::phase_name(),
                         Phase::kRuntimeCallCounterId, Phase::kCounterMode);
  Phase phase;
  phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
}

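// Each phase struct below declares its tracing name and runtime-call-stats
// counter via these macros; the main-thread variant uses exact counters,
// the default variant thread-specific ones.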
#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode)        \
  static const char* phase_name() { return "V8.TF" #Name; }     \
  static constexpr RuntimeCallCounterId kRuntimeCallCounterId = \
      RuntimeCallCounterId::kOptimize##Name;                    \
  static constexpr RuntimeCallStats::CounterMode kCounterMode = Mode;

#define DECL_PIPELINE_PHASE_CONSTANTS(Name) \
  DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kThreadSpecific)

#define DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Name) \
  DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kExact)

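// Builds the initial TurboFan graph from the function's bytecode.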
struct GraphBuilderPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(BytecodeGraphBuilder)

  void Run(PipelineData* data, Zone* temp_zone) {
    BytecodeGraphBuilderFlags flags;
    if (data->info()->analyze_environment_liveness()) {
      flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
    }
    if (data->info()->bailout_on_uninitialized()) {
      flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
    }

    JSFunctionRef closure(data->broker(), data->info()->closure());
    CallFrequency frequency(1.0f);
    BuildGraphFromBytecode(
        data->broker(), temp_zone, closure.shared(),
        closure.raw_feedback_cell(), data->info()->osr_offset(),
        data->jsgraph(), frequency, data->source_positions(),
        SourcePosition::kNotInlined, data->info()->code_kind(), flags,
        &data->info()->tick_counter());
  }
};

struct InliningPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Inlining)

  void Run(PipelineData* data, Zone* temp_zone) {
    OptimizedCompilationInfo* info = data->info();
    GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
                               data->broker(), data->jsgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
    if (data->info()->bailout_on_uninitialized()) {
      call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
    }
    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
                               temp_zone, call_reducer_flags,
                               data->dependencies());
    JSContextSpecialization context_specialization(
        &graph_reducer, data->jsgraph(), data->broker(),
        data->specialization_context(),
        data->info()->function_context_specializing()
            ? data->info()->closure()
            : MaybeHandle<JSFunction>());
    JSNativeContextSpecialization::Flags flags =
        JSNativeContextSpecialization::kNoFlags;
    if (data->info()->bailout_on_uninitialized()) {
      flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
    }
    // Passing the OptimizedCompilationInfo's shared zone here as
    // JSNativeContextSpecialization allocates out-of-heap objects
    // that need to live until code generation.
    JSNativeContextSpecialization native_context_specialization(
        &graph_reducer, data->jsgraph(), data->broker(), flags,
        data->dependencies(), temp_zone, info->zone());
    JSInliningHeuristic inlining(&graph_reducer,
                                 temp_zone, data->info(), data->jsgraph(),
                                 data->broker(), data->source_positions());

    JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
                                           data->broker());
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    if (!data->info()->IsNativeContextIndependent()) {
      AddReducer(data, &graph_reducer, &native_context_specialization);
      AddReducer(data, &graph_reducer, &context_specialization);
    }
    AddReducer(data, &graph_reducer, &intrinsic_lowering);
    AddReducer(data, &graph_reducer, &call_reducer);
    if (data->info()->inlining()) {
      AddReducer(data, &graph_reducer, &inlining);
    }
    graph_reducer.ReduceGraph();
    info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());
  }
};


struct TyperPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Typer)

  void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);

    // Make sure we always type True and False. Needed for escape analysis.
    roots.push_back(data->jsgraph()->TrueConstant());
    roots.push_back(data->jsgraph()->FalseConstant());

    LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
                                         data->common(), temp_zone);
    if (FLAG_turbo_loop_variable) induction_vars.Run();

    // The typer inspects heap objects, so we need to unpark the local heap.
    UnparkedScopeIfNeeded scope(data->broker());
    typer->Run(roots, &induction_vars);
  }
};

struct UntyperPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Untyper)

  void Run(PipelineData* data, Zone* temp_zone) {
    class RemoveTypeReducer final : public Reducer {
     public:
      const char* reducer_name() const override { return "RemoveTypeReducer"; }
      Reduction Reduce(Node* node) final {
        if (NodeProperties::IsTyped(node)) {
          NodeProperties::RemoveType(node);
          return Changed(node);
        }
        return NoChange();
      }
    };

    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    for (Node* node : roots) {
      NodeProperties::RemoveType(node);
    }

    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    RemoveTypeReducer remove_type_reducer;
    AddReducer(data, &graph_reducer, &remove_type_reducer);
    graph_reducer.ReduceGraph();
  }
};

struct HeapBrokerInitializationPhase {
  DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(HeapBrokerInitialization)

  void Run(PipelineData* data, Zone* temp_zone) {
    data->broker()->InitializeAndStartSerializing(data->native_context());
  }
};

struct CopyMetadataForConcurrentCompilePhase {
  DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(SerializeMetadata)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    JSHeapCopyReducer heap_copy_reducer(data->broker());
    AddReducer(data, &graph_reducer, &heap_copy_reducer);
    graph_reducer.ReduceGraph();

    // Some nodes that are no longer in the graph might still be in the cache.
    NodeVector cached_nodes(temp_zone);
    data->jsgraph()->GetCachedNodes(&cached_nodes);
    for (Node* const node : cached_nodes) graph_reducer.ReduceNode(node);
  }
};

struct SerializationPhase {
  DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Serialization)

  void Run(PipelineData* data, Zone* temp_zone) {
    SerializerForBackgroundCompilationFlags flags;
    if (data->info()->bailout_on_uninitialized()) {
      flags |= SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized;
    }
    if (data->info()->source_positions()) {
      flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
    }
    if (data->info()->analyze_environment_liveness()) {
      flags |=
          SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
    }
    if (data->info()->inlining()) {
      flags |= SerializerForBackgroundCompilationFlag::kEnableTurboInlining;
    }
    RunSerializerForBackgroundCompilation(
        data->zone_stats(), data->broker(), data->dependencies(),
        data->info()->closure(), flags, data->info()->osr_offset());
    if (data->specialization_context().IsJust()) {
      ContextRef(data->broker(),
                 data->specialization_context().FromJust().context);
    }
  }
};

struct TypedLoweringPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
                                     data->jsgraph(), data->broker(),
                                     temp_zone);
    JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
                                   data->broker(), temp_zone);
    ConstantFoldingReducer constant_folding_reducer(
        &graph_reducer, data->jsgraph(), data->broker());
    TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
                                         data->jsgraph(), data->broker());
    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
                                             data->broker());
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    AddReducer(data, &graph_reducer, &dead_code_elimination);

    if (!data->info()->IsNativeContextIndependent()) {
      AddReducer(data, &graph_reducer, &create_lowering);
    }
    AddReducer(data, &graph_reducer, &constant_folding_reducer);
    AddReducer(data, &graph_reducer, &typed_lowering);
    AddReducer(data, &graph_reducer, &typed_optimization);
    AddReducer(data, &graph_reducer, &simple_reducer);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);

    // ConstantFoldingReducer, JSCreateLowering, JSTypedLowering, and
    // TypedOptimization access the heap.
    UnparkedScopeIfNeeded scope(data->broker());

    graph_reducer.ReduceGraph();
  }
};


struct EscapeAnalysisPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(EscapeAnalysis)

  void Run(PipelineData* data, Zone* temp_zone) {
    EscapeAnalysis escape_analysis(data->jsgraph(),
                                   &data->info()->tick_counter(), temp_zone);
    escape_analysis.ReduceGraph();

    GraphReducer reducer(temp_zone, data->graph(),
                         &data->info()->tick_counter(), data->broker(),
                         data->jsgraph()->Dead());
    EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
                                         escape_analysis.analysis_result(),
                                         temp_zone);

    AddReducer(data, &reducer, &escape_reducer);

    // EscapeAnalysisReducer accesses the heap.
    UnparkedScopeIfNeeded scope(data->broker());

    reducer.ReduceGraph();
    // TODO(tebbi): Turn this into a debug mode check once we have confidence.
    escape_reducer.VerifyReplacement();
  }
};

struct TypeAssertionsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(TypeAssertions)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
                                             temp_zone);
    AddReducer(data, &graph_reducer, &type_assertions);
    graph_reducer.ReduceGraph();
  }
};

struct SimplifiedLoweringPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)

  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
    SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
                                data->source_positions(), data->node_origins(),
                                data->info()->GetPoisoningMitigationLevel(),
                                &data->info()->tick_counter(), linkage);

    // RepresentationChanger accesses the heap.
    UnparkedScopeIfNeeded scope(data->broker());

    lowering.LowerAllNodes();
  }
};

struct LoopPeelingPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LoopPeeling)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    trimmer.TrimGraph(roots.begin(), roots.end());

    LoopTree* loop_tree = LoopFinder::BuildLoopTree(
        data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
    // We call the typer inside of PeelInnerLoopsOfTree which inspects heap
    // objects, so we need to unpark the local heap.
    UnparkedScopeIfNeeded scope(data->broker());
    LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
               data->source_positions(), data->node_origins())
        .PeelInnerLoopsOfTree();
  }
};

struct LoopExitEliminationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LoopExitElimination)

  void Run(PipelineData* data, Zone* temp_zone) {
    LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
  }
};

struct GenericLoweringPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(GenericLowering)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
                                       data->broker());
    AddReducer(data, &graph_reducer, &generic_lowering);

    // JSGenericLowering accesses the heap due to ObjectRef's type checks.
    UnparkedScopeIfNeeded scope(data->broker());

    graph_reducer.ReduceGraph();
  }
};

struct EarlyOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(EarlyOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
                                             data->broker());
    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &simple_reducer);
    AddReducer(data, &graph_reducer, &redundancy_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct ControlFlowOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ControlFlowOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    ControlFlowOptimizer optimizer(data->graph(), data->common(),
                                   data->machine(),
                                   &data->info()->tick_counter(), temp_zone);
    optimizer.Optimize();
  }
};

struct EffectControlLinearizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(EffectLinearization)

  void Run(PipelineData* data, Zone* temp_zone) {
    {
      // The scheduler requires the graphs to be trimmed, so trim now.
      // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
      // graphs.
      GraphTrimmer trimmer(temp_zone, data->graph());
      NodeVector roots(temp_zone);
      data->jsgraph()->GetCachedNodes(&roots);
      trimmer.TrimGraph(roots.begin(), roots.end());

      // Schedule the graph without node splitting so that we can
      // fix the effect and control flow for nodes with low-level side
      // effects (such as changing representation to tagged or
      // 'floating' allocation regions.)
      Schedule* schedule = Scheduler::ComputeSchedule(
          temp_zone, data->graph(), Scheduler::kTempSchedule,
          &data->info()->tick_counter(), data->profile_data());
      TraceScheduleAndVerify(data->info(), data, schedule,
                             "effect linearization schedule");

      MaskArrayIndexEnable mask_array_index =
          (data->info()->GetPoisoningMitigationLevel() !=
           PoisoningMitigationLevel::kDontPoison)
              ? MaskArrayIndexEnable::kMaskArrayIndex
              : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
      // Post-pass for wiring the control/effects
      // - connect allocating representation changes into the control&effect
      //   chains and lower them,
      // - get rid of the region markers,
      // - introduce effect phis and rewire effects to get SSA again.
      LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
                             data->source_positions(), data->node_origins(),
                             mask_array_index, MaintainSchedule::kDiscard,
                             data->broker());
    }
    {
      // The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
      // run {DeadCodeElimination} to prune these parts of the graph.
      // Also, the following store-store elimination phase greatly benefits from
      // doing a common operator reducer and dead code elimination just before
      // it, to eliminate conditional deopts with a constant condition.
      GraphReducer graph_reducer(temp_zone, data->graph(),
                                 &data->info()->tick_counter(), data->broker(),
                                 data->jsgraph()->Dead());
      DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                                data->common(), temp_zone);
      CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                           data->broker(), data->common(),
                                           data->machine(), temp_zone);
      AddReducer(data, &graph_reducer, &dead_code_elimination);
      AddReducer(data, &graph_reducer, &common_reducer);
      graph_reducer.ReduceGraph();
    }
  }
};

struct StoreStoreEliminationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(StoreStoreElimination)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    trimmer.TrimGraph(roots.begin(), roots.end());

    StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
                               temp_zone);
  }
};

struct LoadEliminationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LoadElimination)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone,
                                                   BranchElimination::kEARLY);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
    LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                     temp_zone);
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
                                         data->jsgraph(), data->broker());
    ConstantFoldingReducer constant_folding_reducer(
        &graph_reducer, data->jsgraph(), data->broker());
    TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
                                                data->broker());

    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &redundancy_elimination);
    AddReducer(data, &graph_reducer, &load_elimination);
    AddReducer(data, &graph_reducer, &type_narrowing_reducer);
    AddReducer(data, &graph_reducer, &constant_folding_reducer);
    AddReducer(data, &graph_reducer, &typed_optimization);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);

    // ConstantFoldingReducer and TypedOptimization access the heap.
    UnparkedScopeIfNeeded scope(data->broker());

    graph_reducer.ReduceGraph();
  }
};

struct MemoryOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MemoryOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    // The memory optimizer requires the graphs to be trimmed, so trim now.
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    trimmer.TrimGraph(roots.begin(), roots.end());

    // Optimize allocations and load/store operations.
    MemoryOptimizer optimizer(
        data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
        data->info()->allocation_folding()
            ? MemoryLowering::AllocationFolding::kDoAllocationFolding
            : MemoryLowering::AllocationFolding::kDontAllocationFolding,
        data->debug_name(), &data->info()->tick_counter());
    optimizer.Optimize();
  }
};

struct LateOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LateOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    JSGraphAssembler graph_assembler(data->jsgraph(), temp_zone);
    SelectLowering select_lowering(&graph_assembler, data->graph());
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &select_lowering);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct MachineOperatorOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MachineOperatorOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());

    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct DecompressionOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(DecompressionOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    if (COMPRESS_POINTERS_BOOL) {
      DecompressionOptimizer decompression_optimizer(
          temp_zone, data->graph(), data->common(), data->machine());
      decompression_optimizer.Reduce();
    }
  }
};

struct ScheduledEffectControlLinearizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ScheduledEffectControlLinearization)

  void Run(PipelineData* data, Zone* temp_zone) {
    MaskArrayIndexEnable mask_array_index =
        (data->info()->GetPoisoningMitigationLevel() !=
         PoisoningMitigationLevel::kDontPoison)
            ? MaskArrayIndexEnable::kMaskArrayIndex
            : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
    // Post-pass for wiring the control/effects
    // - connect allocating representation changes into the control&effect
    //   chains and lower them,
    // - get rid of the region markers,
    // - introduce effect phis and rewire effects to get SSA again.
    LinearizeEffectControl(data->jsgraph(), data->schedule(), temp_zone,
                           data->source_positions(), data->node_origins(),
                           mask_array_index, MaintainSchedule::kMaintain,
                           data->broker());

    // TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
    Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
    if (FLAG_turbo_verify) Scheduler::GenerateDominatorTree(data->schedule());
    TraceScheduleAndVerify(data->info(), data, data->schedule(),
                           "effect linearization schedule");
  }
};

struct ScheduledMachineLoweringPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ScheduledMachineLowering)

  void Run(PipelineData* data, Zone* temp_zone) {
    ScheduledMachineLowering machine_lowering(
        data->jsgraph(), data->schedule(), temp_zone, data->source_positions(),
        data->node_origins(), data->info()->GetPoisoningMitigationLevel());
    machine_lowering.Run();

    // TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
    Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
    Scheduler::GenerateDominatorTree(data->schedule());
    TraceScheduleAndVerify(data->info(), data, data->schedule(),
                           "machine lowered schedule");
  }
};

struct CsaEarlyOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(CSAEarlyOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                        temp_zone);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    AddReducer(data, &graph_reducer, &load_elimination);
    graph_reducer.ReduceGraph();
  }
};

struct CsaOptimizationPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(CSAOptimization)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(), data->broker(),
                               data->jsgraph()->Dead());
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
};

struct EarlyGraphTrimmingPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(EarlyTrimming)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    trimmer.TrimGraph(roots.begin(), roots.end());
  }
};


struct LateGraphTrimmingPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(LateGraphTrimming)

  void Run(PipelineData* data, Zone* temp_zone) {
    GraphTrimmer trimmer(temp_zone, data->graph());
    NodeVector roots(temp_zone);
    if (data->jsgraph()) {
      data->jsgraph()->GetCachedNodes(&roots);
    }
    trimmer.TrimGraph(roots.begin(), roots.end());
  }
};


struct ComputeSchedulePhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Scheduling)

  void Run(PipelineData* data, Zone* temp_zone) {
    Schedule* schedule = Scheduler::ComputeSchedule(
        temp_zone, data->graph(),
        data->info()->splitting() ? Scheduler::kSplitNodes
                                  : Scheduler::kNoFlags,
        &data->info()->tick_counter(), data->profile_data());
    data->set_schedule(schedule);
  }
};

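// Helper for the JSON tracing output (trace_turbo_json): maps node ids and
// basic blocks to the ranges of instructions generated for them.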
struct InstructionRangesAsJSON {
  const InstructionSequence* sequence;
  const ZoneVector<std::pair<int, int>>* instr_origins;
};

std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
  const int max = static_cast<int>(s.sequence->LastInstructionIndex());

  out << ", \"nodeIdToInstructionRange\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.instr_origins->size(); ++i) {
    std::pair<int, int> offset = (*s.instr_origins)[i];
    if (offset.first == -1) continue;
    const int first = max - offset.first + 1;
    const int second = max - offset.second + 1;
    if (need_comma) out << ", ";
    out << "\"" << i << "\": [" << first << ", " << second << "]";
    need_comma = true;
  }
  out << "}";
  out << ", \"blockIdtoInstructionRange\": {";
  need_comma = false;
  for (auto block : s.sequence->instruction_blocks()) {
    if (need_comma) out << ", ";
    out << "\"" << block->rpo_number() << "\": [" << block->code_start() << ", "
        << block->code_end() << "]";
    need_comma = true;
  }
  out << "}";
  return out;
}

struct InstructionSelectionPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(SelectInstructions)

  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
    InstructionSelector selector(
        temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
        data->schedule(), data->source_positions(), data->frame(),
        data->info()->switch_jump_table()
            ? InstructionSelector::kEnableSwitchJumpTable
            : InstructionSelector::kDisableSwitchJumpTable,
        &data->info()->tick_counter(), data->broker(),
        data->address_of_max_unoptimized_frame_height(),
        data->address_of_max_pushed_argument_count(),
        data->info()->source_positions()
            ? InstructionSelector::kAllSourcePositions
            : InstructionSelector::kCallSourcePositions,
        InstructionSelector::SupportedFeatures(),
        FLAG_turbo_instruction_scheduling
            ? InstructionSelector::kEnableScheduling
            : InstructionSelector::kDisableScheduling,
        data->roots_relative_addressing_enabled()
            ? InstructionSelector::kEnableRootsRelativeAddressing
            : InstructionSelector::kDisableRootsRelativeAddressing,
        data->info()->GetPoisoningMitigationLevel(),
        data->info()->trace_turbo_json()
            ? InstructionSelector::kEnableTraceTurboJson
            : InstructionSelector::kDisableTraceTurboJson);
    if (!selector.SelectInstructions()) {
      data->set_compilation_failed();
    }
    if (data->info()->trace_turbo_json()) {
      TurboJsonFile json_of(data->info(), std::ios_base::app);
      json_of << "{\"name\":\"" << phase_name()
              << "\",\"type\":\"instructions\""
              << InstructionRangesAsJSON{data->sequence(),
                                         &selector.instr_origins()}
              << "},\n";
    }
  }
};


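// The phases below wrap the individual steps of the top-tier register
// allocator; each operates on data->top_tier_register_allocation_data().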
struct MeetRegisterConstraintsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MeetRegisterConstraints)
  void Run(PipelineData* data, Zone* temp_zone) {
    ConstraintBuilder builder(data->top_tier_register_allocation_data());
    builder.MeetRegisterConstraints();
  }
};


struct ResolvePhisPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ResolvePhis)

  void Run(PipelineData* data, Zone* temp_zone) {
    ConstraintBuilder builder(data->top_tier_register_allocation_data());
    builder.ResolvePhis();
  }
};


struct BuildLiveRangesPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRanges)

  void Run(PipelineData* data, Zone* temp_zone) {
    LiveRangeBuilder builder(data->top_tier_register_allocation_data(),
                             temp_zone);
    builder.BuildLiveRanges();
  }
};

struct BuildBundlesPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRangeBundles)

  void Run(PipelineData* data, Zone* temp_zone) {
    BundleBuilder builder(data->top_tier_register_allocation_data());
    builder.BuildBundles();
  }
};

template <typename RegAllocator>
struct AllocateGeneralRegistersPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(AllocateGeneralRegisters)

  void Run(PipelineData* data, Zone* temp_zone) {
    RegAllocator allocator(data->top_tier_register_allocation_data(),
                           RegisterKind::kGeneral, temp_zone);
    allocator.AllocateRegisters();
  }
};

template <typename RegAllocator>
struct AllocateFPRegistersPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(AllocateFPRegisters)

  void Run(PipelineData* data, Zone* temp_zone) {
    RegAllocator allocator(data->top_tier_register_allocation_data(),
                           RegisterKind::kDouble, temp_zone);
    allocator.AllocateRegisters();
  }
};

struct DecideSpillingModePhase {
  DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)

  void Run(PipelineData* data, Zone* temp_zone) {
    OperandAssigner assigner(data->top_tier_register_allocation_data());
    assigner.DecideSpillingMode();
  }
};

struct AssignSpillSlotsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(AssignSpillSlots)

  void Run(PipelineData* data, Zone* temp_zone) {
    OperandAssigner assigner(data->top_tier_register_allocation_data());
    assigner.AssignSpillSlots();
  }
};


struct CommitAssignmentPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(CommitAssignment)

  void Run(PipelineData* data, Zone* temp_zone) {
    OperandAssigner assigner(data->top_tier_register_allocation_data());
    assigner.CommitAssignment();
  }
};


struct PopulateReferenceMapsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(PopulatePointerMaps)

  void Run(PipelineData* data, Zone* temp_zone) {
    ReferenceMapPopulator populator(data->top_tier_register_allocation_data());
    populator.PopulateReferenceMaps();
  }
};


struct ConnectRangesPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ConnectRanges)

  void Run(PipelineData* data, Zone* temp_zone) {
    LiveRangeConnector connector(data->top_tier_register_allocation_data());
    connector.ConnectRanges(temp_zone);
  }
};


struct ResolveControlFlowPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(ResolveControlFlow)

  void Run(PipelineData* data, Zone* temp_zone) {
    LiveRangeConnector connector(data->top_tier_register_allocation_data());
    connector.ResolveControlFlow(temp_zone);
  }
};

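// Corresponding phases for the mid-tier register allocator, operating on
// data->mid_tier_register_allocator_data().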
struct MidTierRegisterOutputDefinitionPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)

  void Run(PipelineData* data, Zone* temp_zone) {
    DefineOutputs(data->mid_tier_register_allocator_data());
  }
};

struct MidTierRegisterAllocatorPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)

  void Run(PipelineData* data, Zone* temp_zone) {
    AllocateRegisters(data->mid_tier_register_allocator_data());
  }
};

struct MidTierSpillSlotAllocatorPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MidTierSpillSlotAllocator)

  void Run(PipelineData* data, Zone* temp_zone) {
    AllocateSpillSlots(data->mid_tier_register_allocator_data());
  }
};

struct MidTierPopulateReferenceMapsPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(MidTierPopulateReferenceMaps)

  void Run(PipelineData* data, Zone* temp_zone) {
    PopulateReferenceMaps(data->mid_tier_register_allocator_data());
  }
};

struct OptimizeMovesPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(OptimizeMoves)

  void Run(PipelineData* data, Zone* temp_zone) {
    MoveOptimizer move_optimizer(temp_zone, data->sequence());
    move_optimizer.Run();
  }
};

struct FrameElisionPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(FrameElision)

  void Run(PipelineData* data, Zone* temp_zone) {
    FrameElider(data->sequence()).Run();
  }
};

struct JumpThreadingPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(JumpThreading)

  void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
    ZoneVector<RpoNumber> result(temp_zone);
    if (JumpThreading::ComputeForwarding(temp_zone, &result, data->sequence(),
                                         frame_at_start)) {
      JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
    }
  }
};

struct AssembleCodePhase {
  DECL_PIPELINE_PHASE_CONSTANTS(AssembleCode)

  void Run(PipelineData* data, Zone* temp_zone) {
    data->code_generator()->AssembleCode();
  }
};

struct FinalizeCodePhase {
  DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(FinalizeCode)

  void Run(PipelineData* data, Zone* temp_zone) {
    data->set_code(data->code_generator()->FinalizeCode());
  }
};


struct PrintGraphPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(PrintGraph)

  void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
    OptimizedCompilationInfo* info = data->info();
    Graph* graph = data->graph();

    if (info->trace_turbo_json()) {  // Print JSON.
      UnparkedScopeIfNeeded scope(data->broker());
      AllowHandleDereference allow_deref;

      TurboJsonFile json_of(info, std::ios_base::app);
      json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
              << AsJSON(*graph, data->source_positions(), data->node_origins())
              << "},\n";
    }

    if (info->trace_turbo_scheduled()) {
      AccountingAllocator allocator;
      Schedule* schedule = data->schedule();
      if (schedule == nullptr) {
        schedule = Scheduler::ComputeSchedule(
            temp_zone, data->graph(), Scheduler::kNoFlags,
            &info->tick_counter(), data->profile_data());
      }

      UnparkedScopeIfNeeded scope(data->broker());
      AllowHandleDereference allow_deref;
      CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
      tracing_scope.stream()
          << "-- Graph after " << phase << " -- " << std::endl
          << AsScheduledGraph(schedule);
    } else if (info->trace_turbo_graph()) {  // Simple textual RPO.
      UnparkedScopeIfNeeded scope(data->broker());
      AllowHandleDereference allow_deref;
      CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
      tracing_scope.stream()
          << "-- Graph after " << phase << " -- " << std::endl
          << AsRPO(*graph);
    }
  }
};


struct VerifyGraphPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(VerifyGraph)

  void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
           bool values_only = false) {
    Verifier::CodeType code_type;
    switch (data->info()->code_kind()) {
      case CodeKind::WASM_FUNCTION:
      case CodeKind::WASM_TO_CAPI_FUNCTION:
      case CodeKind::WASM_TO_JS_FUNCTION:
      case CodeKind::JS_TO_WASM_FUNCTION:
      case CodeKind::C_WASM_ENTRY:
        code_type = Verifier::kWasm;
        break;
      default:
        code_type = Verifier::kDefault;
    }
    Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
                  values_only ? Verifier::kValuesOnly : Verifier::kAll,
                  code_type);
  }
};

#undef DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS
#undef DECL_PIPELINE_PHASE_CONSTANTS
#undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER

void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
  if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
    Run<PrintGraphPhase>(phase);
  }
  if (FLAG_turbo_verify) {
    Run<VerifyGraphPhase>(untyped);
  }
}

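// Initializes the heap broker and, when concurrent inlining is enabled,
// serializes the heap data required for background compilation before graph
// creation starts.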
void PipelineImpl::Serialize() {
  PipelineData* data = this->data_;

  data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");

  if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Begin compiling method " << info()->GetDebugName().get()
        << " using TurboFan" << std::endl;
  }
  if (info()->trace_turbo_json()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VCompilation(info());
  }

  data->source_positions()->AddDecorator();
  if (data->info()->trace_turbo_json()) {
    data->node_origins()->AddDecorator();
  }

  data->broker()->SetTargetNativeContextRef(data->native_context());
  if (data->broker()->is_concurrent_inlining()) {
    Run<HeapBrokerInitializationPhase>();
    Run<SerializationPhase>();
    data->broker()->StopSerializing();
  }
  data->EndPhaseKind();
}

bool PipelineImpl::CreateGraph() {
  PipelineData* data = this->data_;
  UnparkedScopeIfNeeded unparked_scope(data->broker());

  data->BeginPhaseKind("V8.TFGraphCreation");

  Run<GraphBuilderPhase>();
  RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);

  // Perform function context specialization and inlining (if enabled).
  Run<InliningPhase>();
  RunPrintAndVerify(InliningPhase::phase_name(), true);

  // Remove dead->live edges from the graph.
  Run<EarlyGraphTrimmingPhase>();
  RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);

  // Determine the Typer operation flags.
  {
    SharedFunctionInfoRef shared_info(data->broker(), info()->shared_info());
    if (is_sloppy(shared_info.language_mode()) &&
        shared_info.IsUserJavaScript()) {
      // Sloppy mode functions always have an Object for this.
      data->AddTyperFlag(Typer::kThisIsReceiver);
    }
    if (IsClassConstructor(shared_info.kind())) {
      // Class constructors cannot be [[Call]]ed.
      data->AddTyperFlag(Typer::kNewTargetIsReceiver);
    }
  }

  // Run the type-sensitive lowerings and optimizations on the graph.
  {
    if (!data->broker()->is_concurrent_inlining()) {
      Run<HeapBrokerInitializationPhase>();
      Run<CopyMetadataForConcurrentCompilePhase>();
      data->broker()->StopSerializing();
    }
  }

  data->EndPhaseKind();

  return true;
}

bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
  PipelineData* data = this->data_;

  data->BeginPhaseKind("V8.TFLowering");

  // Type the graph and keep the Typer running such that new nodes get
  // automatically typed when they are created.
  Run<TyperPhase>(data->CreateTyper());
  RunPrintAndVerify(TyperPhase::phase_name());

  Run<TypedLoweringPhase>();
  RunPrintAndVerify(TypedLoweringPhase::phase_name());

  if (data->info()->loop_peeling()) {
    Run<LoopPeelingPhase>();
    RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
  } else {
    Run<LoopExitEliminationPhase>();
    RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
  }

  if (FLAG_turbo_load_elimination) {
    Run<LoadEliminationPhase>();
    RunPrintAndVerify(LoadEliminationPhase::phase_name());
  }
  data->DeleteTyper();

  if (FLAG_turbo_escape) {
    Run<EscapeAnalysisPhase>();
    if (data->compilation_failed()) {
      info()->AbortOptimization(
          BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
      data->EndPhaseKind();
      return false;
    }
    RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
  }

  if (FLAG_assert_types) {
    Run<TypeAssertionsPhase>();
    RunPrintAndVerify(TypeAssertionsPhase::phase_name());
  }

  // Perform simplified lowering. This has to run w/o the Typer decorator,
  // because we cannot compute meaningful types anyways, and the computed types
  // might even conflict with the representation/truncation logic.
  Run<SimplifiedLoweringPhase>(linkage);
  RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);

  // From now on it is invalid to look at types on the nodes, because the types
  // on the nodes might not make sense after representation selection due to the
  // way we handle truncations; if we'd want to look at types afterwards we'd
  // essentially need to re-type (large portions of) the graph.

  // In order to catch bugs related to type access after this point, we now
  // remove the types from the nodes (currently only in Debug builds).
#ifdef DEBUG
  Run<UntyperPhase>();
  RunPrintAndVerify(UntyperPhase::phase_name(), true);
#endif

  // Run generic lowering pass.
  Run<GenericLoweringPhase>();
  RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);

  data->BeginPhaseKind("V8.TFBlockBuilding");

  data->InitializeFrameData(linkage->GetIncomingDescriptor());

  // Run early optimization pass.
  Run<EarlyOptimizationPhase>();
  RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);

  Run<EffectControlLinearizationPhase>();
  RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);

  if (FLAG_turbo_store_elimination) {
    Run<StoreStoreEliminationPhase>();
    RunPrintAndVerify(StoreStoreEliminationPhase::phase_name(), true);
  }

  // Optimize control flow.
  if (FLAG_turbo_cf_optimization) {
    Run<ControlFlowOptimizationPhase>();
    RunPrintAndVerify(ControlFlowOptimizationPhase::phase_name(), true);
  }

  Run<LateOptimizationPhase>();
  RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);

  // Optimize memory access and allocation operations.
  Run<MemoryOptimizationPhase>();
  RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
2631

2632 2633 2634 2635 2636 2637
  // Run value numbering and machine operator reducer to optimize load/store
  // address computation (in particular, reuse the address computation whenever
  // possible).
  Run<MachineOperatorOptimizationPhase>();
  RunPrintAndVerify(MachineOperatorOptimizationPhase::phase_name(), true);

2638 2639 2640
  Run<DecompressionOptimizationPhase>();
  RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);

2641
  data->source_positions()->RemoveDecorator();
2642
  if (data->info()->trace_turbo_json()) {
2643 2644
    data->node_origins()->RemoveDecorator();
  }
2645

2646 2647 2648
  ComputeScheduledGraph();

  return SelectInstructions(linkage);
2649
}
2650

bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
  PipelineData* data = this->data_;

  data->BeginPhaseKind("V8.TFLowering");

  // Type the graph and keep the Typer running such that new nodes get
  // automatically typed when they are created.
  Run<TyperPhase>(data->CreateTyper());
  RunPrintAndVerify(TyperPhase::phase_name());

  Run<TypedLoweringPhase>();
  RunPrintAndVerify(TypedLoweringPhase::phase_name());

  // TODO(9684): Consider rolling this into the preceding phase or not creating
  // LoopExit nodes at all.
  Run<LoopExitEliminationPhase>();
  RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);

  data->DeleteTyper();

  if (FLAG_assert_types) {
    Run<TypeAssertionsPhase>();
    RunPrintAndVerify(TypeAssertionsPhase::phase_name());
  }

  // Perform simplified lowering. This has to run w/o the Typer decorator,
  // because we cannot compute meaningful types anyway, and the computed types
  // might even conflict with the representation/truncation logic.
  Run<SimplifiedLoweringPhase>(linkage);
  RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);

  // From now on it is invalid to look at types on the nodes, because the types
  // on the nodes might not make sense after representation selection due to the
  // way we handle truncations; if we'd want to look at types afterwards we'd
  // essentially need to re-type (large portions of) the graph.

  // In order to catch bugs related to type access after this point, we now
  // remove the types from the nodes (currently only in Debug builds).
#ifdef DEBUG
  Run<UntyperPhase>();
  RunPrintAndVerify(UntyperPhase::phase_name(), true);
#endif

  // Run generic lowering pass.
  Run<GenericLoweringPhase>();
  RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);

  data->BeginPhaseKind("V8.TFBlockBuilding");

  data->InitializeFrameData(linkage->GetIncomingDescriptor());

  ComputeScheduledGraph();

  Run<ScheduledEffectControlLinearizationPhase>();
  RunPrintAndVerify(ScheduledEffectControlLinearizationPhase::phase_name(),
                    true);

  Run<ScheduledMachineLoweringPhase>();
  RunPrintAndVerify(ScheduledMachineLoweringPhase::phase_name(), true);

  // The DecompressionOptimizationPhase updates nodes' operations but does not
  // otherwise rewrite the graph, thus it is safe to run on a scheduled graph.
  Run<DecompressionOptimizationPhase>();
  RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);

  data->source_positions()->RemoveDecorator();
  if (data->info()->trace_turbo_json()) {
    data->node_origins()->RemoveDecorator();
  }

  return SelectInstructions(linkage);
}

namespace {

// Compute a hash of the given graph, in a way that should provide the same
// result in multiple runs of mksnapshot, meaning the hash cannot depend on any
// external pointer values or uncompressed heap constants. This hash can be used
// to reject profiling data if the builtin's current code doesn't match the
// version that was profiled. Hash collisions are not catastrophic; in the worst
// case, we just defer some blocks that ideally shouldn't be deferred. The
// result value is in the valid Smi range.
int HashGraphForPGO(Graph* graph) {
  AccountingAllocator allocator;
  Zone local_zone(&allocator, ZONE_NAME);

  constexpr NodeId kUnassigned = static_cast<NodeId>(-1);

  constexpr byte kUnvisited = 0;
  constexpr byte kOnStack = 1;
  constexpr byte kVisited = 2;

  // Do a depth-first post-order traversal of the graph. For every node, hash:
  //
  //   - the node's traversal number
  //   - the opcode
  //   - the number of inputs
  //   - each input node's traversal number
  //
  // What's a traversal number? We can't use node IDs because they're not stable
  // build-to-build, so we assign a new number for each node as it is visited.

  ZoneVector<byte> state(graph->NodeCount(), kUnvisited, &local_zone);
  ZoneVector<NodeId> traversal_numbers(graph->NodeCount(), kUnassigned,
                                       &local_zone);
  ZoneStack<Node*> stack(&local_zone);

  NodeId visited_count = 0;
  size_t hash = 0;

  stack.push(graph->end());
  state[graph->end()->id()] = kOnStack;
  traversal_numbers[graph->end()->id()] = visited_count++;
  while (!stack.empty()) {
    Node* n = stack.top();
    bool pop = true;
    for (Node* const i : n->inputs()) {
      if (state[i->id()] == kUnvisited) {
        state[i->id()] = kOnStack;
        traversal_numbers[i->id()] = visited_count++;
        stack.push(i);
        pop = false;
        break;
      }
    }
    if (pop) {
      state[n->id()] = kVisited;
      stack.pop();
      hash = base::hash_combine(hash, traversal_numbers[n->id()], n->opcode(),
                                n->InputCount());
      for (Node* const i : n->inputs()) {
        DCHECK(traversal_numbers[i->id()] != kUnassigned);
        hash = base::hash_combine(hash, traversal_numbers[i->id()]);
      }
    }
  }
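  // Fold the accumulated hash through IntToSmi so the result stays within the
  // valid Smi range, as noted in the function comment above.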
  return Smi(IntToSmi(static_cast<int>(hash))).value();
}

}  // namespace

MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
    Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
    JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
    const char* debug_name, int32_t builtin_index,
    PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
    const ProfileDataFromFile* profile_data) {
  OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
  info.set_builtin_index(builtin_index);

  if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
    info.SetPoisoningMitigationLevel(poisoning_level);
  }

  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(isolate->allocator());
  NodeOriginTable node_origins(graph);
  JumpOptimizationInfo jump_opt;
  bool should_optimize_jumps = isolate->serializer_enabled() &&
                               FLAG_turbo_rewrite_far_jumps &&
                               !FLAG_turbo_profiling;
  PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
                    jsgraph, nullptr, source_positions, &node_origins,
                    should_optimize_jumps ? &jump_opt : nullptr, options,
                    profile_data);
  PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
  RuntimeCallTimerScope timer_scope(isolate,
                                    RuntimeCallCounterId::kOptimizeCode);
  data.set_verify_graph(FLAG_verify_csa);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        &info, isolate->GetTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("V8.TFStubCodegen");
  }

  PipelineImpl pipeline(&data);

  if (info.trace_turbo_json() || info.trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
    if (info.trace_turbo_json()) {
      TurboJsonFile json_of(&info, std::ios_base::trunc);
      json_of << "{\"function\" : ";
      JsonPrintFunctionSource(json_of, -1, info.GetDebugName(),
                              Handle<Script>(), isolate,
                              Handle<SharedFunctionInfo>());
      json_of << ",\n\"phases\":[";
    }
    pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
  }

  pipeline.Run<CsaEarlyOptimizationPhase>();
  pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);

  // Optimize memory access and allocation operations.
  pipeline.Run<MemoryOptimizationPhase>();
  pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);

  pipeline.Run<CsaOptimizationPhase>();
  pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);

  pipeline.Run<DecompressionOptimizationPhase>();
  pipeline.RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(),
                             true);

  pipeline.Run<VerifyGraphPhase>(true);

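  // Hash the graph before scheduling. The hash both tags freshly collected
  // profile data (via SetHash below when FLAG_turbo_profiling is on) and is
  // compared against incoming profile data so that stale profiles for a
  // changed builtin are rejected.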
  int graph_hash_before_scheduling = 0;
  if (FLAG_turbo_profiling || profile_data != nullptr) {
    graph_hash_before_scheduling = HashGraphForPGO(data.graph());
  }

  if (profile_data != nullptr &&
      profile_data->hash() != graph_hash_before_scheduling) {
    PrintF("Rejected profile data for %s due to function change\n", debug_name);
    profile_data = nullptr;
    data.set_profile_data(profile_data);
  }

  pipeline.ComputeScheduledGraph();
  DCHECK_NOT_NULL(data.schedule());

  // First run code generation on a copy of the pipeline, in order to be able to
  // repeat it for jump optimization. The first run has to happen on a temporary
  // pipeline to avoid deletion of zones on the main pipeline.
  PipelineData second_data(&zone_stats, &info, isolate, isolate->allocator(),
                           data.graph(), data.jsgraph(), data.schedule(),
                           data.source_positions(), data.node_origins(),
                           data.jump_optimization_info(), options,
                           profile_data);
  PipelineJobScope second_scope(&second_data,
                                isolate->counters()->runtime_call_stats());
  second_data.set_verify_graph(FLAG_verify_csa);
  PipelineImpl second_pipeline(&second_data);
  second_pipeline.SelectInstructionsAndAssemble(call_descriptor);

  if (FLAG_turbo_profiling) {
    info.profiler_data()->SetHash(graph_hash_before_scheduling);
  }

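  // The first code generation pass above ran with the jump optimization info
  // in collecting mode. If it turned out to be optimizable, run code
  // generation again on the main pipeline in optimizing mode; otherwise keep
  // the code produced by the first pass.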
  if (jump_opt.is_optimizable()) {
    jump_opt.set_optimizing();
    return pipeline.GenerateCode(call_descriptor);
  } else {
    return second_pipeline.FinalizeCode();
  }
}

struct BlockStartsAsJSON {
  const ZoneVector<int>* block_starts;
};

std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
  out << ", \"blockIdToOffset\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.block_starts->size(); ++i) {
    if (need_comma) out << ", ";
    int offset = (*s.block_starts)[i];
    out << "\"" << i << "\":" << offset;
    need_comma = true;
  }
  out << "},";
  return out;
}

// static
wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
    wasm::WasmEngine* wasm_engine, CallDescriptor* call_descriptor,
    MachineGraph* mcgraph, CodeKind kind, int wasm_kind, const char* debug_name,
    const AssemblerOptions& options, SourcePositionTable* source_positions) {
  Graph* graph = mcgraph->graph();
  OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(wasm_engine->allocator());
  NodeOriginTable* node_positions = graph->zone()->New<NodeOriginTable>(graph);
  // {instruction_buffer} must live longer than {PipelineData}, since
  // {PipelineData} will reference the {instruction_buffer} via the
  // {AssemblerBuffer} of the {Assembler} contained in the {CodeGenerator}.
  std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
      wasm::WasmInstructionBuffer::New();
  PipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr,
                    source_positions, node_positions, options);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
  }

  PipelineImpl pipeline(&data);

  if (info.trace_turbo_json() || info.trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Begin compiling method " << info.GetDebugName().get()
        << " using TurboFan" << std::endl;
  }

  if (info.trace_turbo_graph()) {  // Simple textual RPO.
    StdoutStream{} << "-- wasm stub " << CodeKindToString(kind) << " graph -- "
                   << std::endl
                   << AsRPO(*graph);
  }

  if (info.trace_turbo_json()) {
    TurboJsonFile json_of(&info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info.GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }

  pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true);
  pipeline.ComputeScheduledGraph();

  Linkage linkage(call_descriptor);
  CHECK(pipeline.SelectInstructions(&linkage));
  pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());

  CodeGenerator* code_generator = pipeline.code_generator();
  wasm::WasmCompilationResult result;
  code_generator->tasm()->GetCode(
      nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
      static_cast<int>(code_generator->GetHandlerTableOffset()));
  result.instr_buffer = instruction_buffer->ReleaseBuffer();
  result.source_positions = code_generator->GetSourcePositionTable();
  result.protected_instructions_data =
      code_generator->GetProtectedInstructionsData();
  result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
  result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
  result.result_tier = wasm::ExecutionTier::kTurbofan;

  DCHECK(result.succeeded());

  if (info.trace_turbo_json()) {
    TurboJsonFile json_of(&info, std::ios_base::app);
    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&code_generator->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembler_stream;
    Disassembler::Decode(
        nullptr, &disassembler_stream, result.code_desc.buffer,
        result.code_desc.buffer + result.code_desc.safepoint_table_offset,
        CodeReference(&result.code_desc));
    for (auto const c : disassembler_stream.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n]";
    json_of << "\n}";
  }

  if (info.trace_turbo_json() || info.trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Finished compiling method " << info.GetDebugName().get()
        << " using TurboFan" << std::endl;
  }

  return result;
}

// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
    OptimizedCompilationInfo* info, Isolate* isolate,
    std::unique_ptr<JSHeapBroker>* out_broker) {
  ZoneStats zone_stats(isolate->allocator());
  std::unique_ptr<PipelineStatistics> pipeline_statistics(
      CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
                               &zone_stats));

  PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get(),
                    i::FLAG_concurrent_inlining);
  PipelineImpl pipeline(&data);

  Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));

  {
    CompilationHandleScope compilation_scope(isolate, info);
    CanonicalHandleScope canonical(isolate, info);
    info->ReopenHandlesInNewHandleScope(isolate);
    pipeline.Serialize();
    // Emulating the proper pipeline, we call CreateGraph in different places
    // (i.e. before or after creating a LocalIsolateScope) depending on
    // is_concurrent_inlining.
    if (!data.broker()->is_concurrent_inlining()) {
      if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
    }
  }

  {
    LocalIsolateScope local_isolate_scope(data.broker(), info,
                                          isolate->main_thread_local_isolate());
    if (data.broker()->is_concurrent_inlining()) {
      if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
    }
    // We selectively Unpark inside OptimizeGraph.
    if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();

    pipeline.AssembleCode(&linkage);
  }

  const bool will_retire_broker = out_broker == nullptr;
  if (!will_retire_broker) {
    // If the broker is going to be kept alive, pass the persistent and the
    // canonical handles containers back to the JSHeapBroker since it will
    // outlive the OptimizedCompilationInfo.
    data.broker()->SetPersistentAndCopyCanonicalHandlesForTesting(
        info->DetachPersistentHandles(), info->DetachCanonicalHandles());
  }

  Handle<Code> code;
  if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    if (!will_retire_broker) *out_broker = data.ReleaseBroker();
    return code;
  }
  return MaybeHandle<Code>();
}

// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
    OptimizedCompilationInfo* info, Isolate* isolate,
    CallDescriptor* call_descriptor, Graph* graph,
    const AssemblerOptions& options, Schedule* schedule) {
  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(isolate->allocator());
  NodeOriginTable* node_positions = info->zone()->New<NodeOriginTable>(graph);
  PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
                    nullptr, schedule, nullptr, node_positions, nullptr,
                    options, nullptr);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        info, isolate->GetTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("V8.TFTestCodegen");
  }

  PipelineImpl pipeline(&data);

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info->GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }
  // TODO(rossberg): Should this really be untyped?
  pipeline.RunPrintAndVerify("V8.TFMachineCode", true);

  // Ensure we have a schedule.
  if (data.schedule() == nullptr) {
    pipeline.ComputeScheduledGraph();
  }

  Handle<Code> code;
  if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    return code;
  }
  return MaybeHandle<Code>();
}

// static
std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
    Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
    bool has_script, BailoutId osr_offset, JavaScriptFrame* osr_frame) {
  Handle<SharedFunctionInfo> shared =
      handle(function->shared(), function->GetIsolate());
  return std::make_unique<PipelineCompilationJob>(
      isolate, shared, function, osr_offset, osr_frame, code_kind);
}

// static
void Pipeline::GenerateCodeForWasmFunction(
    OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
    MachineGraph* mcgraph, CallDescriptor* call_descriptor,
    SourcePositionTable* source_positions, NodeOriginTable* node_origins,
    wasm::FunctionBody function_body, const wasm::WasmModule* module,
    int function_index) {
  ZoneStats zone_stats(wasm_engine->allocator());
  std::unique_ptr<PipelineStatistics> pipeline_statistics(
      CreatePipelineStatistics(wasm_engine, function_body, module, info,
                               &zone_stats));
  // {instruction_buffer} must live longer than {PipelineData}, since
  // {PipelineData} will reference the {instruction_buffer} via the
  // {AssemblerBuffer} of the {Assembler} contained in the {CodeGenerator}.
  std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
      wasm::WasmInstructionBuffer::New();
  PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
                    pipeline_statistics.get(), source_positions, node_origins,
                    WasmAssemblerOptions());

  PipelineImpl pipeline(&data);

  if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Begin compiling method " << data.info()->GetDebugName().get()
        << " using TurboFan" << std::endl;
  }

  pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);

  data.BeginPhaseKind("V8.WasmOptimization");
  const bool is_asm_js = is_asmjs_module(module);
  if (FLAG_turbo_splitting && !is_asm_js) {
    data.info()->set_splitting();
  }
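  // When wasm optimizations are enabled (or the module is asm.js), run the
  // full reducer stack below: dead code elimination, machine operator
  // reduction, common operator reduction, and value numbering. Otherwise only
  // value numbering is applied.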
  if (FLAG_wasm_opt || is_asm_js) {
    PipelineRunScope scope(&data, "V8.WasmFullOptimization",
                           RuntimeCallCounterId::kOptimizeWasmFullOptimization);
    GraphReducer graph_reducer(scope.zone(), data.graph(),
                               &data.info()->tick_counter(), data.broker(),
                               data.mcgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
                                              data.common(), scope.zone());
    ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
    const bool allow_signalling_nan = is_asm_js;
    MachineOperatorReducer machine_reducer(&graph_reducer, data.mcgraph(),
                                           allow_signalling_nan);
    CommonOperatorReducer common_reducer(&graph_reducer, data.graph(),
                                         data.broker(), data.common(),
                                         data.machine(), scope.zone());
    AddReducer(&data, &graph_reducer, &dead_code_elimination);
    AddReducer(&data, &graph_reducer, &machine_reducer);
    AddReducer(&data, &graph_reducer, &common_reducer);
    AddReducer(&data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  } else {
    PipelineRunScope scope(&data, "V8.OptimizeWasmBaseOptimization",
                           RuntimeCallCounterId::kOptimizeWasmBaseOptimization);
    GraphReducer graph_reducer(scope.zone(), data.graph(),
                               &data.info()->tick_counter(), data.broker(),
                               data.mcgraph()->Dead());
    ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
    AddReducer(&data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
  pipeline.RunPrintAndVerify("V8.WasmOptimization", true);

  if (data.node_origins()) {
    data.node_origins()->RemoveDecorator();
  }

  pipeline.ComputeScheduledGraph();

  Linkage linkage(call_descriptor);
  if (!pipeline.SelectInstructions(&linkage)) return;
  pipeline.AssembleCode(&linkage, instruction_buffer->CreateView());

  auto result = std::make_unique<wasm::WasmCompilationResult>();
  CodeGenerator* code_generator = pipeline.code_generator();
  code_generator->tasm()->GetCode(
      nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
      static_cast<int>(code_generator->GetHandlerTableOffset()));

  result->instr_buffer = instruction_buffer->ReleaseBuffer();
  result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
  result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
  result->source_positions = code_generator->GetSourcePositionTable();
  result->protected_instructions_data =
      code_generator->GetProtectedInstructionsData();
  result->result_tier = wasm::ExecutionTier::kTurbofan;

  if (data.info()->trace_turbo_json()) {
    TurboJsonFile json_of(data.info(), std::ios_base::app);
    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&code_generator->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembler_stream;
    Disassembler::Decode(
        nullptr, &disassembler_stream, result->code_desc.buffer,
        result->code_desc.buffer + result->code_desc.safepoint_table_offset,
        CodeReference(&result->code_desc));
    for (auto const c : disassembler_stream.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n]";
    json_of << "\n}";
  }

  if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Finished compiling method " << data.info()->GetDebugName().get()
        << " using TurboFan" << std::endl;
  }

  DCHECK(result->succeeded());
  info->SetWasmCompilationResult(std::move(result));
}

bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
                                           InstructionSequence* sequence,
                                           bool use_mid_tier_register_allocator,
                                           bool run_verifier) {
  OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
                                CodeKind::FOR_TESTING);
  ZoneStats zone_stats(sequence->isolate()->allocator());
  PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
  data.InitializeFrameData(nullptr);

  if (info.trace_turbo_json()) {
    TurboJsonFile json_of(&info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info.GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }

  PipelineImpl pipeline(&data);
  if (use_mid_tier_register_allocator) {
    pipeline.AllocateRegistersForMidTier(config, nullptr, run_verifier);
  } else {
    pipeline.AllocateRegistersForTopTier(config, nullptr, run_verifier);
  }

  return !data.compilation_failed();
}

void PipelineImpl::ComputeScheduledGraph() {
  PipelineData* data = this->data_;

  // We should only schedule the graph if it is not scheduled yet.
  DCHECK_NULL(data->schedule());

  Run<LateGraphTrimmingPhase>();
  RunPrintAndVerify(LateGraphTrimmingPhase::phase_name(), true);

  Run<ComputeSchedulePhase>();
  TraceScheduleAndVerify(data->info(), data, data->schedule(), "schedule");
}

bool PipelineImpl::SelectInstructions(Linkage* linkage) {
  auto call_descriptor = linkage->GetIncomingDescriptor();
  PipelineData* data = this->data_;

  // We should have a scheduled graph.
  DCHECK_NOT_NULL(data->graph());
  DCHECK_NOT_NULL(data->schedule());

  if (FLAG_turbo_profiling) {
    data->info()->set_profiler_data(BasicBlockInstrumentor::Instrument(
        info(), data->graph(), data->schedule(), data->isolate()));
  }

  bool verify_stub_graph =
      data->verify_graph() ||
      (FLAG_turbo_verify_machine_graph != nullptr &&
       (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
        !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())));
  // Jump optimization runs instruction selection twice, but the instruction
  // selector mutates nodes like swapping the inputs of a load, which can
  // violate the machine graph verification rules. So we skip the second
  // verification on a graph that has already been verified.
  auto jump_opt = data->jump_optimization_info();
  if (jump_opt && jump_opt->is_optimizing()) {
    verify_stub_graph = false;
  }
  if (verify_stub_graph) {
    if (FLAG_trace_verify_csa) {
      UnparkedScopeIfNeeded scope(data->broker());
      AllowHandleDereference allow_deref;
      CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
      tracing_scope.stream()
          << "--------------------------------------------------\n"
          << "--- Verifying " << data->debug_name()
          << " generated by TurboFan\n"
          << "--------------------------------------------------\n"
          << *data->schedule()
          << "--------------------------------------------------\n"
          << "--- End of " << data->debug_name() << " generated by TurboFan\n"
          << "--------------------------------------------------\n";
    }
    // TODO(jgruber): The parameter is called is_stub but actually contains
    // something different. Update either the name or its contents.
    const bool is_stub =
        !data->info()->IsOptimizing() && !data->info()->IsWasm();
    Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
    MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage, is_stub,
                              data->debug_name(), &temp_zone);
  }

  data->InitializeInstructionSequence(call_descriptor);

  // Depending on which code path led us to this function, the frame may or
  // may not have been initialized. If it hasn't yet, initialize it now.
  if (!data->frame()) {
    data->InitializeFrameData(call_descriptor);
  }
  // Select and schedule instructions covering the scheduled graph.
  Run<InstructionSelectionPhase>(linkage);
  if (data->compilation_failed()) {
    info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
    data->EndPhaseKind();
    return false;
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboCfgFile tcf(isolate());
    tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
                 data->sequence());
  }

  if (info()->trace_turbo_json()) {
    std::ostringstream source_position_output;
    // Output source position information before the graph is deleted.
    if (data_->source_positions() != nullptr) {
      data_->source_positions()->PrintJson(source_position_output);
    } else {
      source_position_output << "{}";
    }
    source_position_output << ",\n\"NodeOrigins\" : ";
    data_->node_origins()->PrintJson(source_position_output);
    data_->set_source_position_output(source_position_output.str());
  }

  data->DeleteGraphZone();

  data->BeginPhaseKind("V8.TFRegisterAllocation");

  bool run_verifier = FLAG_turbo_verify_allocation;

  // Allocate registers.
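  // Three configurations are possible: a register set restricted by the call
  // descriptor, the poisoning configuration when speculation poisoning is
  // enabled, or the default configuration, which for Turboprop code may be
  // paired with the mid-tier allocator (FLAG_turboprop_mid_tier_reg_alloc).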
  if (call_descriptor->HasRestrictedAllocatableRegisters()) {
    RegList registers = call_descriptor->AllocatableRegisters();
    DCHECK_LT(0, NumRegs(registers));
    std::unique_ptr<const RegisterConfiguration> config;
    config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
    AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier);
  } else {
    const RegisterConfiguration* config;
    if (data->info()->GetPoisoningMitigationLevel() !=
        PoisoningMitigationLevel::kDontPoison) {
#ifdef V8_TARGET_ARCH_IA32
      FATAL("Poisoning is not supported on ia32.");
#else
      config = RegisterConfiguration::Poisoning();
#endif  // V8_TARGET_ARCH_IA32
    } else {
      config = RegisterConfiguration::Default();
    }

    if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
      AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
    } else {
      AllocateRegistersForTopTier(config, call_descriptor, run_verifier);
    }
  }

  // Verify the instruction sequence has the same hash in two stages.
  VerifyGeneratedCodeIsIdempotent();

  Run<FrameElisionPhase>();
  if (data->compilation_failed()) {
    info()->AbortOptimization(
        BailoutReason::kNotEnoughVirtualRegistersRegalloc);
    data->EndPhaseKind();
    return false;
  }

  // TODO(mtrofin): move this off to the register allocator.
  bool generate_frame_at_start =
      data_->sequence()->instruction_blocks().front()->must_construct_frame();
  // Optimize jumps.
  if (FLAG_turbo_jt) {
    Run<JumpThreadingPhase>(generate_frame_at_start);
  }

  data->EndPhaseKind();

  return true;
}

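// Computes a structural hash of the instruction sequence (block count, virtual
// register count, opcodes, operand counts, and representations). During jump
// optimization the pipeline runs twice: the first (collecting) pass records
// the hash and the second (optimizing) pass checks that it is unchanged.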
void PipelineImpl::VerifyGeneratedCodeIsIdempotent() {
  PipelineData* data = this->data_;
  JumpOptimizationInfo* jump_opt = data->jump_optimization_info();
  if (jump_opt == nullptr) return;

  InstructionSequence* code = data->sequence();
  int instruction_blocks = code->InstructionBlockCount();
  int virtual_registers = code->VirtualRegisterCount();
  size_t hash_code = base::hash_combine(instruction_blocks, virtual_registers);
  for (auto instr : *code) {
    hash_code = base::hash_combine(hash_code, instr->opcode(),
                                   instr->InputCount(), instr->OutputCount());
  }
  for (int i = 0; i < virtual_registers; i++) {
    hash_code = base::hash_combine(hash_code, code->GetRepresentation(i));
  }
  if (jump_opt->is_collecting()) {
    jump_opt->set_hash_code(hash_code);
  } else {
    CHECK_EQ(hash_code, jump_opt->hash_code());
  }
}

struct InstructionStartsAsJSON {
  const ZoneVector<TurbolizerInstructionStartInfo>* instr_starts;
};

std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
  out << ", \"instructionOffsetToPCOffset\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.instr_starts->size(); ++i) {
    if (need_comma) out << ", ";
    const TurbolizerInstructionStartInfo& info = (*s.instr_starts)[i];
    out << "\"" << i << "\": {";
    out << "\"gap\": " << info.gap_pc_offset;
    out << ", \"arch\": " << info.arch_instr_pc_offset;
    out << ", \"condition\": " << info.condition_pc_offset;
    out << "}";
    need_comma = true;
  }
  out << "}";
  return out;
}

struct TurbolizerCodeOffsetsInfoAsJSON {
  const TurbolizerCodeOffsetsInfo* offsets_info;
};

std::ostream& operator<<(std::ostream& out,
                         const TurbolizerCodeOffsetsInfoAsJSON& s) {
  out << ", \"codeOffsetsInfo\": {";
  out << "\"codeStartRegisterCheck\": "
      << s.offsets_info->code_start_register_check << ", ";
  out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
  out << "\"initPoison\": " << s.offsets_info->init_poison << ", ";
  out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
  out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
  out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
      << ", ";
  out << "\"pools\": " << s.offsets_info->pools << ", ";
  out << "\"jumpTables\": " << s.offsets_info->jump_tables;
  out << "}";
  return out;
}

void PipelineImpl::AssembleCode(Linkage* linkage,
                                std::unique_ptr<AssemblerBuffer> buffer) {
  PipelineData* data = this->data_;
  data->BeginPhaseKind("V8.TFCodeGeneration");
  data->InitializeCodeGenerator(linkage, std::move(buffer));

  UnparkedScopeIfNeeded unparked_scope(data->broker(), FLAG_code_comments);

  Run<AssembleCodePhase>();
  if (data->info()->trace_turbo_json()) {
    TurboJsonFile json_of(data->info(), std::ios_base::app);
    json_of << "{\"name\":\"code generation\""
            << ", \"type\":\"instructions\""
            << InstructionStartsAsJSON{&data->code_generator()->instr_starts()}
            << TurbolizerCodeOffsetsInfoAsJSON{
                   &data->code_generator()->offsets_info()};
    json_of << "},\n";
  }
  data->DeleteInstructionZone();
  data->EndPhaseKind();
}

MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
  PipelineData* data = this->data_;
  data->BeginPhaseKind("V8.TFFinalizeCode");
  if (data->broker() && retire_broker) {
    data->broker()->Retire();
  }
  Run<FinalizeCodePhase>();

  MaybeHandle<Code> maybe_code = data->code();
  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    return maybe_code;
  }

  info()->SetCode(code);
  PrintCode(isolate(), code, info());

  if (info()->trace_turbo_json()) {
    TurboJsonFile json_of(info(), std::ios_base::app);

    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&data->code_generator()->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembly_stream;
    code->Disassemble(nullptr, disassembly_stream, isolate());
    std::string disassembly_string(disassembly_stream.str());
    for (const auto& c : disassembly_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n],\n";
    json_of << "\"nodePositions\":";
    json_of << data->source_position_output() << ",\n";
    JsonPrintAllSourceWithPositions(json_of, data->info(), isolate());
    json_of << "\n}";
  }
  if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Finished compiling method " << info()->GetDebugName().get()
        << " using TurboFan" << std::endl;
  }
  data->EndPhaseKind();
  return code;
}

bool PipelineImpl::SelectInstructionsAndAssemble(
    CallDescriptor* call_descriptor) {
  Linkage linkage(call_descriptor);

  // Perform instruction selection and register allocation.
  if (!SelectInstructions(&linkage)) return false;

  // Generate the final machine code.
  AssembleCode(&linkage);
  return true;
}

MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
  if (!SelectInstructionsAndAssemble(call_descriptor)) {
    return MaybeHandle<Code>();
  }
  return FinalizeCode();
}

bool PipelineImpl::CommitDependencies(Handle<Code> code) {
  return data_->dependencies() == nullptr ||
         data_->dependencies()->Commit(code);
}

namespace {

void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
                   const char* phase_name) {
  if (info->trace_turbo_json()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\""
            << ",\"blocks\":" << InstructionSequenceAsJSON{data->sequence()}
            << ",\"register_allocation\":{"
            << RegisterAllocationDataAsJSON{*(data->register_allocation_data()),
                                            *(data->sequence())}
            << "}},\n";
  }
  if (info->trace_turbo_graph()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream() << "----- Instruction sequence " << phase_name
                           << " -----\n"
                           << *data->sequence();
  }
}

}  // namespace

void PipelineImpl::AllocateRegistersForTopTier(
    const RegisterConfiguration* config, CallDescriptor* call_descriptor,
    bool run_verifier) {
  PipelineData* data = this->data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    verifier_zone.reset(
        new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
    verifier = verifier_zone->New<RegisterAllocatorVerifier>(
        verifier_zone.get(), config, data->sequence(), data->frame());
  }

#ifdef DEBUG
  data_->sequence()->ValidateEdgeSplitForm();
  data_->sequence()->ValidateDeferredBlockEntryPaths();
  data_->sequence()->ValidateDeferredBlockExitPaths();
#endif

  RegisterAllocationFlags flags;
  if (data->info()->trace_turbo_allocation()) {
    flags |= RegisterAllocationFlag::kTraceAllocation;
  }
  data->InitializeTopTierRegisterAllocationData(config, call_descriptor, flags);

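  // Build the allocator's input before the linear-scan passes run: meet
  // register constraints, resolve phis, build live ranges, and bundle them.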
  Run<MeetRegisterConstraintsPhase>();
  Run<ResolvePhisPhase>();
  Run<BuildLiveRangesPhase>();
  Run<BuildBundlesPhase>();

  TraceSequence(info(), data, "before register allocation");
  if (verifier != nullptr) {
    CHECK(!data->top_tier_register_allocation_data()
               ->ExistsUseWithoutDefinition());
    CHECK(data->top_tier_register_allocation_data()
              ->RangesDefinedInDeferredStayInDeferred());
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData(
        "PreAllocation", data->top_tier_register_allocation_data());
  }

  Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();

  if (data->sequence()->HasFPVirtualRegisters()) {
    Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
  }

  Run<DecideSpillingModePhase>();
  Run<AssignSpillSlotsPhase>();
  Run<CommitAssignmentPhase>();

  // TODO(chromium:725559): remove this check once
  // we understand the cause of the bug. We keep just the
  // check at the end of the allocation.
  if (verifier != nullptr) {
    verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
  }

  Run<ConnectRangesPhase>();

  Run<ResolveControlFlowPhase>();

  Run<PopulateReferenceMapsPhase>();

  if (FLAG_turbo_move_optimization) {
    Run<OptimizeMovesPhase>();
  }

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData(
        "CodeGen", data->top_tier_register_allocation_data());
  }

  data->DeleteRegisterAllocationZone();
}

void PipelineImpl::AllocateRegistersForMidTier(
    const RegisterConfiguration* config, CallDescriptor* call_descriptor,
    bool run_verifier) {
  PipelineData* data = data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    verifier_zone.reset(
        new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
    verifier = verifier_zone->New<RegisterAllocatorVerifier>(
        verifier_zone.get(), config, data->sequence(), data->frame());
  }

#ifdef DEBUG
  data->sequence()->ValidateEdgeSplitForm();
  data->sequence()->ValidateDeferredBlockEntryPaths();
  data->sequence()->ValidateDeferredBlockExitPaths();
#endif
  data->InitializeMidTierRegisterAllocationData(config, call_descriptor);

  TraceSequence(info(), data, "before register allocation");

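  // The mid-tier allocator runs as a fixed sequence of phases: define register
  // outputs, allocate registers, allocate spill slots, then populate the
  // reference maps.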
  Run<MidTierRegisterOutputDefinitionPhase>();

  Run<MidTierRegisterAllocatorPhase>();

  Run<MidTierSpillSlotAllocatorPhase>();

  Run<MidTierPopulateReferenceMapsPhase>();

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  data->DeleteRegisterAllocationZone();
}

OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }

Isolate* PipelineImpl::isolate() const { return data_->isolate(); }

CodeGenerator* PipelineImpl::code_generator() const {
  return data_->code_generator();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8