// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/codegen/cpu-features.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-scheduler.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/zone/zone-containers.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/simd-shuffle.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

class TickCounter;

namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class Linkage;
class OperandGenerator;
class SwitchInfo;
class StateObjectDeduplicator;

// The flags continuation is a way to combine a branch or a materialization
// of a boolean value with an instruction that sets the flags register.
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
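//
// A minimal usage sketch (illustrative only; the opcode and operand names are
// placeholders, not part of this header): a backend combining a compare with
// the branch that consumes it might do
//
//   FlagsContinuation cont =
//       FlagsContinuation::ForBranch(kEqual, tbranch, fbranch);
//   selector->EmitWithContinuation(kPlaceholderCompareOpcode, left, right,
//                                  &cont);
//
// so that the register allocator keeps the compare and the branch together.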
class FlagsContinuation final {
 public:
  FlagsContinuation() : mode_(kFlags_none) {}

  // Creates a new flags continuation from the given condition and true/false
  // blocks.
  static FlagsContinuation ForBranch(FlagsCondition condition,
                                     BasicBlock* true_block,
                                     BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
  }

  static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
                                              BasicBlock* true_block,
                                              BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
                             false_block);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimize(
      FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
      FeedbackSource const& feedback, Node* frame_state,
      InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
    return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
                             feedback, frame_state, extra_args,
                             extra_args_count);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimizeAndPoison(
      FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
      FeedbackSource const& feedback, Node* frame_state,
      InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
    return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
                             reason, feedback, frame_state, extra_args,
                             extra_args_count);
  }

  // Creates a new flags continuation for a boolean value.
  static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
    return FlagsContinuation(condition, result);
  }

  // Creates a new flags continuation for a wasm trap.
  static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id,
                                   Node* result) {
    return FlagsContinuation(condition, trap_id, result);
  }

  static FlagsContinuation ForSelect(FlagsCondition condition, Node* result,
                                     Node* true_value, Node* false_value) {
    return FlagsContinuation(condition, result, true_value, false_value);
  }

  bool IsNone() const { return mode_ == kFlags_none; }
  bool IsBranch() const {
    return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
  }
  bool IsDeoptimize() const {
    return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsPoisoned() const {
    return mode_ == kFlags_branch_and_poison ||
           mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsSet() const { return mode_ == kFlags_set; }
  bool IsTrap() const { return mode_ == kFlags_trap; }
  bool IsSelect() const { return mode_ == kFlags_select; }
  FlagsCondition condition() const {
    DCHECK(!IsNone());
    return condition_;
  }
  DeoptimizeKind kind() const {
    DCHECK(IsDeoptimize());
    return kind_;
  }
  DeoptimizeReason reason() const {
    DCHECK(IsDeoptimize());
    return reason_;
  }
  FeedbackSource const& feedback() const {
    DCHECK(IsDeoptimize());
    return feedback_;
  }
  Node* frame_state() const {
    DCHECK(IsDeoptimize());
    return frame_state_or_result_;
  }
  bool has_extra_args() const {
    DCHECK(IsDeoptimize());
    return extra_args_ != nullptr;
  }
  const InstructionOperand* extra_args() const {
    DCHECK(has_extra_args());
    return extra_args_;
  }
  int extra_args_count() const {
    DCHECK(has_extra_args());
    return extra_args_count_;
  }
  Node* result() const {
    DCHECK(IsSet() || IsSelect());
    return frame_state_or_result_;
  }
  TrapId trap_id() const {
    DCHECK(IsTrap());
    return trap_id_;
  }
  BasicBlock* true_block() const {
    DCHECK(IsBranch());
    return true_block_;
  }
  BasicBlock* false_block() const {
    DCHECK(IsBranch());
    return false_block_;
  }
  Node* true_value() const {
    DCHECK(IsSelect());
    return true_value_;
  }
  Node* false_value() const {
    DCHECK(IsSelect());
    return false_value_;
  }

  void Negate() {
    DCHECK(!IsNone());
    condition_ = NegateFlagsCondition(condition_);
  }

  void Commute() {
    DCHECK(!IsNone());
    condition_ = CommuteFlagsCondition(condition_);
  }

  void Overwrite(FlagsCondition condition) { condition_ = condition; }

  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
    DCHECK(condition_ == kEqual || condition_ == kNotEqual);
    bool negate = condition_ == kEqual;
    condition_ = condition;
    if (negate) Negate();
  }

  void OverwriteUnsignedIfSigned() {
    switch (condition_) {
      case kSignedLessThan:
        condition_ = kUnsignedLessThan;
        break;
      case kSignedLessThanOrEqual:
        condition_ = kUnsignedLessThanOrEqual;
        break;
      case kSignedGreaterThan:
        condition_ = kUnsignedGreaterThan;
        break;
      case kSignedGreaterThanOrEqual:
        condition_ = kUnsignedGreaterThanOrEqual;
        break;
      default:
        break;
    }
  }

  // Encodes this flags continuation into the given opcode.
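  // Illustrative sketch of how the encoded bits can be recovered later (the
  // opcode below is a placeholder; FlagsModeField and FlagsConditionField are
  // the bit fields used in the body):
  //
  //   InstructionCode opcode = cont.Encode(kPlaceholderOpcode);
  //   FlagsMode mode = FlagsModeField::decode(opcode);
  //   FlagsCondition condition = FlagsConditionField::decode(opcode);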
  InstructionCode Encode(InstructionCode opcode) {
    opcode |= FlagsModeField::encode(mode_);
    if (mode_ != kFlags_none) {
      opcode |= FlagsConditionField::encode(condition_);
    }
    return opcode;
  }

 private:
  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    BasicBlock* true_block, BasicBlock* false_block)
      : mode_(mode),
        condition_(condition),
        true_block_(true_block),
        false_block_(false_block) {
    DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
    DCHECK_NOT_NULL(true_block);
    DCHECK_NOT_NULL(false_block);
  }

  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    DeoptimizeKind kind, DeoptimizeReason reason,
                    FeedbackSource const& feedback, Node* frame_state,
                    InstructionOperand* extra_args, int extra_args_count)
      : mode_(mode),
        condition_(condition),
        kind_(kind),
        reason_(reason),
        feedback_(feedback),
        frame_state_or_result_(frame_state),
        extra_args_(extra_args),
        extra_args_count_(extra_args_count) {
    DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
    DCHECK_NOT_NULL(frame_state);
  }

  FlagsContinuation(FlagsCondition condition, Node* result)
      : mode_(kFlags_set),
        condition_(condition),
        frame_state_or_result_(result) {
    DCHECK_NOT_NULL(result);
  }

  FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result)
      : mode_(kFlags_trap),
        condition_(condition),
        frame_state_or_result_(result),
        trap_id_(trap_id) {
    DCHECK_NOT_NULL(result);
  }

  FlagsContinuation(FlagsCondition condition, Node* result,
                    Node* true_value, Node* false_value)
      : mode_(kFlags_select),
        condition_(condition),
        frame_state_or_result_(result),
        true_value_(true_value),
        false_value_(false_value) {
    DCHECK_NOT_NULL(result);
    DCHECK_NOT_NULL(true_value);
    DCHECK_NOT_NULL(false_value);
  }

  FlagsMode const mode_;
  FlagsCondition condition_;
  DeoptimizeKind kind_;             // Only valid if mode_ == kFlags_deoptimize*
  DeoptimizeReason reason_;         // Only valid if mode_ == kFlags_deoptimize*
  FeedbackSource feedback_;         // Only valid if mode_ == kFlags_deoptimize*
  Node* frame_state_or_result_;     // Only valid if mode_ == kFlags_deoptimize*
                                    // or mode_ == kFlags_set.
  InstructionOperand* extra_args_;  // Only valid if mode_ == kFlags_deoptimize*
  int extra_args_count_;            // Only valid if mode_ == kFlags_deoptimize*
  BasicBlock* true_block_;          // Only valid if mode_ == kFlags_branch*.
  BasicBlock* false_block_;         // Only valid if mode_ == kFlags_branch*.
  TrapId trap_id_;                  // Only valid if mode_ == kFlags_trap.
  Node* true_value_;                // Only valid if mode_ == kFlags_select.
  Node* false_value_;               // Only valid if mode_ == kFlags_select.
};

// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
struct PushParameter {
  PushParameter(Node* n = nullptr,
                LinkageLocation l = LinkageLocation::ForAnyRegister())
      : node(n), location(l) {}

  Node* node;
  LinkageLocation location;
};

enum class FrameStateInputKind { kAny, kStackSlot };

// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
  enum EnableRootsRelativeAddressing {
    kDisableRootsRelativeAddressing,
    kEnableRootsRelativeAddressing
  };
  enum EnableSwitchJumpTable {
    kDisableSwitchJumpTable,
    kEnableSwitchJumpTable
  };
  enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
      JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
      size_t* max_pushed_argument_count,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
                                               : kDisableScheduling,
      EnableRootsRelativeAddressing enable_roots_relative_addressing =
          kDisableRootsRelativeAddressing,
      PoisoningMitigationLevel poisoning_level =
          PoisoningMitigationLevel::kDontPoison,
      EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);

  // Visit code for the entire graph with the included schedule.
  bool SelectInstructions();

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);
  void AddTerminator(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);
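
  // A minimal emission sketch (illustrative only; the opcode is a placeholder):
  // backends typically pair Emit() with an OperandGenerator to build operands.
  //
  //   OperandGenerator g(this);
  //   Emit(kPlaceholderOpcode, g.DefineAsRegister(node),
  //        g.UseRegister(node->InputAt(0)));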

  // [0-3] operand instructions with no output, using labels for the true and
  // false blocks of the continuation.
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    InstructionOperand c,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
                                    InstructionOperand* outputs,
                                    size_t input_count,
                                    InstructionOperand* inputs,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(
      InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
      size_t input_count, InstructionOperand* inputs, size_t temp_count,
      InstructionOperand* temps, FlagsContinuation* cont);

  void EmitIdentity(Node* node);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }
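
  // Illustrative check (the feature name is an example and is
  // architecture-specific; not every architecture defines it):
  //
  //   if (IsSupported(AVX)) { /* emit the AVX variant */ }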

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  bool NeedsPoisoning(IsSafetyCheck safety_check) const;

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node has the only
  // edge and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;
  // CanCover is not transitive. A counterexample is nodes A, B, C such that
  // CanCover(A, B) and CanCover(B, C) hold and B is pure: the effect levels of
  // A and B might still differ. CanCoverTransitively does the additional
  // checks.
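  // For instance (illustrative): with A = Word32Equal(B, 0),
  // B = Int32Add(x, C), and C = Load(...), an effectful node scheduled between
  // B and A leaves CanCover(A, B) and CanCover(B, C) intact but gives A a
  // higher effect level than C, so A must not cover C directly.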
  bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;

  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block.  This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together.  If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  // Gets the effect level of {node}, appropriately adjusted based on
  // continuation flags if the node is a branch.
  int GetEffectLevel(Node* node, FlagsContinuation* cont) const;

  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register.
  bool CanAddressRelativeToRootsRegister(
      const ExternalReference& reference) const;
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;

  Isolate* isolate() const { return sequence()->isolate(); }

  const ZoneVector<std::pair<int, int>>& instr_origins() const {
    return instr_origins_;
  }

 private:
  friend class OperandGenerator;

  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

  void AppendDeoptimizeArguments(InstructionOperandVector* args,
                                 DeoptimizeKind kind, DeoptimizeReason reason,
                                 FeedbackSource const& feedback,
                                 FrameState frame_state);

  void EmitTableSwitch(const SwitchInfo& sw,
                       InstructionOperand const& index_operand);
  void EmitBinarySearchSwitch(const SwitchInfo& sw,
                              InstructionOperand const& value_operand);

  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
  void MarkAsTagged(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }
  void MarkAsCompressed(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kCompressed, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2,
    kCallFixedTargetRegister = 1u << 3
  };
  using CallBufferFlags = base::Flags<CallBufferFlag>;

  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, bool is_tail_call,
                            int stack_slot_delta = 0);
  bool IsTailCallAddressImmediate();

  void UpdateMaxPushedArgumentCount(size_t count);

  FrameStateDescriptor* GetFrameStateDescriptor(FrameState node);
  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                         FrameState state, OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         InstructionOperandVector* inputs,
                                         FrameStateInputKind kind, Zone* zone);
  size_t AddInputsToFrameStateDescriptor(StateValueList* values,
                                         InstructionOperandVector* inputs,
                                         OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         Node* node, FrameStateInputKind kind,
                                         Zone* zone);
  size_t AddOperandToStateValueDescriptor(StateValueList* values,
                                          InstructionOperandVector* inputs,
                                          OperandGenerator* g,
                                          StateObjectDeduplicator* deduplicator,
                                          Node* input, MachineType type,
                                          FrameStateInputKind kind, Zone* zone);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR

  // Visit the load node with a value and opcode to replace with.
  void VisitLoad(Node* node, Node* value, InstructionCode opcode);
  void VisitLoadTransform(Node* node, Node* value, InstructionCode opcode);
  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitDynamicCheckMapsWithDeoptUnless(Node* node);
  void VisitTrapIf(Node* node, TrapId trap_id);
  void VisitTrapUnless(Node* node, TrapId trap_id);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
                       FeedbackSource const& feedback, FrameState frame_state);
  void VisitSelect(Node* node);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* node);
  void VisitRetain(Node* node);
  void VisitUnreachable(Node* node);
  void VisitStaticAssert(Node* node);
  void VisitDeadValue(Node* node);

  void VisitStackPointerGreaterThan(Node* node, FlagsContinuation* cont);

  void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);

  void EmitWordPoisonOnSpeculation(Node* node);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* call_descriptor, Node* node);
  void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
                          const CallDescriptor* call_descriptor, Node* node);

  bool CanProduceSignalingNaN(Node* node);

  void AddOutputToSelectContinuation(OperandGenerator* g, int first_input_index,
                                     Node* node);

  // ===========================================================================
  // ============= Vector instruction (SIMD) helper fns. =======================
  // ===========================================================================

#if V8_ENABLE_WEBASSEMBLY
  // Canonicalize shuffles to make pattern matching simpler. Writes the
  // canonicalized shuffle indices into {shuffle} and sets {is_swizzle} if the
  // shuffle uses only one input.
  void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);

  // Swaps the first two input operands of the node, to help match shuffles
  // to specific architectural instructions.
  void SwapShuffleInputs(Node* node);
#endif  // V8_ENABLE_WEBASSEMBLY

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  void set_instruction_selection_failed() {
    instruction_selection_failed_ = true;
  }
  bool instruction_selection_failed() { return instruction_selection_failed_; }

  void MarkPairProjectionsAsWord32(Node* node);
  bool IsSourcePositionUsed(Node* node);
  void VisitWord32AtomicBinaryOperation(Node* node, ArchOpcode int8_op,
                                        ArchOpcode uint8_op,
                                        ArchOpcode int16_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode word32_op);
  void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode uint32_op,
                                        ArchOpcode uint64_op);
  void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
                                    ArchOpcode uint16_op, ArchOpcode uint32_op);

#if V8_TARGET_ARCH_64_BIT
  bool ZeroExtendsWord32ToWord64(Node* node, int recursion_depth = 0);
  bool ZeroExtendsWord32ToWord64NoPhis(Node* node);

  enum Upper32BitsState : uint8_t {
    kNotYetChecked,
    kUpperBitsGuaranteedZero,
    kNoGuarantee,
  };
#endif  // V8_TARGET_ARCH_64_BIT

  struct FrameStateInput {
    FrameStateInput(Node* node_, FrameStateInputKind kind_)
        : node(node_), kind(kind_) {}

    Node* node;
    FrameStateInputKind kind;

    struct Hash {
      size_t operator()(FrameStateInput const& source) const {
        return base::hash_combine(source.node,
                                  static_cast<size_t>(source.kind));
      }
    };

    struct Equal {
      bool operator()(FrameStateInput const& lhs,
                      FrameStateInput const& rhs) const {
        return lhs.node == rhs.node && lhs.kind == rhs.kind;
      }
    };
  };

  struct CachedStateValues;
  class CachedStateValuesBuilder;

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  InstructionOperandVector continuation_inputs_;
  InstructionOperandVector continuation_outputs_;
  InstructionOperandVector continuation_temps_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  IntVector virtual_registers_;
  IntVector virtual_register_rename_;
  InstructionScheduler* scheduler_;
  EnableScheduling enable_scheduling_;
  EnableRootsRelativeAddressing enable_roots_relative_addressing_;
  EnableSwitchJumpTable enable_switch_jump_table_;
  ZoneUnorderedMap<FrameStateInput, CachedStateValues*, FrameStateInput::Hash,
                   FrameStateInput::Equal>
      state_values_cache_;

  PoisoningMitigationLevel poisoning_level_;
  Frame* frame_;
  bool instruction_selection_failed_;
  ZoneVector<std::pair<int, int>> instr_origins_;
  EnableTraceTurboJson trace_turbo_;
  TickCounter* const tick_counter_;
  // The broker is only used for unparking the LocalHeap for diagnostic printing
  // for failed StaticAsserts.
  JSHeapBroker* const broker_;

  // Store the maximal unoptimized frame height and the maximal number of
  // pushed arguments (for calls). Later used to apply an offset to stack
  // checks.
  size_t* max_unoptimized_frame_height_;
  size_t* max_pushed_argument_count_;

#if V8_TARGET_ARCH_64_BIT
  // Holds lazily-computed results for whether phi nodes guarantee their upper
  // 32 bits to be zero. Indexed by node ID; nobody reads or writes the values
  // for non-phi nodes.
  ZoneVector<Upper32BitsState> phi_states_;
#endif
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_