// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class Linkage;
class OperandGenerator;
class SwitchInfo;
class StateObjectDeduplicator;

31 32 33 34 35 36 37 38 39 40 41 42 43
// The flags continuation is a way to combine a branch or a materialization
// of a boolean value with an instruction that sets the flags register.
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
class FlagsContinuation final {
 public:
  FlagsContinuation() : mode_(kFlags_none) {}

  // Creates a new flags continuation from the given condition and true/false
  // blocks.
  static FlagsContinuation ForBranch(FlagsCondition condition,
                                     BasicBlock* true_block,
44 45
                                     BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
46 47 48 49 50 51 52 53 54 55
  }

  static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
                                              BasicBlock* true_block,
                                              BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
                             false_block);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
56 57 58 59 60 61 62 63 64 65 66
  static FlagsContinuation ForDeoptimize(FlagsCondition condition,
                                         DeoptimizeKind kind,
                                         DeoptimizeReason reason,
                                         VectorSlotPair const& feedback,
                                         Node* frame_state) {
    return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
                             feedback, frame_state);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimizeAndPoison(
67
      FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
68 69 70
      VectorSlotPair const& feedback, Node* frame_state) {
    return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
                             reason, feedback, frame_state);
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233
  }

  // Creates a new flags continuation for a boolean value.
  static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
    return FlagsContinuation(condition, result);
  }

  // Creates a new flags continuation for a wasm trap.
  static FlagsContinuation ForTrap(FlagsCondition condition,
                                   Runtime::FunctionId trap_id, Node* result) {
    return FlagsContinuation(condition, trap_id, result);
  }

  bool IsNone() const { return mode_ == kFlags_none; }
  bool IsBranch() const {
    return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
  }
  bool IsDeoptimize() const {
    return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsPoisoned() const {
    return mode_ == kFlags_branch_and_poison ||
           mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsSet() const { return mode_ == kFlags_set; }
  bool IsTrap() const { return mode_ == kFlags_trap; }
  FlagsCondition condition() const {
    DCHECK(!IsNone());
    return condition_;
  }
  DeoptimizeKind kind() const {
    DCHECK(IsDeoptimize());
    return kind_;
  }
  DeoptimizeReason reason() const {
    DCHECK(IsDeoptimize());
    return reason_;
  }
  VectorSlotPair const& feedback() const {
    DCHECK(IsDeoptimize());
    return feedback_;
  }
  Node* frame_state() const {
    DCHECK(IsDeoptimize());
    return frame_state_or_result_;
  }
  Node* result() const {
    DCHECK(IsSet());
    return frame_state_or_result_;
  }
  Runtime::FunctionId trap_id() const {
    DCHECK(IsTrap());
    return trap_id_;
  }
  BasicBlock* true_block() const {
    DCHECK(IsBranch());
    return true_block_;
  }
  BasicBlock* false_block() const {
    DCHECK(IsBranch());
    return false_block_;
  }

  void Negate() {
    DCHECK(!IsNone());
    condition_ = NegateFlagsCondition(condition_);
  }

  void Commute() {
    DCHECK(!IsNone());
    condition_ = CommuteFlagsCondition(condition_);
  }

  void Overwrite(FlagsCondition condition) { condition_ = condition; }

  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
    DCHECK(condition_ == kEqual || condition_ == kNotEqual);
    bool negate = condition_ == kEqual;
    condition_ = condition;
    if (negate) Negate();
  }

  void OverwriteUnsignedIfSigned() {
    switch (condition_) {
      case kSignedLessThan:
        condition_ = kUnsignedLessThan;
        break;
      case kSignedLessThanOrEqual:
        condition_ = kUnsignedLessThanOrEqual;
        break;
      case kSignedGreaterThan:
        condition_ = kUnsignedGreaterThan;
        break;
      case kSignedGreaterThanOrEqual:
        condition_ = kUnsignedGreaterThanOrEqual;
        break;
      default:
        break;
    }
  }

  // Encodes this flags continuation into the given opcode.
  InstructionCode Encode(InstructionCode opcode) {
    opcode |= FlagsModeField::encode(mode_);
    if (mode_ != kFlags_none) {
      opcode |= FlagsConditionField::encode(condition_);
    }
    return opcode;
  }

 private:
  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    BasicBlock* true_block, BasicBlock* false_block)
      : mode_(mode),
        condition_(condition),
        true_block_(true_block),
        false_block_(false_block) {
    DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
    DCHECK_NOT_NULL(true_block);
    DCHECK_NOT_NULL(false_block);
  }

  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    DeoptimizeKind kind, DeoptimizeReason reason,
                    VectorSlotPair const& feedback, Node* frame_state)
      : mode_(mode),
        condition_(condition),
        kind_(kind),
        reason_(reason),
        feedback_(feedback),
        frame_state_or_result_(frame_state) {
    DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
    DCHECK_NOT_NULL(frame_state);
  }

  FlagsContinuation(FlagsCondition condition, Node* result)
      : mode_(kFlags_set),
        condition_(condition),
        frame_state_or_result_(result) {
    DCHECK_NOT_NULL(result);
  }

  FlagsContinuation(FlagsCondition condition, Runtime::FunctionId trap_id,
                    Node* result)
      : mode_(kFlags_trap),
        condition_(condition),
        frame_state_or_result_(result),
        trap_id_(trap_id) {
    DCHECK_NOT_NULL(result);
  }

  FlagsMode const mode_;
  FlagsCondition condition_;
  DeoptimizeKind kind_;          // Only valid if mode_ == kFlags_deoptimize*
  DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize*
  VectorSlotPair feedback_;      // Only valid if mode_ == kFlags_deoptimize*
  Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize*
                                 // or mode_ == kFlags_set.
  BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch*.
  BasicBlock* false_block_;      // Only valid if mode_ == kFlags_branch*.
  Runtime::FunctionId trap_id_;  // Only valid if mode_ == kFlags_trap.
};

234 235
// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
236 237 238 239
struct PushParameter {
  PushParameter(Node* n = nullptr,
                LinkageLocation l = LinkageLocation::ForAnyRegister())
      : node(n), location(l) {}
240

241 242
  Node* node;
  LinkageLocation location;
243
};
// How a frame-state input may be materialized: as any operand kind, or
// restricted to a stack slot.
enum class FrameStateInputKind { kAny, kStackSlot };
// Instruction selection generates an InstructionSequence for a given Schedule.
248
class V8_EXPORT_PRIVATE InstructionSelector final {
249
 public:
250 251 252
  // Forward declarations.
  class Features;

253
  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
254
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
255
  enum EnableSerialization { kDisableSerialization, kEnableSerialization };
256 257 258 259
  enum EnableSwitchJumpTable {
    kDisableSwitchJumpTable,
    kEnableSwitchJumpTable
  };
260 261 262 263

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
264
      SourcePositionTable* source_positions, Frame* frame,
265
      EnableSwitchJumpTable enable_switch_jump_table,
266
      SourcePositionMode source_position_mode = kCallSourcePositions,
267 268 269
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
270
                                               : kDisableScheduling,
271
      EnableSerialization enable_serialization = kDisableSerialization,
272 273
      PoisoningMitigationLevel poisoning_level =
          PoisoningMitigationLevel::kDontPoison);
274 275

  // Visit code for the entire graph with the included schedule.
276
  bool SelectInstructions();
277

278 279 280
  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);
281
  void AddTerminator(Instruction* instr);
282

283 284 285 286
  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

287
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
288
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
289 290
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
291
                    InstructionOperand* temps = nullptr);
292 293
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
294
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
295 296 297
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
298
                    InstructionOperand* temps = nullptr);
299 300 301
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
302
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
303 304 305 306
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
307
                    InstructionOperand* temps = nullptr);
308 309 310 311
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
312
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
313
  Instruction* Emit(InstructionCode opcode, size_t output_count,
314 315
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
316
                    InstructionOperand* temps = nullptr);
317 318
  Instruction* Emit(Instruction* instr);

319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338
  // [0-3] operand instructions with no output, uses labels for true and false
  // blocks of the continuation.
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    InstructionOperand c,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
                                    InstructionOperand* outputs,
                                    size_t input_count,
                                    InstructionOperand* inputs,
                                    FlagsContinuation* cont);

339 340 341 342 343
  // ===========================================================================
  // ===== Architecture-independent deoptimization exit emission methods. ======
  // ===========================================================================
  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                              InstructionOperand* outputs, size_t input_count,
344
                              InstructionOperand* inputs, DeoptimizeKind kind,
345 346 347
                              DeoptimizeReason reason,
                              VectorSlotPair const& feedback,
                              Node* frame_state);
348

349 350 351 352
  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

353
  class Features final {
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }

375 376 377
  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

378 379
  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

380 381
  bool NeedsPoisoning(IsSafetyCheck safety_check) const;

382 383 384 385 386 387 388 389 390 391
  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node has the only
  // edge and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;

392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416
  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block.  This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together.  If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

417 418 419 420
  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

421 422 423 424
  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

425 426 427
  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

428 429 430
  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

431
  int GetVirtualRegister(const Node* node);
432
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
433

434 435 436 437
  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register, i.e. if both a root register is available for this
  // compilation unit and the serializer is disabled.
  bool CanAddressRelativeToRootsRegister() const;
438 439
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;
440

441 442
  Isolate* isolate() const { return sequence()->isolate(); }

443 444 445
 private:
  friend class OperandGenerator;

446 447 448 449 450
  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

451 452 453 454 455
  void AppendDeoptimizeArguments(InstructionOperandVector* args,
                                 DeoptimizeKind kind, DeoptimizeReason reason,
                                 VectorSlotPair const& feedback,
                                 Node* frame_state);

456 457 458 459
  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
  void EmitLookupSwitch(const SwitchInfo& sw,
                        InstructionOperand& value_operand);

460 461 462 463 464 465
  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);

466 467 468
  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

469 470 471 472
  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

473 474 475
  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

476 477
  // Inform the register allocation of the representation of the value produced
  // by {node}.
478 479 480 481 482 483 484 485 486 487 488 489 490
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
491 492 493
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
494 495 496
  void MarkAsReference(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }
497

498 499
  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
500 501
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);
502

503 504 505
  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
506 507
    kCallTail = 1u << 2,
    kCallFixedTargetRegister = 1u << 3,
508 509 510
  };
  typedef base::Flags<CallBufferFlag> CallBufferFlags;

511 512 513 514 515 516
  // Initialize the call buffer with the InstructionOperands, nodes, etc,
  // corresponding
  // to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
517 518
                            CallBufferFlags flags, bool is_tail_call,
                            int stack_slot_delta = 0);
519
  bool IsTailCallAddressImmediate();
520
  int GetTempsCountForTailCallFromJSFunction();
521

522
  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
523 524 525 526 527 528 529 530 531 532 533
  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                         Node* state, OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         InstructionOperandVector* inputs,
                                         FrameStateInputKind kind, Zone* zone);
  size_t AddOperandToStateValueDescriptor(StateValueList* values,
                                          InstructionOperandVector* inputs,
                                          OperandGenerator* g,
                                          StateObjectDeduplicator* deduplicator,
                                          Node* input, MachineType type,
                                          FrameStateInputKind kind, Zone* zone);
534

535 536 537 538 539 540 541 542 543 544 545 546 547 548
  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

549
  // Visit the node and generate code for IEEE 754 functions.
550
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
551 552
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

553 554
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
555
  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
556 557
#undef DECLARE_GENERATOR

558
  void VisitFinishRegion(Node* node);
559
  void VisitParameter(Node* node);
560
  void VisitIfException(Node* node);
561
  void VisitOsrValue(Node* node);
562 563
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
564
  void VisitConstant(Node* node);
565
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
566 567
  void VisitCallWithCallerSavedRegisters(Node* call,
                                         BasicBlock* handler = nullptr);
568 569
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
570 571
  void VisitTrapIf(Node* node, Runtime::FunctionId func_id);
  void VisitTrapUnless(Node* node, Runtime::FunctionId func_id);
572
  void VisitTailCall(Node* call);
573 574
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
575
  void VisitSwitch(Node* node, const SwitchInfo& sw);
576
  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
577
                       VectorSlotPair const& feedback, Node* value);
578
  void VisitReturn(Node* ret);
579
  void VisitThrow(Node* node);
580
  void VisitRetain(Node* node);
581
  void VisitUnreachable(Node* node);
582
  void VisitDeadValue(Node* node);
583

584 585
  void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);

586 587
  void EmitWordPoisonOnSpeculation(Node* node);

588
  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
589
                            const CallDescriptor* call_descriptor, Node* node);
590
  void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
591
                          const CallDescriptor* call_descriptor, Node* node);
592 593 594

  void EmitIdentity(Node* node);
  bool CanProduceSignalingNaN(Node* node);
595

596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623
  // ===========================================================================
  // ============= Vector instruction (SIMD) helper fns. =======================
  // ===========================================================================

  // Tries to match a byte shuffle to a scalar splat operation. Returns the
  // index of the lane if successful.
  template <int LANES>
  static bool TryMatchDup(const uint8_t* shuffle, int* index) {
    const int kBytesPerLane = kSimd128Size / LANES;
    // Get the first lane's worth of bytes and check that indices start at a
    // lane boundary and are consecutive.
    uint8_t lane0[kBytesPerLane];
    lane0[0] = shuffle[0];
    if (lane0[0] % kBytesPerLane != 0) return false;
    for (int i = 1; i < kBytesPerLane; ++i) {
      lane0[i] = shuffle[i];
      if (lane0[i] != lane0[0] + i) return false;
    }
    // Now check that the other lanes are identical to lane0.
    for (int i = 1; i < LANES; ++i) {
      for (int j = 0; j < kBytesPerLane; ++j) {
        if (lane0[j] != shuffle[i * kBytesPerLane + j]) return false;
      }
    }
    *index = lane0[0] / kBytesPerLane;
    return true;
  }

624 625 626
  // Tries to match an 8x16 byte shuffle to an equivalent 32x4 shuffle. If
  // successful, it writes the 32x4 shuffle word indices. E.g.
  // [0 1 2 3 8 9 10 11 4 5 6 7 12 13 14 15] == [0 2 1 3]
627 628
  static bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4);

629 630 631 632 633 634 635 636 637
  // Tries to match an 8x16 byte shuffle to an equivalent 16x8 shuffle. If
  // successful, it writes the 16x8 shuffle word indices. E.g.
  // [0 1 8 9 2 3 10 11 4 5 12 13 6 7 14 15] == [0 4 1 5 2 6 3 7]
  static bool TryMatch16x8Shuffle(const uint8_t* shuffle, uint8_t* shuffle16x8);

  // Tries to match a byte shuffle to a concatenate operation, formed by taking
  // 16 bytes from the 32 byte concatenation of the inputs.  If successful, it
  // writes the byte offset. E.g. [4 5 6 7 .. 16 17 18 19] concatenates both
  // source vectors with offset 4.
638 639 640
  static bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask,
                             uint8_t* offset);

641 642 643 644 645
  // Tries to match a byte shuffle to a blend operation, which is a shuffle
  // where no lanes change position. E.g. [0 9 2 11 .. 14 31] interleaves the
  // even lanes of the first source with the odd lanes of the second.
  static bool TryMatchBlend(const uint8_t* shuffle);

646 647 648 649 650
  // Packs 4 bytes of shuffle into a 32 bit immediate, using a mask from
  // CanonicalizeShuffle to convert unary shuffles.
  static int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask);

  // Canonicalize shuffles to make pattern matching simpler. Returns a mask that
651
  // will clear the high bit of indices if shuffle is unary (a swizzle).
652 653
  uint8_t CanonicalizeShuffle(Node* node);

654 655
  // ===========================================================================

656
  Schedule* schedule() const { return schedule_; }
657
  Linkage* linkage() const { return linkage_; }
658 659
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
660
  Zone* zone() const { return zone_; }
661

662 663 664 665 666
  void set_instruction_selection_failed() {
    instruction_selection_failed_ = true;
  }
  bool instruction_selection_failed() { return instruction_selection_failed_; }

667
  void MarkPairProjectionsAsWord32(Node* node);
668
  bool IsSourcePositionUsed(Node* node);
669 670 671 672 673
  void VisitWord32AtomicBinaryOperation(Node* node, ArchOpcode int8_op,
                                        ArchOpcode uint8_op,
                                        ArchOpcode int16_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode word32_op);
674 675 676 677
  void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode uint32_op,
                                        ArchOpcode uint64_op);
678

679 680
  // ===========================================================================

681
  Zone* const zone_;
682 683 684
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
685
  SourcePositionMode const source_position_mode_;
686
  Features features_;
687
  Schedule* const schedule_;
688
  BasicBlock* current_block_;
689
  ZoneVector<Instruction*> instructions_;
690 691
  InstructionOperandVector continuation_inputs_;
  InstructionOperandVector continuation_outputs_;
692
  BoolVector defined_;
693
  BoolVector used_;
694
  IntVector effect_level_;
695
  IntVector virtual_registers_;
696
  IntVector virtual_register_rename_;
697
  InstructionScheduler* scheduler_;
698
  EnableScheduling enable_scheduling_;
699
  EnableSerialization enable_serialization_;
700
  EnableSwitchJumpTable enable_switch_jump_table_;
701

702
  PoisoningMitigationLevel poisoning_level_;
703
  Frame* frame_;
704
  bool instruction_selection_failed_;
705 706 707 708 709 710 711
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_