// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
#define V8_COMPILER_MACHINE_OPERATOR_H_

#include "src/base/compiler-specific.h"
#include "src/base/enum-set.h"
#include "src/base/flags.h"
#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/globals.h"
#include "src/compiler/write-barrier-kind.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
struct MachineOperatorGlobalCache;
class Operator;

// For operators that are not supported on all platforms.
class OptionalOperator final {
 public:
  OptionalOperator(bool supported, const Operator* op)
      : supported_(supported), op_(op) {}

  bool IsSupported() const { return supported_; }
  // Gets the operator only if it is supported.
  const Operator* op() const {
    DCHECK(supported_);
    return op_;
  }
  // Always gets the operator, even for unsupported operators. This is useful
  // when the operator is only needed as a placeholder in a graph, for instance.
  const Operator* placeholder() const { return op_; }

 private:
  bool supported_;
  const Operator* const op_;
};
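
// Illustrative sketch (not part of the original header): callers typically
// check support before taking the operator, and fall back to the placeholder
// only when a node is needed purely structurally, e.g.
//
//   const OptionalOperator ctz = machine->Word32Ctz();
//   const Operator* op = ctz.IsSupported() ? ctz.op() : ctz.placeholder();
//
// where `machine` is assumed to be a MachineOperatorBuilder*.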

// A Load needs a MachineType.
using LoadRepresentation = MachineType;

V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
    V8_WARN_UNUSED_RESULT;

// A Word(32|64)AtomicLoad needs both a LoadRepresentation and a memory
// order.
class AtomicLoadParameters final {
 public:
  AtomicLoadParameters(LoadRepresentation representation,
                       AtomicMemoryOrder order)
      : representation_(representation), order_(order) {}

  LoadRepresentation representation() const { return representation_; }
  AtomicMemoryOrder order() const { return order_; }

 private:
  LoadRepresentation representation_;
  AtomicMemoryOrder order_;
};

V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
bool operator!=(AtomicLoadParameters, AtomicLoadParameters);

size_t hash_value(AtomicLoadParameters);

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);

V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
    V8_WARN_UNUSED_RESULT;
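
// Hedged example (added for illustration): parameters for a sequentially
// consistent 32-bit atomic load might be built as
//
//   AtomicLoadParameters params(MachineType::Int32(),
//                               AtomicMemoryOrder::kSeqCst);
//   const Operator* op = machine->Word32AtomicLoad(params);
//
// with `machine` an assumed MachineOperatorBuilder*.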

enum class MemoryAccessKind {
  kNormal,
  kUnaligned,
  kProtected,
};

size_t hash_value(MemoryAccessKind);

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, MemoryAccessKind);

enum class LoadTransformation {
  kS128Load8Splat,
  kS128Load16Splat,
  kS128Load32Splat,
  kS128Load64Splat,
  kS128Load8x8S,
  kS128Load8x8U,
  kS128Load16x4S,
  kS128Load16x4U,
  kS128Load32x2S,
  kS128Load32x2U,
  kS128Load32Zero,
  kS128Load64Zero,
};

size_t hash_value(LoadTransformation);

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadTransformation);

struct LoadTransformParameters {
  MemoryAccessKind kind;
  LoadTransformation transformation;
};

size_t hash_value(LoadTransformParameters);

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
                                           LoadTransformParameters);

V8_EXPORT_PRIVATE LoadTransformParameters const& LoadTransformParametersOf(
    Operator const*) V8_WARN_UNUSED_RESULT;

V8_EXPORT_PRIVATE bool operator==(LoadTransformParameters,
                                  LoadTransformParameters);
bool operator!=(LoadTransformParameters, LoadTransformParameters);

struct LoadLaneParameters {
  MemoryAccessKind kind;
  LoadRepresentation rep;
  uint8_t laneidx;
};

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadLaneParameters);

V8_EXPORT_PRIVATE LoadLaneParameters const& LoadLaneParametersOf(
    Operator const*) V8_WARN_UNUSED_RESULT;

// A Store needs a MachineType and a WriteBarrierKind in order to emit the
// correct write barrier, and needs to state whether it is storing into the
// header word, so that the value can be packed, if necessary.
class StoreRepresentation final {
 public:
  StoreRepresentation(MachineRepresentation representation,
                      WriteBarrierKind write_barrier_kind)
      : representation_(representation),
        write_barrier_kind_(write_barrier_kind) {}

  MachineRepresentation representation() const { return representation_; }
  WriteBarrierKind write_barrier_kind() const { return write_barrier_kind_; }

 private:
  MachineRepresentation representation_;
  WriteBarrierKind write_barrier_kind_;
};
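
// Hedged sketch (added): a store of a tagged pointer into a tagged field
// would typically combine kTagged with a full write barrier, e.g.
//
//   StoreRepresentation rep(MachineRepresentation::kTagged, kFullWriteBarrier);
//   const Operator* store = machine->Store(rep);
//
// `machine` is an assumed MachineOperatorBuilder*; kFullWriteBarrier comes
// from src/compiler/write-barrier-kind.h.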

V8_EXPORT_PRIVATE bool operator==(StoreRepresentation, StoreRepresentation);
bool operator!=(StoreRepresentation, StoreRepresentation);

size_t hash_value(StoreRepresentation);

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);

V8_EXPORT_PRIVATE StoreRepresentation const& StoreRepresentationOf(
    Operator const*) V8_WARN_UNUSED_RESULT;

// A Word(32|64)AtomicStore needs both a StoreRepresentation and a memory order.
class AtomicStoreParameters final {
 public:
  AtomicStoreParameters(MachineRepresentation representation,
                        WriteBarrierKind write_barrier_kind,
                        AtomicMemoryOrder order)
      : store_representation_(representation, write_barrier_kind),
        order_(order) {}

  MachineRepresentation representation() const {
    return store_representation_.representation();
  }
  WriteBarrierKind write_barrier_kind() const {
    return store_representation_.write_barrier_kind();
  }
  AtomicMemoryOrder order() const { return order_; }

  StoreRepresentation store_representation() const {
    return store_representation_;
  }

 private:
  StoreRepresentation store_representation_;
  AtomicMemoryOrder order_;
};

V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
bool operator!=(AtomicStoreParameters, AtomicStoreParameters);

size_t hash_value(AtomicStoreParameters);

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
                                           AtomicStoreParameters);

V8_EXPORT_PRIVATE AtomicStoreParameters const& AtomicStoreParametersOf(
    Operator const*) V8_WARN_UNUSED_RESULT;
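
// Illustrative sketch (assumption, not original code): an untagged,
// sequentially consistent 64-bit atomic store needs no write barrier, e.g.
//
//   AtomicStoreParameters params(MachineRepresentation::kWord64,
//                                kNoWriteBarrier, AtomicMemoryOrder::kSeqCst);
//   const Operator* op = machine->Word64AtomicStore(params);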

// An UnalignedStore needs a MachineType.
using UnalignedStoreRepresentation = MachineRepresentation;

UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
    Operator const*) V8_WARN_UNUSED_RESULT;

struct StoreLaneParameters {
  MemoryAccessKind kind;
  MachineRepresentation rep;
  uint8_t laneidx;
};

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreLaneParameters);

V8_EXPORT_PRIVATE StoreLaneParameters const& StoreLaneParametersOf(
    Operator const*) V8_WARN_UNUSED_RESULT;

class StackSlotRepresentation final {
 public:
  StackSlotRepresentation(int size, int alignment)
      : size_(size), alignment_(alignment) {}

  int size() const { return size_; }
  int alignment() const { return alignment_; }

 private:
  int size_;
  int alignment_;
};
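
// Hedged example (added): these parameters back MachineOperatorBuilder's
// StackSlot operators below, e.g. an 8-byte, 8-byte-aligned slot:
//
//   const Operator* slot = machine->StackSlot(8, 8);
//   // StackSlotRepresentationOf(slot) then reports size() == 8 and
//   // alignment() == 8.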

V8_EXPORT_PRIVATE bool operator==(StackSlotRepresentation,
                                  StackSlotRepresentation);
bool operator!=(StackSlotRepresentation, StackSlotRepresentation);

size_t hash_value(StackSlotRepresentation);

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
                                           StackSlotRepresentation);

V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
    Operator const* op) V8_WARN_UNUSED_RESULT;

MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;

class S128ImmediateParameter {
 public:
  explicit S128ImmediateParameter(const uint8_t immediate[16]) {
    std::copy(immediate, immediate + 16, immediate_.begin());
  }
  S128ImmediateParameter() = default;
  const std::array<uint8_t, 16>& immediate() const { return immediate_; }
  const uint8_t* data() const { return immediate_.data(); }
  uint8_t operator[](int x) const { return immediate_[x]; }

 private:
  std::array<uint8_t, 16> immediate_;
};
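
// Illustrative sketch (assumption): this wraps the 16 immediate bytes carried
// by operators such as S128Const or I8x16Shuffle, e.g.
//
//   uint8_t bytes[16] = {0};
//   const Operator* c = machine->S128Const(bytes);
//   const S128ImmediateParameter& imm = S128ImmediateParameterOf(c);
//   // imm[0] .. imm[15] reproduce the bytes passed above.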

V8_EXPORT_PRIVATE bool operator==(S128ImmediateParameter const& lhs,
                                  S128ImmediateParameter const& rhs);
bool operator!=(S128ImmediateParameter const& lhs,
                S128ImmediateParameter const& rhs);

size_t hash_value(S128ImmediateParameter const& p);

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
                                           S128ImmediateParameter const&);

V8_EXPORT_PRIVATE S128ImmediateParameter const& S128ImmediateParameterOf(
    Operator const* op) V8_WARN_UNUSED_RESULT;

StackCheckKind StackCheckKindOf(Operator const* op) V8_WARN_UNUSED_RESULT;

// ShiftKind::kShiftOutZeros means that it is guaranteed that the bits shifted
// out of the left operand are all zeros. If this is not the case, undefined
// behavior (i.e., incorrect optimizations) will happen.
// This is mostly useful for Smi untagging.
enum class ShiftKind { kNormal, kShiftOutZeros };

size_t hash_value(ShiftKind);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ShiftKind);
ShiftKind ShiftKindOf(Operator const*) V8_WARN_UNUSED_RESULT;
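
// Hedged example (not from the original source): Smi untagging on a 64-bit
// target can use the shift-out-zeros form, since the tag bits being shifted
// out are known to be zero:
//
//   const Operator* untag = machine->Word64SarShiftOutZeros();
//   // Same as machine->Word64Sar(ShiftKind::kShiftOutZeros).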

// TruncateKind::kSetOverflowToMin sets the result of a saturating float-to-int
// conversion to INT_MIN if the conversion returns INT_MAX due to overflow. This
// makes it easier to detect an overflow. This parameter is ignored on platforms
// like x64 and ia32 where a range overflow does not result in INT_MAX.
enum class TruncateKind { kArchitectureDefault, kSetOverflowToMin };
std::ostream& operator<<(std::ostream& os, TruncateKind kind);
size_t hash_value(TruncateKind kind);
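
// Illustrative sketch (assumption): a lowering that wants a single sentinel
// value for all out-of-range inputs would request
//
//   const Operator* trunc =
//       machine->TruncateFloat64ToInt64(TruncateKind::kSetOverflowToMin);
//
// whereas TruncateKind::kArchitectureDefault keeps the platform's native
// overflow result.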

// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
class V8_EXPORT_PRIVATE MachineOperatorBuilder final
    : public NON_EXPORTED_BASE(ZoneObject) {
 public:
  // Flags that specify which operations are available. This is useful
  // for operations that are unsupported by some back-ends.
  enum Flag : unsigned {
    kNoFlags = 0u,
    kFloat32RoundDown = 1u << 0,
    kFloat64RoundDown = 1u << 1,
    kFloat32RoundUp = 1u << 2,
    kFloat64RoundUp = 1u << 3,
    kFloat32RoundTruncate = 1u << 4,
    kFloat64RoundTruncate = 1u << 5,
    kFloat32RoundTiesEven = 1u << 6,
    kFloat64RoundTiesEven = 1u << 7,
    kFloat64RoundTiesAway = 1u << 8,
    kInt32DivIsSafe = 1u << 9,
    kUint32DivIsSafe = 1u << 10,
    kWord32ShiftIsSafe = 1u << 11,
    kWord32Ctz = 1u << 12,
    kWord64Ctz = 1u << 13,
    kWord64CtzLowerable = 1u << 14,
    kWord32Popcnt = 1u << 15,
    kWord64Popcnt = 1u << 16,
    kWord32ReverseBits = 1u << 17,
    kWord64ReverseBits = 1u << 18,
    kFloat32Select = 1u << 19,
    kFloat64Select = 1u << 20,
    kInt32AbsWithOverflow = 1u << 21,
    kInt64AbsWithOverflow = 1u << 22,
    kWord32Rol = 1u << 23,
    kWord64Rol = 1u << 24,
    kWord64RolLowerable = 1u << 25,
    kSatConversionIsSafe = 1u << 26,
    kWord32Select = 1u << 27,
    kWord64Select = 1u << 28,
    kAllOptionalOps =
        kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
        kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
        kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
        kWord32Ctz | kWord64Ctz | kWord64CtzLowerable | kWord32Popcnt |
        kWord64Popcnt | kWord32ReverseBits | kWord64ReverseBits |
        kInt32AbsWithOverflow | kInt64AbsWithOverflow | kWord32Rol |
        kWord64Rol | kWord64RolLowerable | kSatConversionIsSafe |
        kFloat32Select | kFloat64Select | kWord32Select | kWord64Select
  };
  using Flags = base::Flags<Flag, unsigned>;
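
  // Hedged sketch (added for illustration): back-ends describe their
  // capabilities as a Flags bitset when constructing the builder, e.g.
  //
  //   MachineOperatorBuilder::Flags flags =
  //       MachineOperatorBuilder::kWord32Popcnt |
  //       MachineOperatorBuilder::kFloat64RoundDown;
  //   MachineOperatorBuilder machine(
  //       zone, MachineType::PointerRepresentation(), flags);
  //
  // with `zone` an assumed Zone*.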

  class AlignmentRequirements {
   public:
    enum UnalignedAccessSupport { kNoSupport, kSomeSupport, kFullSupport };

    bool IsUnalignedLoadSupported(MachineRepresentation rep) const {
      return IsUnalignedSupported(unalignedLoadUnsupportedTypes_, rep);
    }

    bool IsUnalignedStoreSupported(MachineRepresentation rep) const {
      return IsUnalignedSupported(unalignedStoreUnsupportedTypes_, rep);
    }

    static AlignmentRequirements FullUnalignedAccessSupport() {
      return AlignmentRequirements(kFullSupport);
    }
    static AlignmentRequirements NoUnalignedAccessSupport() {
      return AlignmentRequirements(kNoSupport);
    }
    static AlignmentRequirements SomeUnalignedAccessUnsupported(
        base::EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes,
        base::EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes) {
      return AlignmentRequirements(kSomeSupport, unalignedLoadUnsupportedTypes,
                                   unalignedStoreUnsupportedTypes);
    }

   private:
    explicit AlignmentRequirements(
        AlignmentRequirements::UnalignedAccessSupport unalignedAccessSupport,
        base::EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes =
            base::EnumSet<MachineRepresentation>(),
        base::EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes =
            base::EnumSet<MachineRepresentation>())
        : unalignedSupport_(unalignedAccessSupport),
          unalignedLoadUnsupportedTypes_(unalignedLoadUnsupportedTypes),
          unalignedStoreUnsupportedTypes_(unalignedStoreUnsupportedTypes) {}

    bool IsUnalignedSupported(base::EnumSet<MachineRepresentation> unsupported,
                              MachineRepresentation rep) const {
      // All accesses of bytes in memory are aligned.
      DCHECK_NE(MachineRepresentation::kWord8, rep);
      switch (unalignedSupport_) {
        case kFullSupport:
          return true;
        case kNoSupport:
          return false;
        case kSomeSupport:
          return !unsupported.contains(rep);
      }
      UNREACHABLE();
    }

    const AlignmentRequirements::UnalignedAccessSupport unalignedSupport_;
    const base::EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes_;
    const base::EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes_;
  };
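
  // Illustrative sketch (assumption): a target without unaligned float64
  // accesses could describe itself as
  //
  //   auto reqs = AlignmentRequirements::SomeUnalignedAccessUnsupported(
  //       base::EnumSet<MachineRepresentation>(
  //           {MachineRepresentation::kFloat64}),
  //       base::EnumSet<MachineRepresentation>(
  //           {MachineRepresentation::kFloat64}));
  //
  // IsUnalignedLoadSupported(MachineRepresentation::kFloat64) then returns
  // false, and such accesses must be split or realigned by the backend.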

  explicit MachineOperatorBuilder(
      Zone* zone,
      MachineRepresentation word = MachineType::PointerRepresentation(),
      Flags supportedOperators = kNoFlags,
      AlignmentRequirements alignmentRequirements =
          AlignmentRequirements::FullUnalignedAccessSupport());

  MachineOperatorBuilder(const MachineOperatorBuilder&) = delete;
  MachineOperatorBuilder& operator=(const MachineOperatorBuilder&) = delete;

  const Operator* Comment(const char* msg);
  const Operator* AbortCSADcheck();
  const Operator* DebugBreak();

  const Operator* Word32And();
  const Operator* Word32Or();
  const Operator* Word32Xor();
  const Operator* Word32Shl();
  const Operator* Word32Shr();
  const Operator* Word32Sar(ShiftKind kind);
  const Operator* Word32Sar() { return Word32Sar(ShiftKind::kNormal); }
  const Operator* Word32SarShiftOutZeros() {
    return Word32Sar(ShiftKind::kShiftOutZeros);
  }
  const OptionalOperator Word32Rol();
  const Operator* Word32Ror();
  const Operator* Word32Equal();
  const Operator* Word32Clz();
  const OptionalOperator Word32Ctz();
  const OptionalOperator Word32Popcnt();
  const OptionalOperator Word64Popcnt();
  const OptionalOperator Word32ReverseBits();
  const OptionalOperator Word64ReverseBits();
  const Operator* Word32ReverseBytes();
  const Operator* Word64ReverseBytes();
  const Operator* Simd128ReverseBytes();
  const OptionalOperator Int32AbsWithOverflow();
  const OptionalOperator Int64AbsWithOverflow();

  // Return true if the target's Word32 shift implementation is directly
  // compatible with JavaScript's specification. Otherwise, we have to manually
  // mask the shift amount with 0x1f before generating the shift.
  bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
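
  // Hedged sketch (added): when this returns false, the shift amount is
  // conceptually masked first, e.g. something like
  //
  //   Word32Shl(value, Word32And(amount, <constant 0x1f>))
  //
  // where the constant node would come from the common operator builder.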

  // Return true if the target's implementation of float-to-int-conversions is a
  // saturating conversion rounding towards 0. Otherwise, we have to manually
  // generate the correct value if a saturating conversion is requested.
  bool SatConversionIsSafe() const { return flags_ & kSatConversionIsSafe; }

  const Operator* Word64And();
  const Operator* Word64Or();
  const Operator* Word64Xor();
  const Operator* Word64Shl();
  const Operator* Word64Shr();
  const Operator* Word64Sar(ShiftKind kind);
  const Operator* Word64Sar() { return Word64Sar(ShiftKind::kNormal); }
  const Operator* Word64SarShiftOutZeros() {
    return Word64Sar(ShiftKind::kShiftOutZeros);
  }

  // 64-bit rol, ror, clz and ctz operators have two versions: the non-suffixed
  // ones are meant to be used in 64-bit systems and have no control input. The
  // "Lowerable"-suffixed ones are meant to be temporary operators in 32-bit
  // systems and will be lowered to 32-bit operators. They have a control input
  // to enable the lowering.
  const OptionalOperator Word64Rol();
  const Operator* Word64Ror();
  const Operator* Word64Clz();
  const OptionalOperator Word64Ctz();
  const OptionalOperator Word64RolLowerable();
  const Operator* Word64RorLowerable();
  const Operator* Word64ClzLowerable();
  const OptionalOperator Word64CtzLowerable();

  const Operator* Word64Equal();

  const Operator* Int32PairAdd();
  const Operator* Int32PairSub();
  const Operator* Int32PairMul();
  const Operator* Word32PairShl();
  const Operator* Word32PairShr();
  const Operator* Word32PairSar();

  const Operator* Int32Add();
  const Operator* Int32AddWithOverflow();
  const Operator* Int32Sub();
  const Operator* Int32SubWithOverflow();
  const Operator* Int32Mul();
  const Operator* Int32MulWithOverflow();
  const Operator* Int32MulHigh();
  const Operator* Int32Div();
  const Operator* Int32Mod();
  const Operator* Int32LessThan();
  const Operator* Int32LessThanOrEqual();
  const Operator* Uint32Div();
  const Operator* Uint32LessThan();
  const Operator* Uint32LessThanOrEqual();
  const Operator* Uint32Mod();
  const Operator* Uint32MulHigh();
  bool Int32DivIsSafe() const { return flags_ & kInt32DivIsSafe; }
  bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }

  const Operator* Int64Add();
  const Operator* Int64AddWithOverflow();
  const Operator* Int64Sub();
  const Operator* Int64SubWithOverflow();
  const Operator* Int64Mul();
  const Operator* Int64Div();
  const Operator* Int64Mod();
  const Operator* Int64LessThan();
  const Operator* Int64LessThanOrEqual();
  const Operator* Uint64Div();
  const Operator* Uint64LessThan();
  const Operator* Uint64LessThanOrEqual();
  const Operator* Uint64Mod();

  // This operator reinterprets the bits of a tagged pointer as a word.
  const Operator* BitcastTaggedToWord();

  // This operator reinterprets the bits of a tagged value as a word preserving
  // non-pointer bits (all the bits that are not modified by GC):
  // 1) smi tag
  // 2) weak tag
  // 3) smi payload if the tagged value is a smi.
  // Note, that it's illegal to "look" at the pointer bits of non-smi values.
  const Operator* BitcastTaggedToWordForTagAndSmiBits();

  // This operator reinterprets the bits of a tagged MaybeObject pointer as
  // word.
  const Operator* BitcastMaybeObjectToWord();

531 532 533
  // This operator reinterprets the bits of a word as tagged pointer.
  const Operator* BitcastWordToTagged();

  // This operator reinterprets the bits of a word as a Smi.
  const Operator* BitcastWordToTaggedSigned();

  // JavaScript float64 to int32/uint32 truncation.
  const Operator* TruncateFloat64ToWord32();

  // These operators change the representation of numbers while preserving the
  // value of the number. Narrowing operators assume the input is representable
  // in the target type and are *not* defined for other inputs.
  // Use narrowing change operators only when there is a static guarantee that
  // the input value is representable in the target type.
  //
  // Some operators can have the behaviour on overflow change through specifying
  // TruncateKind. The exact semantics are documented in the tests in
  // test/cctest/compiler/test-run-machops.cc .
  const Operator* ChangeFloat32ToFloat64();
  const Operator* ChangeFloat64ToInt32();   // narrowing
  const Operator* ChangeFloat64ToInt64();
  const Operator* ChangeFloat64ToUint32();  // narrowing
  const Operator* ChangeFloat64ToUint64();
  const Operator* TruncateFloat64ToInt64(TruncateKind kind);
  const Operator* TruncateFloat64ToUint32();
  const Operator* TruncateFloat32ToInt32(TruncateKind kind);
  const Operator* TruncateFloat32ToUint32(TruncateKind kind);
  const Operator* TryTruncateFloat32ToInt64();
  const Operator* TryTruncateFloat64ToInt64();
  const Operator* TryTruncateFloat32ToUint64();
  const Operator* TryTruncateFloat64ToUint64();
  const Operator* ChangeInt32ToFloat64();
  const Operator* BitcastWord32ToWord64();
  const Operator* ChangeInt32ToInt64();
  const Operator* ChangeInt64ToFloat64();
  const Operator* ChangeUint32ToFloat64();
  const Operator* ChangeUint32ToUint64();

  // These operators truncate or round numbers, both changing the representation
  // of the number and mapping multiple input values onto the same output value.
  const Operator* TruncateFloat64ToFloat32();
  const Operator* TruncateInt64ToInt32();
  const Operator* RoundFloat64ToInt32();
  const Operator* RoundInt32ToFloat32();
  const Operator* RoundInt64ToFloat32();
  const Operator* RoundInt64ToFloat64();
  const Operator* RoundUint32ToFloat32();
  const Operator* RoundUint64ToFloat32();
  const Operator* RoundUint64ToFloat64();

  // These operators reinterpret the bits of a floating point number as an
  // integer and vice versa.
  const Operator* BitcastFloat32ToInt32();
  const Operator* BitcastFloat64ToInt64();
  const Operator* BitcastInt32ToFloat32();
  const Operator* BitcastInt64ToFloat64();

  // These operators sign-extend to Int32/Int64
  const Operator* SignExtendWord8ToInt32();
  const Operator* SignExtendWord16ToInt32();
  const Operator* SignExtendWord8ToInt64();
  const Operator* SignExtendWord16ToInt64();
  const Operator* SignExtendWord32ToInt64();

  // Floating point operators always operate with IEEE 754 round-to-nearest
  // (single-precision).
  const Operator* Float32Add();
  const Operator* Float32Sub();
  const Operator* Float32Mul();
  const Operator* Float32Div();
  const Operator* Float32Sqrt();

  // Floating point operators always operate with IEEE 754 round-to-nearest
  // (double-precision).
  const Operator* Float64Add();
  const Operator* Float64Sub();
  const Operator* Float64Mul();
  const Operator* Float64Div();
  const Operator* Float64Mod();
  const Operator* Float64Sqrt();

  // Floating point comparisons complying to IEEE 754 (single-precision).
  const Operator* Float32Equal();
  const Operator* Float32LessThan();
  const Operator* Float32LessThanOrEqual();

  // Floating point comparisons complying to IEEE 754 (double-precision).
  const Operator* Float64Equal();
  const Operator* Float64LessThan();
  const Operator* Float64LessThanOrEqual();

  // Floating point min/max complying to EcmaScript 6 (double-precision).
  const Operator* Float64Max();
  const Operator* Float64Min();
  // Floating point min/max complying to WebAssembly (single-precision).
  const Operator* Float32Max();
  const Operator* Float32Min();

  // Floating point abs complying to IEEE 754 (single-precision).
  const Operator* Float32Abs();

  // Floating point abs complying to IEEE 754 (double-precision).
  const Operator* Float64Abs();

  // Floating point rounding.
  const OptionalOperator Float32RoundDown();
  const OptionalOperator Float64RoundDown();
  const OptionalOperator Float32RoundUp();
  const OptionalOperator Float64RoundUp();
  const OptionalOperator Float32RoundTruncate();
  const OptionalOperator Float64RoundTruncate();
  const OptionalOperator Float64RoundTiesAway();
  const OptionalOperator Float32RoundTiesEven();
  const OptionalOperator Float64RoundTiesEven();

  // Conditional selects. Input 1 is the condition, Input 2 is the result value
  // if the condition is {true}, Input 3 is the result value if the condition is
  // false.
  const OptionalOperator Word32Select();
  const OptionalOperator Word64Select();
  const OptionalOperator Float32Select();
  const OptionalOperator Float64Select();
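
  // Hedged example (added): a branch-free 32-bit minimum could be expressed as
  //
  //   Word32Select(Int32LessThan(a, b), a, b)
  //
  // provided Word32Select().IsSupported() holds for the target.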

  // Floating point neg.
  const Operator* Float32Neg();
  const Operator* Float64Neg();

  // Floating point trigonometric functions (double-precision).
  const Operator* Float64Acos();
  const Operator* Float64Acosh();
  const Operator* Float64Asin();
  const Operator* Float64Asinh();
  const Operator* Float64Atan();
  const Operator* Float64Atan2();
  const Operator* Float64Atanh();
  const Operator* Float64Cos();
  const Operator* Float64Cosh();
  const Operator* Float64Sin();
  const Operator* Float64Sinh();
  const Operator* Float64Tan();
  const Operator* Float64Tanh();

  // Floating point exponential functions (double-precision).
  const Operator* Float64Exp();
  const Operator* Float64Expm1();
  const Operator* Float64Pow();

  // Floating point logarithm (double-precision).
  const Operator* Float64Log();
  const Operator* Float64Log1p();
  const Operator* Float64Log2();
  const Operator* Float64Log10();

  // Floating point cube root (double-precision).
  const Operator* Float64Cbrt();

  // Floating point bit representation.
  const Operator* Float64ExtractLowWord32();
  const Operator* Float64ExtractHighWord32();
  const Operator* Float64InsertLowWord32();
  const Operator* Float64InsertHighWord32();

  // Change signalling NaN to quiet NaN.
  // Identity for any input that is not signalling NaN.
  const Operator* Float64SilenceNaN();

  // SIMD operators.
  const Operator* F64x2Splat();
  const Operator* F64x2Abs();
  const Operator* F64x2Neg();
  const Operator* F64x2Sqrt();
  const Operator* F64x2Add();
  const Operator* F64x2Sub();
  const Operator* F64x2Mul();
  const Operator* F64x2Div();
  const Operator* F64x2ExtractLane(int32_t);
  const Operator* F64x2Min();
  const Operator* F64x2Max();
  const Operator* F64x2ReplaceLane(int32_t);
  const Operator* F64x2Eq();
  const Operator* F64x2Ne();
  const Operator* F64x2Lt();
  const Operator* F64x2Le();
  const Operator* F64x2Qfma();
  const Operator* F64x2Qfms();
  const Operator* F64x2Pmin();
  const Operator* F64x2Pmax();
  const Operator* F64x2Ceil();
  const Operator* F64x2Floor();
  const Operator* F64x2Trunc();
  const Operator* F64x2NearestInt();
  const Operator* F64x2ConvertLowI32x4S();
  const Operator* F64x2ConvertLowI32x4U();
  const Operator* F64x2PromoteLowF32x4();

  const Operator* F32x4Splat();
  const Operator* F32x4ExtractLane(int32_t);
  const Operator* F32x4ReplaceLane(int32_t);
  const Operator* F32x4SConvertI32x4();
  const Operator* F32x4UConvertI32x4();
  const Operator* F32x4Abs();
  const Operator* F32x4Neg();
  const Operator* F32x4Sqrt();
  const Operator* F32x4Add();
  const Operator* F32x4Sub();
  const Operator* F32x4Mul();
  const Operator* F32x4Div();
  const Operator* F32x4Min();
  const Operator* F32x4Max();
  const Operator* F32x4Eq();
  const Operator* F32x4Ne();
  const Operator* F32x4Lt();
  const Operator* F32x4Le();
  const Operator* F32x4Qfma();
  const Operator* F32x4Qfms();
  const Operator* F32x4Pmin();
  const Operator* F32x4Pmax();
  const Operator* F32x4Ceil();
  const Operator* F32x4Floor();
  const Operator* F32x4Trunc();
  const Operator* F32x4NearestInt();
  const Operator* F32x4DemoteF64x2Zero();

  const Operator* I64x2Splat();
  const Operator* I64x2SplatI32Pair();
  const Operator* I64x2ExtractLane(int32_t);
  const Operator* I64x2ReplaceLane(int32_t);
  const Operator* I64x2ReplaceLaneI32Pair(int32_t);
  const Operator* I64x2Abs();
  const Operator* I64x2Neg();
  const Operator* I64x2SConvertI32x4Low();
  const Operator* I64x2SConvertI32x4High();
  const Operator* I64x2UConvertI32x4Low();
  const Operator* I64x2UConvertI32x4High();
  const Operator* I64x2BitMask();
  const Operator* I64x2Shl();
  const Operator* I64x2ShrS();
  const Operator* I64x2Add();
  const Operator* I64x2Sub();
  const Operator* I64x2Mul();
  const Operator* I64x2Eq();
  const Operator* I64x2Ne();
  const Operator* I64x2GtS();
  const Operator* I64x2GeS();
  const Operator* I64x2ShrU();
  const Operator* I64x2ExtMulLowI32x4S();
  const Operator* I64x2ExtMulHighI32x4S();
  const Operator* I64x2ExtMulLowI32x4U();
  const Operator* I64x2ExtMulHighI32x4U();

  const Operator* I32x4Splat();
  const Operator* I32x4ExtractLane(int32_t);
  const Operator* I32x4ReplaceLane(int32_t);
  const Operator* I32x4SConvertF32x4();
  const Operator* I32x4SConvertI16x8Low();
  const Operator* I32x4SConvertI16x8High();
  const Operator* I32x4Neg();
  const Operator* I32x4Shl();
  const Operator* I32x4ShrS();
  const Operator* I32x4Add();
  const Operator* I32x4Sub();
  const Operator* I32x4Mul();
  const Operator* I32x4MinS();
  const Operator* I32x4MaxS();
  const Operator* I32x4Eq();
  const Operator* I32x4Ne();
  const Operator* I32x4GtS();
  const Operator* I32x4GeS();

  const Operator* I32x4UConvertF32x4();
  const Operator* I32x4UConvertI16x8Low();
  const Operator* I32x4UConvertI16x8High();
  const Operator* I32x4ShrU();
  const Operator* I32x4MinU();
  const Operator* I32x4MaxU();
  const Operator* I32x4GtU();
  const Operator* I32x4GeU();
  const Operator* I32x4Abs();
  const Operator* I32x4BitMask();
  const Operator* I32x4DotI16x8S();
  const Operator* I32x4ExtMulLowI16x8S();
  const Operator* I32x4ExtMulHighI16x8S();
  const Operator* I32x4ExtMulLowI16x8U();
  const Operator* I32x4ExtMulHighI16x8U();
  const Operator* I32x4ExtAddPairwiseI16x8S();
  const Operator* I32x4ExtAddPairwiseI16x8U();
  const Operator* I32x4TruncSatF64x2SZero();
  const Operator* I32x4TruncSatF64x2UZero();

  const Operator* I16x8Splat();
  const Operator* I16x8ExtractLaneU(int32_t);
  const Operator* I16x8ExtractLaneS(int32_t);
  const Operator* I16x8ReplaceLane(int32_t);
  const Operator* I16x8SConvertI8x16Low();
  const Operator* I16x8SConvertI8x16High();
  const Operator* I16x8Neg();
  const Operator* I16x8Shl();
  const Operator* I16x8ShrS();
  const Operator* I16x8SConvertI32x4();
  const Operator* I16x8Add();
  const Operator* I16x8AddSatS();
  const Operator* I16x8Sub();
  const Operator* I16x8SubSatS();
  const Operator* I16x8Mul();
  const Operator* I16x8MinS();
  const Operator* I16x8MaxS();
  const Operator* I16x8Eq();
  const Operator* I16x8Ne();
  const Operator* I16x8GtS();
  const Operator* I16x8GeS();

  const Operator* I16x8UConvertI8x16Low();
  const Operator* I16x8UConvertI8x16High();
  const Operator* I16x8ShrU();
  const Operator* I16x8UConvertI32x4();
  const Operator* I16x8AddSatU();
  const Operator* I16x8SubSatU();
  const Operator* I16x8MinU();
  const Operator* I16x8MaxU();
  const Operator* I16x8GtU();
  const Operator* I16x8GeU();
  const Operator* I16x8RoundingAverageU();
  const Operator* I16x8Q15MulRSatS();
  const Operator* I16x8Abs();
  const Operator* I16x8BitMask();
  const Operator* I16x8ExtMulLowI8x16S();
  const Operator* I16x8ExtMulHighI8x16S();
  const Operator* I16x8ExtMulLowI8x16U();
  const Operator* I16x8ExtMulHighI8x16U();
  const Operator* I16x8ExtAddPairwiseI8x16S();
  const Operator* I16x8ExtAddPairwiseI8x16U();

  const Operator* I8x16Splat();
  const Operator* I8x16ExtractLaneU(int32_t);
  const Operator* I8x16ExtractLaneS(int32_t);
  const Operator* I8x16ReplaceLane(int32_t);
  const Operator* I8x16Neg();
  const Operator* I8x16Shl();
  const Operator* I8x16ShrS();
  const Operator* I8x16SConvertI16x8();
  const Operator* I8x16Add();
  const Operator* I8x16AddSatS();
  const Operator* I8x16Sub();
  const Operator* I8x16SubSatS();
  const Operator* I8x16MinS();
  const Operator* I8x16MaxS();
  const Operator* I8x16Eq();
  const Operator* I8x16Ne();
  const Operator* I8x16GtS();
  const Operator* I8x16GeS();

  const Operator* I8x16ShrU();
  const Operator* I8x16UConvertI16x8();
  const Operator* I8x16AddSatU();
  const Operator* I8x16SubSatU();
  const Operator* I8x16MinU();
  const Operator* I8x16MaxU();
  const Operator* I8x16GtU();
  const Operator* I8x16GeU();
  const Operator* I8x16RoundingAverageU();
  const Operator* I8x16Popcnt();
  const Operator* I8x16Abs();
  const Operator* I8x16BitMask();

  const Operator* S128Const(const uint8_t value[16]);

  const Operator* S128Zero();
  const Operator* S128And();
  const Operator* S128Or();
  const Operator* S128Xor();
  const Operator* S128Not();
  const Operator* S128Select();
  const Operator* S128AndNot();

  const Operator* I8x16Swizzle(bool relaxed = false);
  const Operator* I8x16Shuffle(const uint8_t shuffle[16]);

  const Operator* V128AnyTrue();
  const Operator* I64x2AllTrue();
  const Operator* I32x4AllTrue();
  const Operator* I16x8AllTrue();
  const Operator* I8x16AllTrue();

  // Relaxed SIMD operators.
  const Operator* I8x16RelaxedLaneSelect();
  const Operator* I16x8RelaxedLaneSelect();
  const Operator* I32x4RelaxedLaneSelect();
  const Operator* I64x2RelaxedLaneSelect();
  const Operator* F32x4RelaxedMin();
  const Operator* F32x4RelaxedMax();
  const Operator* F64x2RelaxedMin();
  const Operator* F64x2RelaxedMax();
  const Operator* I32x4RelaxedTruncF32x4S();
  const Operator* I32x4RelaxedTruncF32x4U();
  const Operator* I32x4RelaxedTruncF64x2SZero();
  const Operator* I32x4RelaxedTruncF64x2UZero();
  const Operator* I16x8RelaxedQ15MulRS();

  // load [base + index]
  const Operator* Load(LoadRepresentation rep);
  const Operator* LoadImmutable(LoadRepresentation rep);
  const Operator* ProtectedLoad(LoadRepresentation rep);

  const Operator* LoadTransform(MemoryAccessKind kind,
                                LoadTransformation transform);

  // SIMD load: replace a specified lane with [base + index].
  const Operator* LoadLane(MemoryAccessKind kind, LoadRepresentation rep,
                           uint8_t laneidx);

  // store [base + index], value
  const Operator* Store(StoreRepresentation rep);
  const Operator* ProtectedStore(MachineRepresentation rep);

  // SIMD store: store a specified lane of value into [base + index].
  const Operator* StoreLane(MemoryAccessKind kind, MachineRepresentation rep,
                            uint8_t laneidx);

  // unaligned load [base + index]
  const Operator* UnalignedLoad(LoadRepresentation rep);

  // unaligned store [base + index], value
  const Operator* UnalignedStore(UnalignedStoreRepresentation rep);

  const Operator* StackSlot(int size, int alignment = 0);
  const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);

  // Access to the machine stack.
  const Operator* LoadFramePointer();
  const Operator* LoadParentFramePointer();

  // Compares: stack_pointer [- offset] > value. The offset is optionally
  // applied for kFunctionEntry stack checks.
  const Operator* StackPointerGreaterThan(StackCheckKind kind);

  // Loads the offset that should be applied to the current stack
  // pointer before a stack check. Used as input to the
  // Runtime::kStackGuardWithGap call.
  const Operator* LoadStackCheckOffset();

  // Memory barrier.
  const Operator* MemBarrier();

  // atomic-load [base + index]
  const Operator* Word32AtomicLoad(AtomicLoadParameters params);
  // atomic-load [base + index]
  const Operator* Word64AtomicLoad(AtomicLoadParameters params);
  // atomic-store [base + index], value
  const Operator* Word32AtomicStore(AtomicStoreParameters params);
  // atomic-store [base + index], value
  const Operator* Word64AtomicStore(AtomicStoreParameters params);
  // atomic-exchange [base + index], value
  const Operator* Word32AtomicExchange(MachineType type);
  // atomic-exchange [base + index], value
  const Operator* Word64AtomicExchange(MachineType type);
  // atomic-compare-exchange [base + index], old_value, new_value
  const Operator* Word32AtomicCompareExchange(MachineType type);
  // atomic-compare-exchange [base + index], old_value, new_value
  const Operator* Word64AtomicCompareExchange(MachineType type);
  // atomic-add [base + index], value
  const Operator* Word32AtomicAdd(MachineType type);
  // atomic-sub [base + index], value
  const Operator* Word32AtomicSub(MachineType type);
  // atomic-and [base + index], value
  const Operator* Word32AtomicAnd(MachineType type);
  // atomic-or [base + index], value
  const Operator* Word32AtomicOr(MachineType type);
  // atomic-xor [base + index], value
  const Operator* Word32AtomicXor(MachineType type);
  // atomic-add [base + index], value
  const Operator* Word64AtomicAdd(MachineType type);
  // atomic-sub [base + index], value
  const Operator* Word64AtomicSub(MachineType type);
  // atomic-and [base + index], value
  const Operator* Word64AtomicAnd(MachineType type);
  // atomic-or [base + index], value
  const Operator* Word64AtomicOr(MachineType type);
  // atomic-xor [base + index], value
  const Operator* Word64AtomicXor(MachineType type);
  // atomic-pair-load [base + index]
  const Operator* Word32AtomicPairLoad(AtomicMemoryOrder order);
  // atomic-pair-store [base + index], value_high, value_low
  const Operator* Word32AtomicPairStore(AtomicMemoryOrder order);
  // atomic-pair-add [base + index], value_high, value_low
  const Operator* Word32AtomicPairAdd();
  // atomic-pair-sub [base + index], value_high, value_low
  const Operator* Word32AtomicPairSub();
  // atomic-pair-and [base + index], value_high, value_low
  const Operator* Word32AtomicPairAnd();
  // atomic-pair-or [base + index], value_high, value_low
  const Operator* Word32AtomicPairOr();
  // atomic-pair-xor [base + index], value_high, value_low
  const Operator* Word32AtomicPairXor();
  // atomic-pair-exchange [base + index], value_high, value_low
  const Operator* Word32AtomicPairExchange();
  // atomic-pair-compare-exchange [base + index], old_value_high, old_value_low,
  // new_value_high, new_value_low
  const Operator* Word32AtomicPairCompareExchange();
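
  // Illustrative sketch (assumption): a 32-bit fetch-and-add on a uint32 cell
  // would be built with
  //
  //   const Operator* add = Word32AtomicAdd(MachineType::Uint32());
  //
  // and then wired with base, index and value inputs plus effect/control
  // edges when the node is created.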

  // Target machine word-size assumed by this builder.
  bool Is32() const { return word() == MachineRepresentation::kWord32; }
  bool Is64() const { return word() == MachineRepresentation::kWord64; }
  MachineRepresentation word() const { return word_; }

  bool UnalignedLoadSupported(MachineRepresentation rep) {
    return alignment_requirements_.IsUnalignedLoadSupported(rep);
  }

  bool UnalignedStoreSupported(MachineRepresentation rep) {
    return alignment_requirements_.IsUnalignedStoreSupported(rep);
  }

// Pseudo operators that translate to 32/64-bit operators depending on the
// word-size of the target machine assumed by this builder.
#define PSEUDO_OP_LIST(V)      \
  V(Word, And)                 \
  V(Word, Or)                  \
  V(Word, Xor)                 \
  V(Word, Shl)                 \
  V(Word, Shr)                 \
  V(Word, Ror)                 \
  V(Word, Clz)                 \
  V(Word, Equal)               \
  V(Int, Add)                  \
  V(Int, Sub)                  \
  V(Int, Mul)                  \
  V(Int, Div)                  \
  V(Int, Mod)                  \
  V(Int, LessThan)             \
  V(Int, LessThanOrEqual)      \
  V(Uint, Div)                 \
  V(Uint, LessThan)            \
  V(Uint, Mod)
#define PSEUDO_OP(Prefix, Suffix)                                \
  const Operator* Prefix##Suffix() {                             \
    return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
  }
  PSEUDO_OP_LIST(PSEUDO_OP)
#undef PSEUDO_OP
#undef PSEUDO_OP_LIST

  const Operator* WordSar(ShiftKind kind = ShiftKind::kNormal) {
    return Is32() ? Word32Sar(kind) : Word64Sar(kind);
  }
  const Operator* WordSarShiftOutZeros() {
    return WordSar(ShiftKind::kShiftOutZeros);
  }
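
  // Hedged example (added): WordAnd() resolves to Word32And() on 32-bit
  // targets and Word64And() on 64-bit targets, so word-size-agnostic lowering
  // code can write
  //
  //   const Operator* mask = machine->WordAnd();
  //
  // without branching on Is32()/Is64() itself.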

 private:
  Zone* zone_;
  MachineOperatorGlobalCache const& cache_;
  MachineRepresentation const word_;
  Flags const flags_;
  AlignmentRequirements const alignment_requirements_;
};


DEFINE_OPERATORS_FOR_FLAGS(MachineOperatorBuilder::Flags)

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_MACHINE_OPERATOR_H_