// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/iterator.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/execution/ppc/frame-constants-ppc.h"

namespace v8 {
namespace internal {
namespace compiler {

enum ImmediateMode {
  kInt16Imm,
  kInt16Imm_Unsigned,
  kInt16Imm_Negate,
  kInt16Imm_4ByteAligned,
  kShift32Imm,
  kShift64Imm,
  kNoImmediate
};

// Adds PPC-specific methods for generating operands.
class PPCOperandGenerator final : public OperandGenerator {
 public:
  explicit PPCOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node->op());
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node->op());
    else
      return false;
    return CanBeImmediate(value, mode);
  }

  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
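    // Check |value| against the immediate class selected by |mode|: a signed
    // or unsigned 16-bit field, a negated 16-bit field, a 16-bit field that
    // must also be 4-byte aligned (used below for 64-bit loads/stores), or a
    // valid 5/6-bit shift count.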
    switch (mode) {
      case kInt16Imm:
        return is_int16(value);
      case kInt16Imm_Unsigned:
        return is_uint16(value);
      case kInt16Imm_Negate:
        return is_int16(-value);
      case kInt16Imm_4ByteAligned:
        return is_int16(value) && !(value & 3);
      case kShift32Imm:
        return 0 <= value && value < 32;
      case kShift64Imm:
        return 0 <= value && value < 64;
      case kNoImmediate:
        return false;
    }
    return false;
  }
};

namespace {

void VisitRR(InstructionSelector* selector, InstructionCode opcode,
             Node* node) {
  PPCOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

void VisitRRR(InstructionSelector* selector, InstructionCode opcode,
              Node* node) {
  PPCOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

void VisitRRO(InstructionSelector* selector, InstructionCode opcode, Node* node,
              ImmediateMode operand_mode) {
  PPCOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}

#if V8_TARGET_ARCH_PPC64
void VisitTryTruncateDouble(InstructionSelector* selector,
                            InstructionCode opcode, Node* node) {
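  // The truncated value goes in the first output; when the node has a
  // Projection(1) use, a second output register is defined to receive the
  // "conversion succeeded" flag.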
  PPCOperandGenerator g(selector);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  selector->Emit(opcode, output_count, outputs, 1, inputs);
}
#endif

// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode,
                FlagsContinuation* cont) {
  PPCOperandGenerator g(selector);
  Matcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}

}  // namespace

void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int slot = frame_->AllocateSpillSlot(rep.size());
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}

void InstructionSelector::VisitAbortCSAAssert(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}

void InstructionSelector::VisitLoad(Node* node) {
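  // Pick the PPC load opcode (and the immediate mode for its displacement)
  // from the machine representation, then select MRI addressing when the
  // offset or base fits that immediate mode and MRR addressing otherwise.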
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  InstructionCode opcode = kArchNop;
  ImmediateMode mode = kInt16Imm;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kPPC_LoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kPPC_LoadDouble;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kPPC_LoadWordU32;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kPPC_LoadWord64;
      mode = kInt16Imm_4ByteAligned;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (node->opcode() == IrOpcode::kPoisonedLoad &&
      poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
    opcode |= MiscField::encode(kMemoryAccessPoisoned);
  }

  bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
                    node->opcode() == IrOpcode::kWord64AtomicLoad);

  if (g.CanBeImmediate(offset, mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset),
         g.UseImmediate(is_atomic));
  } else if (g.CanBeImmediate(base, mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base),
         g.UseImmediate(is_atomic));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
         g.UseImmediate(is_atomic));
  }
}

void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitStore(Node* node) {
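  // Tagged stores that need a write barrier are emitted as
  // kArchStoreWithWriteBarrier; all other stores pick a PPC store opcode from
  // the representation and an MRI/MRR addressing mode, mirroring VisitLoad.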
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  Node* value = node->InputAt(2);

  bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore ||
                    node->opcode() == IrOpcode::kWord64AtomicStore);

  MachineRepresentation rep;
  WriteBarrierKind write_barrier_kind = kNoWriteBarrier;

  if (is_atomic) {
    rep = AtomicStoreRepresentationOf(node->op());
  } else {
    StoreRepresentation store_rep = StoreRepresentationOf(node->op());
    write_barrier_kind = store_rep.write_barrier_kind();
    rep = store_rep.representation();
  }

  if (write_barrier_kind != kNoWriteBarrier &&
      V8_LIKELY(!FLAG_disable_write_barriers)) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the offset in an 'add' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(offset, kInt16Imm)
#if V8_TARGET_ARCH_PPC64
        && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
#endif
            ) {
      inputs[input_count++] = g.UseImmediate(offset);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(offset);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    CHECK_EQ(is_atomic, false);
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    ImmediateMode mode = kInt16Imm;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kPPC_StoreFloat32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kPPC_StoreDouble;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kPPC_StoreWord8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kPPC_StoreWord16;
        break;
#if !V8_TARGET_ARCH_PPC64
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
#endif
      case MachineRepresentation::kWord32:
        opcode = kPPC_StoreWord32;
        break;
#if V8_TARGET_ARCH_PPC64
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kPPC_StoreWord64;
        mode = kInt16Imm_4ByteAligned;
        break;
#else
      case MachineRepresentation::kWord64:  // Fall through.
#endif
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:         // Fall through.
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(offset, mode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value),
           g.UseImmediate(is_atomic));
    } else if (g.CanBeImmediate(base, mode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value),
           g.UseImmediate(is_atomic));
    } else {
      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
           g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value),
           g.UseImmediate(is_atomic));
    }
  }
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }

// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }

template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  PPCOperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kPPC_And:
      inv_opcode = kPPC_AndComplement;
      break;
    case kPPC_Or:
      inv_opcode = kPPC_OrComplement;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  VisitBinop<Matcher>(selector, node, opcode, imm_mode);
}

static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
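  // Recognize masks that consist of a single contiguous run of set bits. On a
  // match, *mb receives the bit index of the run's most significant bit and
  // *me the index of its least significant bit (bit 0 = LSB), as consumed by
  // the rotate-and-mask instructions emitted below.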
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros32(value);
  int mask_lsb = base::bits::CountTrailingZeros32(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}

#if V8_TARGET_ARCH_PPC64
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation(value);
  int mask_msb = base::bits::CountLeadingZeros64(value);
  int mask_lsb = base::bits::CountTrailingZeros64(value);
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}
#endif

// TODO(mbrandy): Absorb rotate-right into rlwinm?
void InstructionSelector::VisitWord32And(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
        CanCover(node, left)) {
      // Try to absorb left/right shift into rlwinm
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 31)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord32Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 31 - sh) mb = 31 - sh;
          sh = (32 - sh) & 0x1F;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
           g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
      return;
    }
  }
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kPPC_And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}

#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb rotate-right into rldic?
void InstructionSelector::VisitWord64And(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
        CanCover(node, left)) {
      // Try to absorb left/right shift into rldic
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord64Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          sh = (64 - sh) & 0x3F;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        match = true;
        opcode = kPPC_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        match = true;
        opcode = kPPC_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
        match = true;
        opcode = kPPC_RotLeftAndClear64;
        mask = mb;
      }
      if (match) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kPPC_And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif

void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif

void InstructionSelector::VisitWord32Xor(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
  }
}

void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  StackCheckKind kind = StackCheckKindOf(node->op());
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  PPCOperandGenerator g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  Node* const value = node->InputAt(0);
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Xor(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
  }
}
#endif

void InstructionSelector::VisitWord32Shl(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    // Try to absorb logical-and into rlwinm
    Int32BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Shl(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    // Try to absorb logical-and into rldic
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          match = true;
          opcode = kPPC_RotLeftAndClear64;
          mask = mb;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm);
}
#endif

void InstructionSelector::VisitWord32Shr(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    // Try to absorb logical-and into rlwinm
    Int32BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 31 - sh) mb = 31 - sh;
      sh = (32 - sh) & 0x1F;
      if (mb >= me) {
        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Shr(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    // Try to absorb logical-and into rldic
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      sh = (64 - sh) & 0x3F;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm);
}
#endif

void InstructionSelector::VisitWord32Sar(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Replace with sign extension for (x << K) >> K where K is 16 or 24.
  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    }
  }
  VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
}

#if !V8_TARGET_ARCH_PPC64
void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
                    InstructionCode opcode2, Node* node) {
  PPCOperandGenerator g(selector);

  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    // We use UseUniqueRegister here to avoid register sharing with the output
    // registers.
    InstructionOperand inputs[] = {
        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};

    InstructionOperand outputs[] = {
        g.DefineAsRegister(node),
        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

    selector->Emit(opcode, 2, outputs, 4, inputs);
  } else {
    // The high word of the result is not used, so we emit the standard 32 bit
    // instruction.
    selector->Emit(opcode2, g.DefineSameAsFirst(node),
                   g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(2)));
  }
}

void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitPairBinop(this, kPPC_AddPair, kPPC_Add32, node);
}

void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitPairBinop(this, kPPC_SubPair, kPPC_Sub, node);
}

void InstructionSelector::VisitInt32PairMul(Node* node) {
  PPCOperandGenerator g(this);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                   g.UseUniqueRegister(node->InputAt(1)),
                                   g.UseUniqueRegister(node->InputAt(2)),
                                   g.UseUniqueRegister(node->InputAt(3))};

    InstructionOperand outputs[] = {
        g.DefineAsRegister(node),
        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};

    Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
  } else {
    // The high word of the result is not used, so we emit the standard 32 bit
    // instruction.
    Emit(kPPC_Mul32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
         g.UseRegister(node->InputAt(2)));
  }
}

namespace {
// Shared routine for multiple shift operations.
void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
                    Node* node) {
  PPCOperandGenerator g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is
  // no register aliasing of input registers with output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
}  // namespace

void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitPairShift(this, kPPC_ShiftLeftPair, node);
}

void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitPairShift(this, kPPC_ShiftRightPair, node);
}

void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitPairShift(this, kPPC_ShiftRightAlgPair, node);
}
#endif

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Sar(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
      m.right().Is(32)) {
    // Just load and sign-extend the interesting 4 bytes instead. This happens,
    // for example, when we're loading and untagging SMIs.
    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
                                                AddressOption::kAllowAll);
    if (mleft.matches() && mleft.index() == nullptr) {
      int64_t offset = 0;
      Node* displacement = mleft.displacement();
      if (displacement != nullptr) {
        Int64Matcher mdisplacement(displacement);
        DCHECK(mdisplacement.HasValue());
        offset = mdisplacement.Value();
      }
      offset = SmiWordOffset(offset);
      if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
        Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
             g.DefineAsRegister(node), g.UseRegister(mleft.base()),
             g.TempImmediate(offset), g.UseImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
}
#endif

// TODO(mbrandy): Absorb logical-and into rlwinm?
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
}

#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb logical-and into rldic?
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kPPC_RotRight64, node, kShift64Imm);
}
#endif

void InstructionSelector::VisitWord32Clz(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Cntlz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Clz(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Cntlz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
#endif

void InstructionSelector::VisitWord32Popcnt(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Popcnt(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Popcnt64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif

void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  PPCOperandGenerator g(this);
  InstructionOperand temp[] = {g.TempRegister()};
  Emit(kPPC_ByteRev64, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)), 1, temp);
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_ByteRev32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  // TODO(miladfar): Implement the ppc selector for reversing SIMD bytes.
  // Check if the input node is a Load and do a Load Reverse at once.
  UNIMPLEMENTED();
}

void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
}
#endif

void InstructionSelector::VisitInt32Sub(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
  }
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Sub(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
  }
}
#endif

namespace {

void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont);
void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
                              FlagsContinuation* cont) {
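  // Compute the 32x32 product together with its high 32 bits, then compare
  // the high word against the low word shifted right arithmetically by 31
  // (its sign replicated); any difference means the signed multiply
  // overflowed.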
  PPCOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand high32_operand = g.TempRegister();
  InstructionOperand temp_operand = g.TempRegister();
  {
    InstructionOperand outputs[] = {result_operand, high32_operand};
    InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
                                   g.UseRegister(m.right().node())};
    selector->Emit(kPPC_Mul32WithHigh32, 2, outputs, 2, inputs);
  }
  {
    InstructionOperand shift_31 = g.UseImmediate(31);
    InstructionOperand outputs[] = {temp_operand};
    InstructionOperand inputs[] = {result_operand, shift_31};
    selector->Emit(kPPC_ShiftRightAlg32, 1, outputs, 2, inputs);
  }

  VisitCompare(selector, kPPC_Cmp32, high32_operand, temp_operand, cont);
}

}  // namespace

void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitRRR(this, kPPC_Mul32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitRRR(this, kPPC_Mul64, node);
}
#endif

void InstructionSelector::VisitInt32MulHigh(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitUint32MulHigh(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kPPC_Div32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kPPC_Div64, node);
}
#endif

void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kPPC_DivU32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kPPC_DivU64, node);
}
#endif

void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kPPC_Mod32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kPPC_Mod64, node);
}
#endif

void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kPPC_ModU32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kPPC_ModU64, node);
}
#endif

void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kPPC_Float32ToDouble, node);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kPPC_Int32ToFloat32, node);
}

void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kPPC_Uint32ToFloat32, node);
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kPPC_Int32ToDouble, node);
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kPPC_Uint32ToDouble, node);
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kPPC_DoubleToInt32, node);
}

void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kPPC_DoubleToUint32, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kPPC_DoubleToUint32, node);
}

void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord8, node);
}

void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord16, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
}

void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
  VisitRR(this, kPPC_DoubleToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
}

void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
  DCHECK(SmiValuesAre31Bits());
  DCHECK(COMPRESS_POINTERS_BOOL);
  EmitIdentity(node);
}

void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord32, node);
}

void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord8, node);
}

void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord16, node);
}

void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_ExtendSignWord32, node);
}

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_Uint32ToUint64, node);
}

void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kPPC_DoubleToUint64, node);
}

void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
  VisitRR(this, kPPC_DoubleToInt64, node);
}
#endif

void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kPPC_DoubleToFloat32, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kPPC_DoubleToInt32, node);
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kPPC_DoubleToInt32, node);
}

void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kPPC_DoubleToUint32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kPPC_Int64ToInt32, node);
}

void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kPPC_Int64ToFloat32, node);
}

void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kPPC_Int64ToDouble, node);
}

void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
  VisitRR(this, kPPC_Int64ToDouble, node);
}

void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kPPC_Uint64ToFloat32, node);
}

void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kPPC_Uint64ToDouble, node);
}
#endif

void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kPPC_BitcastFloat32ToInt32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kPPC_BitcastDoubleToInt64, node);
}
#endif

void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kPPC_BitcastInt32ToFloat32, node);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kPPC_BitcastInt64ToDouble, node);
}
#endif

void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kPPC_AddDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  // TODO(mbrandy): detect multiply-add
  VisitRRR(this, kPPC_AddDouble, node);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  // TODO(mbrandy): detect multiply-subtract
  VisitRRR(this, kPPC_SubDouble, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  // TODO(mbrandy): detect negate
  VisitRRR(this, kPPC_MulDouble, node);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kPPC_DivDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kPPC_DivDouble, node);
}

void InstructionSelector::VisitFloat64Mod(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1),
       g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRR(this, kPPC_MaxDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRR(this, kPPC_MaxDouble, node);
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kPPC_Float64SilenceNaN, node);
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRR(this, kPPC_MinDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRR(this, kPPC_MinDouble, node);
}

void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kPPC_AbsDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kPPC_AbsDouble, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  PPCOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  PPCOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
       g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kPPC_SqrtDouble, node);
}

void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kPPC_FloorDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kPPC_FloorDouble, node);
}

void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kPPC_CeilDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kPPC_CeilDouble, node);
}

void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kPPC_TruncateDouble | MiscField::encode(1), node);
}

void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kPPC_TruncateDouble, node);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRR(this, kPPC_RoundDouble, node);
}

void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kPPC_NegDouble, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kPPC_NegDouble, node);
}

void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
                                         kInt16Imm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, kInt16Imm,
                                &cont);
}

void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
                                         kInt16Imm_Negate, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
                                kInt16Imm_Negate, &cont);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm, &cont);
}

void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
}
#endif

static bool CompareLogical(FlagsContinuation* cont) {
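  // Returns true when the continuation's condition is an unsigned comparison;
  // callers use this to select the unsigned immediate mode for the compare.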
  switch (cont->condition()) {
    case kUnsignedLessThan:
    case kUnsignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan:
      return true;
    default:
      return false;
  }
  UNREACHABLE();
}

namespace {

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  PPCOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
  VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
}

#if V8_TARGET_ARCH_PPC64
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
  VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
}
#endif

// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  PPCOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
               g.UseRegister(right), cont);
}

// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  PPCOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
1503 1504 1505
               g.UseRegister(right), cont);
}

1506
}  // namespace
1507 1508

// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_PPC64
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
#endif
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    this, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt32MulWithOverflow(this, node, cont);
#if V8_TARGET_ARCH_PPC64
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64,
                                                     kInt16Imm, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub,
                                                     kInt16Imm_Negate, cont);
#endif
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord32And:
        // TODO(mbrandy): opportunity for rlwinm?
        return VisitWordCompare(this, value, kPPC_Tst32, cont, true,
                                kInt16Imm_Unsigned);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt32Add:
// case IrOpcode::kWord32Or:
// case IrOpcode::kWord32Xor:
// case IrOpcode::kWord32Sar:
// case IrOpcode::kWord32Shl:
// case IrOpcode::kWord32Shr:
// case IrOpcode::kWord32Ror:
#if V8_TARGET_ARCH_PPC64
      case IrOpcode::kInt64Sub:
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kWord64And:
        // TODO(mbrandy): opportunity for rldic?
        return VisitWordCompare(this, value, kPPC_Tst64, cont, true,
                                kInt16Imm_Unsigned);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt64Add:
// case IrOpcode::kWord64Or:
// case IrOpcode::kWord64Xor:
// case IrOpcode::kWord64Sar:
// case IrOpcode::kWord64Shl:
// case IrOpcode::kWord64Shr:
// case IrOpcode::kWord64Ror:
#endif
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  PPCOperandGenerator g(this);
  VisitCompare(this, kPPC_Cmp32, g.UseRegister(value), g.TempImmediate(0),
               cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  PPCOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
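    // Estimate the space and time cost of a jump table versus a tree of
    // conditional jumps (time is weighted 3x); the table is chosen only when
    // it is no more expensive and the value range fits the limit above.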
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kPPC_Sub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}

void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
#endif

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
    return EmitInt32MulWithOverflow(this, node, &cont);
  }
  FlagsContinuation cont;
  EmitInt32MulWithOverflow(this, node, &cont);
}

void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  PPCOperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kStackFrameExtraParamSlot;
    for (PushParameter input : (*arguments)) {
      if (input.node == nullptr) continue;
      Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot));
      ++slot;
    }
  } else {
    // Push any stack arguments.
    for (PushParameter input : base::Reversed(*arguments)) {
      // Skip any alignment holes in pushed nodes.
      if (input.node == nullptr) continue;
      Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node));
    }
  }
}

bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  PPCOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
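  // If the other 32-bit half is produced by a covered Float64InsertHighWord32,
  // combine both halves with a single kPPC_DoubleConstruct.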
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    left = left->InputAt(1);
    Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
         g.UseRegister(right));
    return;
  }
  Emit(kPPC_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  PPCOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
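  // Mirror of the case above: fuse a covered Float64InsertLowWord32 input
  // into a single kPPC_DoubleConstruct.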
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    left = left->InputAt(1);
    Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
         g.UseRegister(left));
    return;
  }
  Emit(kPPC_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

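// A full memory barrier is lowered to kPPC_Sync.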
void InstructionSelector::VisitMemoryBarrier(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Sync, g.NoOutput());
}

void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  VisitStore(node);
}

void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  VisitStore(node);
}

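// Shared routine for 32- and 64-bit atomic exchange. Base, index, and value
// are placed in unique registers (MRR addressing) so they do not alias the
// result register of the exchange.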
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
                         ArchOpcode opcode) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  AddressingMode addressing_mode = kMode_MRR;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kPPC_AtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kPPC_AtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kPPC_AtomicExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicExchange(this, node, opcode);
}

void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kPPC_AtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kPPC_AtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kPPC_AtomicExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kPPC_AtomicExchangeWord64;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicExchange(this, node, opcode);
}

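// Shared routine for 32- and 64-bit atomic compare-exchange. The address
// (base + index), old value, and new value are all kept in unique registers,
// and the result is defined as a separate register output.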
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
                                ArchOpcode opcode) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);

  AddressingMode addressing_mode = kMode_MRR;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);

  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  selector->Emit(code, output_count, outputs, input_count, inputs);
}

void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kPPC_AtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kPPC_AtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kPPC_AtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicCompareExchange(this, node, opcode);
}

void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Uint8()) {
    opcode = kPPC_AtomicCompareExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kPPC_AtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kPPC_AtomicCompareExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kPPC_AtomicCompareExchangeWord64;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicCompareExchange(this, node, opcode);
}

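// Shared routine for atomic read-modify-write operations (add, sub, and, or,
// xor); the concrete opcode is selected from the operation's MachineType.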
void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
                                ArchOpcode int8_op, ArchOpcode uint8_op,
                                ArchOpcode int16_op, ArchOpcode uint16_op,
                                ArchOpcode int32_op, ArchOpcode uint32_op,
                                ArchOpcode int64_op, ArchOpcode uint64_op) {
  PPCOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  MachineType type = AtomicOpType(node->op());

  ArchOpcode opcode = kArchNop;

  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32()) {
    opcode = int32_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Int64()) {
    opcode = int64_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
    return;
  }

  AddressingMode addressing_mode = kMode_MRR;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[3];

  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);

  InstructionOperand outputs[1];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  selector->Emit(code, output_count, outputs, input_count, inputs);
}

void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  // Unused
  UNREACHABLE();
}

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
    ArchOpcode uint64_op) {
  // Unused
  UNREACHABLE();
}

#define VISIT_ATOMIC_BINOP(op)                                     \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {    \
    VisitAtomicBinaryOperation(                                    \
        this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
        kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16,           \
        kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32,           \
        kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64);          \
  }                                                                \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {    \
    VisitAtomicBinaryOperation(                                    \
        this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
        kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16,           \
        kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32,           \
        kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64);          \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                              \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    UNIMPLEMENTED();                                                     \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE

void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI16x8RoundingAverageU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16RoundingAverageU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128AndNot(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  PPCOperandGenerator g(this);

  int reverse_slot = 0;
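  // Results returned on the caller's frame are read back into registers with
  // kPPC_Peek, addressed by the slot offset accumulated in reverse_slot.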
  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      }
      Emit(kPPC_Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
    reverse_slot += output.location.GetSizeInPointers();
  }
}

void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); }

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kWord64Popcnt;
  // We omit kWord32ShiftIsSafe as s[rl]w use 0x3F as a mask rather than 0x1F.
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8