// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/effect-control-linearizer.h"

#include "include/v8-fast-api-calls.h"
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/common/ptr-compr-inl.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/fast-api-calls.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/memory-lowering.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
#include "src/compiler/select-lowering.h"
#include "src/execution/frames.h"
#include "src/heap/factory-inl.h"
#include "src/objects/heap-number.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/turbofan-types.h"

namespace v8 {
namespace internal {
namespace compiler {

39 40
enum class MaintainSchedule { kMaintain, kDiscard };

41 42 43
class EffectControlLinearizer {
 public:
  EffectControlLinearizer(JSGraph* js_graph, Schedule* schedule,
44
                          JSGraphAssembler* graph_assembler, Zone* temp_zone,
45 46
                          SourcePositionTable* source_positions,
                          NodeOriginTable* node_origins,
47 48
                          MaintainSchedule maintain_schedule,
                          JSHeapBroker* broker)
49 50 51
      : js_graph_(js_graph),
        schedule_(schedule),
        temp_zone_(temp_zone),
52
        maintain_schedule_(maintain_schedule),
53 54
        source_positions_(source_positions),
        node_origins_(node_origins),
55
        broker_(broker),
56
        graph_assembler_(graph_assembler),
57
        frame_state_zapper_(nullptr) {}
58 59

  void Run();
60

61
 private:
62 63
  void UpdateEffectControlForNode(Node* node);
  void ProcessNode(Node* node, Node** frame_state);
64

65
  bool TryWireInStateEffect(Node* node, Node* frame_state);
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84
  Node* LowerChangeBitToTagged(Node* node);
  Node* LowerChangeInt31ToTaggedSigned(Node* node);
  Node* LowerChangeInt32ToTagged(Node* node);
  Node* LowerChangeInt64ToTagged(Node* node);
  Node* LowerChangeUint32ToTagged(Node* node);
  Node* LowerChangeUint64ToTagged(Node* node);
  Node* LowerChangeFloat64ToTagged(Node* node);
  Node* LowerChangeFloat64ToTaggedPointer(Node* node);
  Node* LowerChangeTaggedSignedToInt32(Node* node);
  Node* LowerChangeTaggedSignedToInt64(Node* node);
  Node* LowerChangeTaggedToBit(Node* node);
  Node* LowerChangeTaggedToInt32(Node* node);
  Node* LowerChangeTaggedToUint32(Node* node);
  Node* LowerChangeTaggedToInt64(Node* node);
  Node* LowerChangeTaggedToTaggedSigned(Node* node);
  Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
  void LowerCheckMaps(Node* node, Node* frame_state);
  Node* LowerCompareMaps(Node* node);
  Node* LowerCheckNumber(Node* node, Node* frame_state);
85
  Node* LowerCheckClosure(Node* node, Node* frame_state);
86 87 88
  Node* LowerCheckReceiver(Node* node, Node* frame_state);
  Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
  Node* LowerCheckString(Node* node, Node* frame_state);
89
  Node* LowerCheckBigInt(Node* node, Node* frame_state);
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110
  Node* LowerCheckSymbol(Node* node, Node* frame_state);
  void LowerCheckIf(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
  Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
  Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
  Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
  Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
  Node* LowerCheckedUint32Bounds(Node* node, Node* frame_state);
  Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
  Node* LowerCheckedUint64Bounds(Node* node, Node* frame_state);
  Node* LowerCheckedUint64ToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedUint64ToTaggedSigned(Node* node, Node* frame_state);
  Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedFloat64ToInt64(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
111
  Node* LowerCheckedTaggedToArrayIndex(Node* node, Node* frame_state);
112 113 114 115 116
  Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedToInt64(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
117
  Node* LowerChangeInt64ToBigInt(Node* node);
118
  Node* LowerChangeUint64ToBigInt(Node* node);
119
  Node* LowerTruncateBigIntToWord64(Node* node);
120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152
  Node* LowerChangeTaggedToFloat64(Node* node);
  void TruncateTaggedPointerToBit(Node* node, GraphAssemblerLabel<1>* done);
  Node* LowerTruncateTaggedToBit(Node* node);
  Node* LowerTruncateTaggedPointerToBit(Node* node);
  Node* LowerTruncateTaggedToFloat64(Node* node);
  Node* LowerTruncateTaggedToWord32(Node* node);
  Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
  Node* LowerAllocate(Node* node);
  Node* LowerNumberToString(Node* node);
  Node* LowerObjectIsArrayBufferView(Node* node);
  Node* LowerObjectIsBigInt(Node* node);
  Node* LowerObjectIsCallable(Node* node);
  Node* LowerObjectIsConstructor(Node* node);
  Node* LowerObjectIsDetectableCallable(Node* node);
  Node* LowerObjectIsMinusZero(Node* node);
  Node* LowerNumberIsMinusZero(Node* node);
  Node* LowerObjectIsNaN(Node* node);
  Node* LowerNumberIsNaN(Node* node);
  Node* LowerObjectIsNonCallable(Node* node);
  Node* LowerObjectIsNumber(Node* node);
  Node* LowerObjectIsReceiver(Node* node);
  Node* LowerObjectIsSmi(Node* node);
  Node* LowerObjectIsString(Node* node);
  Node* LowerObjectIsSymbol(Node* node);
  Node* LowerObjectIsUndetectable(Node* node);
  Node* LowerNumberIsFloat64Hole(Node* node);
  Node* LowerNumberIsFinite(Node* node);
  Node* LowerObjectIsFiniteNumber(Node* node);
  Node* LowerNumberIsInteger(Node* node);
  Node* LowerObjectIsInteger(Node* node);
  Node* LowerNumberIsSafeInteger(Node* node);
  Node* LowerObjectIsSafeInteger(Node* node);
  Node* LowerArgumentsLength(Node* node);
153
  Node* LowerRestLength(Node* node);
154 155 156 157 158 159 160 161 162 163 164
  Node* LowerNewDoubleElements(Node* node);
  Node* LowerNewSmiOrObjectElements(Node* node);
  Node* LowerNewArgumentsElements(Node* node);
  Node* LowerNewConsString(Node* node);
  Node* LowerSameValue(Node* node);
  Node* LowerSameValueNumbersOnly(Node* node);
  Node* LowerNumberSameValue(Node* node);
  Node* LowerDeadValue(Node* node);
  Node* LowerStringConcat(Node* node);
  Node* LowerStringToNumber(Node* node);
  Node* LowerStringCharCodeAt(Node* node);
165
  Node* StringCharCodeAt(Node* receiver, Node* position);
166
  Node* LowerStringCodePointAt(Node* node);
167 168 169 170 171 172
  Node* LowerStringToLowerCaseIntl(Node* node);
  Node* LowerStringToUpperCaseIntl(Node* node);
  Node* LowerStringFromSingleCharCode(Node* node);
  Node* LowerStringFromSingleCodePoint(Node* node);
  Node* LowerStringIndexOf(Node* node);
  Node* LowerStringSubstring(Node* node);
173
  Node* LowerStringFromCodePointAt(Node* node);
174 175 176 177
  Node* LowerStringLength(Node* node);
  Node* LowerStringEqual(Node* node);
  Node* LowerStringLessThan(Node* node);
  Node* LowerStringLessThanOrEqual(Node* node);
178
  Node* LowerBigIntAdd(Node* node, Node* frame_state);
179
  Node* LowerBigIntSubtract(Node* node, Node* frame_state);
180
  Node* LowerBigIntNegate(Node* node);
181 182 183 184 185 186 187 188 189 190 191 192 193 194
  Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
  Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
  Node* LowerConvertTaggedHoleToUndefined(Node* node);
  void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
  void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
  Node* LowerTypeOf(Node* node);
  Node* LowerToBoolean(Node* node);
  Node* LowerPlainPrimitiveToNumber(Node* node);
  Node* LowerPlainPrimitiveToWord32(Node* node);
  Node* LowerPlainPrimitiveToFloat64(Node* node);
  Node* LowerEnsureWritableFastElements(Node* node);
  Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
  void LowerTransitionElementsKind(Node* node);
  Node* LowerLoadFieldByIndex(Node* node);
195
  Node* LowerLoadMessage(Node* node);
196 197 198
  Node* AdaptFastCallTypedArrayArgument(Node* node,
                                        ElementsKind expected_elements_kind,
                                        GraphAssemblerLabel<0>* bailout);
199 200
  Node* AdaptFastCallArgument(Node* node, CTypeInfo arg_type,
                              GraphAssemblerLabel<0>* if_error);
201 202 203 204 205 206 207 208 209 210 211

  struct AdaptOverloadedFastCallResult {
    Node* target_address;
    Node* argument;
  };
  AdaptOverloadedFastCallResult AdaptOverloadedFastCallArgument(
      Node* node, const FastApiCallFunctionVector& c_functions,
      const fast_api_call::OverloadsResolutionResult&
          overloads_resolution_result,
      GraphAssemblerLabel<0>* if_error);

212 213 214 215
  Node* WrapFastCall(const CallDescriptor* call_descriptor, int inputs_size,
                     Node** inputs, Node* target,
                     const CFunctionInfo* c_signature, int c_arg_count,
                     Node* stack_slot);
216
  Node* GenerateSlowApiCall(Node* node);
217
  Node* LowerFastApiCall(Node* node);
218 219
  Node* LowerLoadTypedElement(Node* node);
  Node* LowerLoadDataViewElement(Node* node);
220
  Node* LowerLoadStackArgument(Node* node);
221
  void LowerStoreMessage(Node* node);
222 223 224 225 226 227 228 229 230
  void LowerStoreTypedElement(Node* node);
  void LowerStoreDataViewElement(Node* node);
  void LowerStoreSignedSmallElement(Node* node);
  Node* LowerFindOrderedHashMapEntry(Node* node);
  Node* LowerFindOrderedHashMapEntryForInt32Key(Node* node);
  void LowerTransitionAndStoreElement(Node* node);
  void LowerTransitionAndStoreNumberElement(Node* node);
  void LowerTransitionAndStoreNonNumberElement(Node* node);
  void LowerRuntimeAbort(Node* node);
231
  Node* LowerAssertType(Node* node);
232
  Node* LowerFoldConstant(Node* node);
233 234 235 236 237 238 239 240 241 242 243
  Node* LowerConvertReceiver(Node* node);
  Node* LowerDateNow(Node* node);

  // Lowering of optional operators.
  Maybe<Node*> LowerFloat64RoundUp(Node* node);
  Maybe<Node*> LowerFloat64RoundDown(Node* node);
  Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
  Maybe<Node*> LowerFloat64RoundTruncate(Node* node);

  Node* AllocateHeapNumberWithValue(Node* node);
  Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
244
                                   const FeedbackSource& feedback, Node* value,
245 246
                                   Node* frame_state);
  Node* BuildCheckedFloat64ToInt64(CheckForMinusZeroMode mode,
247
                                   const FeedbackSource& feedback, Node* value,
248
                                   Node* frame_state);
249 250
  Node* BuildCheckedFloat64ToIndex(const FeedbackSource& feedback, Node* value,
                                   Node* frame_state);
251
  Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
252
                                                 const FeedbackSource& feedback,
253 254 255 256 257
                                                 Node* value,
                                                 Node* frame_state);
  Node* BuildReverseBytes(ExternalArrayType type, Node* value);
  Node* BuildFloat64RoundDown(Node* value);
  Node* BuildFloat64RoundTruncate(Node* input);
258 259 260 261 262
  template <size_t VarCount, size_t VarCount2>
  void SmiTagOrOverflow(Node* value, GraphAssemblerLabel<VarCount>* if_overflow,
                        GraphAssemblerLabel<VarCount2>* done);
  Node* SmiTagOrDeopt(Node* value, const CheckParameters& params,
                      Node* frame_state);
263 264 265 266 267
  Node* BuildUint32Mod(Node* lhs, Node* rhs);
  Node* ComputeUnseededHash(Node* value);
  Node* LowerStringComparison(Callable const& callable, Node* node);
  Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);

268 269
  Node* BuildTypedArrayDataPointer(Node* base, Node* external);

270
  template <typename... Args>
271
  Node* CallBuiltin(Builtin builtin, Operator::Properties properties, Args...);
272

273
  Node* ChangeBitToTagged(Node* value);
274
  Node* ChangeFloat64ToTagged(Node* value, CheckForMinusZeroMode mode);
275
  Node* ChangeInt32ToSmi(Node* value);
276 277 278 279 280 281
  // In pointer compression, we smi-corrupt. This means the upper bits of a Smi
  // are not important. ChangeTaggedInt32ToSmi has a known tagged int32 as input
  // and takes advantage of the smi corruption by emitting a Bitcast node
  // instead of a Change node in order to save instructions.
  // In non pointer compression, it behaves like ChangeInt32ToSmi.
  Node* ChangeTaggedInt32ToSmi(Node* value);
282
  Node* ChangeInt32ToIntPtr(Node* value);
283
  Node* ChangeInt32ToTagged(Node* value);
284 285 286 287 288
  Node* ChangeInt64ToSmi(Node* value);
  Node* ChangeIntPtrToInt32(Node* value);
  Node* ChangeIntPtrToSmi(Node* value);
  Node* ChangeUint32ToUintPtr(Node* value);
  Node* ChangeUint32ToSmi(Node* value);
289
  Node* ChangeUint32ToTagged(Node* value);
290 291 292 293 294
  Node* ChangeSmiToIntPtr(Node* value);
  Node* ChangeSmiToInt32(Node* value);
  Node* ChangeSmiToInt64(Node* value);
  Node* ObjectIsSmi(Node* value);
  Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
295
  Node* TruncateWordToInt32(Node* value);
296
  Node* MakeWeakForComparison(Node* heap_object);
297
  Node* BuildIsWeakReferenceTo(Node* maybe_object, Node* value);
298
  Node* BuildIsClearedWeakReference(Node* maybe_object);
299
  Node* BuildIsStrongReference(Node* value);
300
  Node* BuildStrongReferenceFromWeakReference(Node* value);
301 302
  Node* SmiMaxValueConstant();
  Node* SmiShiftBitsConstant();
303 304 305 306

  // Pass {bitfield} = {digit} = nullptr to construct the canoncial 0n BigInt.
  Node* BuildAllocateBigInt(Node* bitfield, Node* digit);

307 308 309
  void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
                            ElementsKind to);

310 311 312 313 314 315
  // This function tries to migrate |value| if its map |value_map| is
  // deprecated. It deopts, if either |value_map| isn't deprecated or migration
  // fails.
  void MigrateInstanceOrDeopt(Node* value, Node* value_map, Node* frame_state,
                              FeedbackSource const& feedback_source,
                              DeoptimizeReason reason);
316 317 318
  // Tries to migrate |value| if its map |value_map| is deprecated, but doesn't
  // deopt on failure.
  void TryMigrateInstance(Node* value, Node* value_map);
319

320 321 322 323
  bool should_maintain_schedule() const {
    return maintain_schedule_ == MaintainSchedule::kMaintain;
  }

324 325 326 327 328 329 330 331 332 333 334
  Factory* factory() const { return isolate()->factory(); }
  Isolate* isolate() const { return jsgraph()->isolate(); }
  JSGraph* jsgraph() const { return js_graph_; }
  Graph* graph() const { return js_graph_->graph(); }
  Schedule* schedule() const { return schedule_; }
  Zone* temp_zone() const { return temp_zone_; }
  CommonOperatorBuilder* common() const { return js_graph_->common(); }
  SimplifiedOperatorBuilder* simplified() const {
    return js_graph_->simplified();
  }
  MachineOperatorBuilder* machine() const { return js_graph_->machine(); }
335
  JSGraphAssembler* gasm() const { return graph_assembler_; }
336
  JSHeapBroker* broker() const { return broker_; }
337 338 339 340

  JSGraph* js_graph_;
  Schedule* schedule_;
  Zone* temp_zone_;
341
  MaintainSchedule maintain_schedule_;
342
  RegionObservability region_observability_ = RegionObservability::kObservable;
343
  bool inside_region_ = false;
344 345
  SourcePositionTable* source_positions_;
  NodeOriginTable* node_origins_;
346
  JSHeapBroker* broker_;
347
  JSGraphAssembler* graph_assembler_;
348 349
  Node* frame_state_zapper_;  // For tracking down compiler::Node::New crashes.
};

namespace {

353
struct BlockEffectControlData {
354 355
  Node* current_effect = nullptr;       // New effect.
  Node* current_control = nullptr;      // New control.
356
  Node* current_frame_state = nullptr;  // New frame state.
357 358
};

359 360 361 362 363
class BlockEffectControlMap {
 public:
  explicit BlockEffectControlMap(Zone* temp_zone) : map_(temp_zone) {}

  BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) {
364
    return map_[std::make_pair(from->id().ToInt(), to->id().ToInt())];
365 366 367
  }

  const BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) const {
368
    return map_.at(std::make_pair(from->id().ToInt(), to->id().ToInt()));
369 370 371
  }

 private:
372 373
  using Key = std::pair<int32_t, int32_t>;
  using Map = ZoneMap<Key, BlockEffectControlData>;
374 375 376 377

  Map map_;
};

378 379 380 381 382 383 384 385 386
// Effect phis that need to be updated after the first pass.
struct PendingEffectPhi {
  Node* effect_phi;
  BasicBlock* block;

  PendingEffectPhi(Node* effect_phi, BasicBlock* block)
      : effect_phi(effect_phi), block(block) {}
};

387
void UpdateEffectPhi(Node* node, BasicBlock* block,
388
                     BlockEffectControlMap* block_effects) {
389 390 391
  // Update all inputs to an effect phi with the effects from the given
  // block->effect map.
  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
392 393
  DCHECK_EQ(static_cast<size_t>(node->op()->EffectInputCount()),
            block->PredecessorCount());
394 395 396
  for (int i = 0; i < node->op()->EffectInputCount(); i++) {
    Node* input = node->InputAt(i);
    BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
397 398
    const BlockEffectControlData& block_effect =
        block_effects->For(predecessor, block);
399 400 401
    Node* effect = block_effect.current_effect;
    if (input != effect) {
      node->ReplaceInput(i, effect);
402 403 404 405
    }
  }
}

406
void UpdateBlockControl(BasicBlock* block,
407
                        BlockEffectControlMap* block_effects) {
408 409 410 411 412 413 414
  Node* control = block->NodeAt(0);
  DCHECK(NodeProperties::IsControl(control));

  // Do not rewire the end node.
  if (control->opcode() == IrOpcode::kEnd) return;

  // Update all inputs to the given control node with the correct control.
415
  DCHECK(control->opcode() == IrOpcode::kMerge ||
416 417 418 419
         static_cast<size_t>(control->op()->ControlInputCount()) ==
             block->PredecessorCount());
  if (static_cast<size_t>(control->op()->ControlInputCount()) !=
      block->PredecessorCount()) {
420 421
    return;  // We already re-wired the control inputs of this node.
  }
422 423 424
  for (int i = 0; i < control->op()->ControlInputCount(); i++) {
    Node* input = NodeProperties::GetControlInput(control, i);
    BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
425 426 427 428 429
    const BlockEffectControlData& block_effect =
        block_effects->For(predecessor, block);
    if (input != block_effect.current_control) {
      NodeProperties::ReplaceControlInput(control, block_effect.current_control,
                                          i);
430 431 432 433
    }
  }
}

434
void RemoveRenameNode(Node* node) {
435
  DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
436 437
         IrOpcode::kBeginRegion == node->opcode() ||
         IrOpcode::kTypeGuard == node->opcode());
438 439 440 441 442 443 444 445 446 447 448 449 450 451 452
  // Update the value/context uses to the value input of the finish node and
  // the effect uses to the effect input.
  for (Edge edge : node->use_edges()) {
    DCHECK(!edge.from()->IsDead());
    if (NodeProperties::IsEffectEdge(edge)) {
      edge.UpdateTo(NodeProperties::GetEffectInput(node));
    } else {
      DCHECK(!NodeProperties::IsControlEdge(edge));
      DCHECK(!NodeProperties::IsFrameStateEdge(edge));
      edge.UpdateTo(node->InputAt(0));
    }
  }
  node->Kill();
}

453 454
void TryCloneBranch(Node* node, BasicBlock* block, Zone* temp_zone,
                    Graph* graph, CommonOperatorBuilder* common,
455
                    BlockEffectControlMap* block_effects,
456 457
                    SourcePositionTable* source_positions,
                    NodeOriginTable* node_origins) {
458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508
  DCHECK_EQ(IrOpcode::kBranch, node->opcode());

  // This optimization is a special case of (super)block cloning. It takes an
  // input graph as shown below and clones the Branch node for every predecessor
  // to the Merge, essentially removing the Merge completely. This avoids
  // materializing the bit for the Phi and may offer potential for further
  // branch folding optimizations (i.e. because one or more inputs to the Phi is
  // a constant). Note that there may be more Phi nodes hanging off the Merge,
  // but we can only a certain subset of them currently (actually only Phi and
  // EffectPhi nodes whose uses have either the IfTrue or IfFalse as control
  // input).

  //   Control1 ... ControlN
  //      ^            ^
  //      |            |   Cond1 ... CondN
  //      +----+  +----+     ^         ^
  //           |  |          |         |
  //           |  |     +----+         |
  //          Merge<--+ | +------------+
  //            ^      \|/
  //            |      Phi
  //            |       |
  //          Branch----+
  //            ^
  //            |
  //      +-----+-----+
  //      |           |
  //    IfTrue     IfFalse
  //      ^           ^
  //      |           |

  // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:

  // Control1 Cond1 ... ControlN CondN
  //    ^      ^           ^      ^
  //    \      /           \      /
  //     Branch     ...     Branch
  //       ^                  ^
  //       |                  |
  //   +---+---+          +---+----+
  //   |       |          |        |
  // IfTrue IfFalse ... IfTrue  IfFalse
  //   ^       ^          ^        ^
  //   |       |          |        |
  //   +--+ +-------------+        |
  //      | |  +--------------+ +--+
  //      | |                 | |
  //     Merge               Merge
  //       ^                   ^
  //       |                   |

509 510
  SourcePositionTable::Scope scope(source_positions,
                                   source_positions->GetSourcePosition(node));
511
  NodeOriginTable::Scope origin_scope(node_origins, "clone branch", node);
512 513 514 515 516 517 518 519 520 521 522
  Node* branch = node;
  Node* cond = NodeProperties::GetValueInput(branch, 0);
  if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return;
  Node* merge = NodeProperties::GetControlInput(branch);
  if (merge->opcode() != IrOpcode::kMerge ||
      NodeProperties::GetControlInput(cond) != merge) {
    return;
  }
  // Grab the IfTrue/IfFalse projections of the Branch.
  BranchMatcher matcher(branch);
  // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
523
  NodeVector phis(temp_zone);
524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564
  for (Node* const use : merge->uses()) {
    if (use == branch || use == cond) continue;
    // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
    // Merge. Ideally, we would just clone the nodes (and everything that
    // depends on it to some distant join point), but that requires knowledge
    // about dominance/post-dominance.
    if (!NodeProperties::IsPhi(use)) return;
    for (Edge edge : use->use_edges()) {
      // Right now we can only handle Phi/EffectPhi nodes whose uses are
      // directly control-dependend on either the IfTrue or the IfFalse
      // successor, because we know exactly how to update those uses.
      if (edge.from()->op()->ControlInputCount() != 1) return;
      Node* control = NodeProperties::GetControlInput(edge.from());
      if (NodeProperties::IsPhi(edge.from())) {
        control = NodeProperties::GetControlInput(control, edge.index());
      }
      if (control != matcher.IfTrue() && control != matcher.IfFalse()) return;
    }
    phis.push_back(use);
  }
  BranchHint const hint = BranchHintOf(branch->op());
  int const input_count = merge->op()->ControlInputCount();
  DCHECK_LE(1, input_count);
  Node** const inputs = graph->zone()->NewArray<Node*>(2 * input_count);
  Node** const merge_true_inputs = &inputs[0];
  Node** const merge_false_inputs = &inputs[input_count];
  for (int index = 0; index < input_count; ++index) {
    Node* cond1 = NodeProperties::GetValueInput(cond, index);
    Node* control1 = NodeProperties::GetControlInput(merge, index);
    Node* branch1 = graph->NewNode(common->Branch(hint), cond1, control1);
    merge_true_inputs[index] = graph->NewNode(common->IfTrue(), branch1);
    merge_false_inputs[index] = graph->NewNode(common->IfFalse(), branch1);
  }
  Node* const merge_true = matcher.IfTrue();
  Node* const merge_false = matcher.IfFalse();
  merge_true->TrimInputCount(0);
  merge_false->TrimInputCount(0);
  for (int i = 0; i < input_count; ++i) {
    merge_true->AppendInput(graph->zone(), merge_true_inputs[i]);
    merge_false->AppendInput(graph->zone(), merge_false_inputs[i]);
  }
565
  DCHECK_EQ(2u, block->SuccessorCount());
566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593
  NodeProperties::ChangeOp(matcher.IfTrue(), common->Merge(input_count));
  NodeProperties::ChangeOp(matcher.IfFalse(), common->Merge(input_count));
  int const true_index =
      block->SuccessorAt(0)->NodeAt(0) == matcher.IfTrue() ? 0 : 1;
  BlockEffectControlData* true_block_data =
      &block_effects->For(block, block->SuccessorAt(true_index));
  BlockEffectControlData* false_block_data =
      &block_effects->For(block, block->SuccessorAt(true_index ^ 1));
  for (Node* const phi : phis) {
    for (int index = 0; index < input_count; ++index) {
      inputs[index] = phi->InputAt(index);
    }
    inputs[input_count] = merge_true;
    Node* phi_true = graph->NewNode(phi->op(), input_count + 1, inputs);
    inputs[input_count] = merge_false;
    Node* phi_false = graph->NewNode(phi->op(), input_count + 1, inputs);
    if (phi->UseCount() == 0) {
      DCHECK_EQ(phi->opcode(), IrOpcode::kEffectPhi);
    } else {
      for (Edge edge : phi->use_edges()) {
        Node* control = NodeProperties::GetControlInput(edge.from());
        if (NodeProperties::IsPhi(edge.from())) {
          control = NodeProperties::GetControlInput(control, edge.index());
        }
        DCHECK(control == matcher.IfTrue() || control == matcher.IfFalse());
        edge.UpdateTo((control == matcher.IfTrue()) ? phi_true : phi_false);
      }
    }
594 595 596 597
    if (phi->opcode() == IrOpcode::kEffectPhi) {
      true_block_data->current_effect = phi_true;
      false_block_data->current_effect = phi_false;
    }
598 599 600 601 602 603 604 605 606 607 608
    phi->Kill();
  }
  // Fix up IfTrue and IfFalse and kill all dead nodes.
  if (branch == block->control_input()) {
    true_block_data->current_control = merge_true;
    false_block_data->current_control = merge_false;
  }
  branch->Kill();
  cond->Kill();
  merge->Kill();
}

}  // namespace

void EffectControlLinearizer::Run() {
613
  BlockEffectControlMap block_effects(temp_zone());
614
  ZoneVector<PendingEffectPhi> pending_effect_phis(temp_zone());
615
  ZoneVector<BasicBlock*> pending_block_controls(temp_zone());
616 617
  NodeVector inputs_buffer(temp_zone());

618 619
  // TODO(rmcilroy) We should not depend on having rpo_order on schedule, and
  // instead just do our own RPO walk here.
620
  for (BasicBlock* block : *(schedule()->rpo_order())) {
621 622 623 624 625 626
    if (block != schedule()->start() && block->PredecessorCount() == 0) {
      // Block has been removed from the schedule by a preceeding unreachable
      // node, just skip it.
      continue;
    }

627
    gasm()->Reset();
628

629 630 631
    BasicBlock::iterator instr = block->begin();
    BasicBlock::iterator end_instr = block->end();

632
    // The control node should be the first.
633 634 635
    Node* control = *instr;
    gasm()->AddNode(control);

636
    DCHECK(NodeProperties::IsControl(control));
637
    bool has_incoming_backedge = IrOpcode::kLoop == control->opcode();
638
    // Update the control inputs.
639
    if (has_incoming_backedge) {
640
      // If there are back edges, we need to update later because we have not
641
      // computed the control yet.
642 643 644 645 646
      pending_block_controls.push_back(block);
    } else {
      // If there are no back edges, we can update now.
      UpdateBlockControl(block, &block_effects);
    }
647 648 649
    instr++;

    // Iterate over the phis and update the effect phis.
650
    Node* effect_phi = nullptr;
651
    Node* terminate = nullptr;
652 653
    for (; instr != end_instr; instr++) {
      Node* node = *instr;
654 655 656
      // Only go through the phis and effect phis.
      if (node->opcode() == IrOpcode::kEffectPhi) {
        // There should be at most one effect phi in a block.
657
        DCHECK_NULL(effect_phi);
658 659
        // IfException blocks should not have effect phis.
        DCHECK_NE(IrOpcode::kIfException, control->opcode());
660
        effect_phi = node;
661
      } else if (node->opcode() == IrOpcode::kPhi) {
662
        // Just skip phis.
663
      } else if (node->opcode() == IrOpcode::kTerminate) {
664
        DCHECK_NULL(terminate);
665 666 667 668
        terminate = node;
      } else {
        break;
      }
669
      gasm()->AddNode(node);
670 671
    }

672 673
    if (effect_phi) {
      // Make sure we update the inputs to the incoming blocks' effects.
674
      if (has_incoming_backedge) {
675 676 677 678 679
        // In case of loops, we do not update the effect phi immediately
        // because the back predecessor has not been handled yet. We just
        // record the effect phi for later processing.
        pending_effect_phis.push_back(PendingEffectPhi(effect_phi, block));
      } else {
680
        UpdateEffectPhi(effect_phi, block, &block_effects);
681 682 683 684
      }
    }

    Node* effect = effect_phi;
685 686 687 688 689 690 691 692 693 694 695 696
    if (effect == nullptr) {
      // There was no effect phi.
      if (block == schedule()->start()) {
        // Start block => effect is start.
        DCHECK_EQ(graph()->start(), control);
        effect = graph()->start();
      } else if (control->opcode() == IrOpcode::kEnd) {
        // End block is just a dummy, no effect needed.
        DCHECK_EQ(BasicBlock::kNone, block->control());
        DCHECK_EQ(1u, block->size());
        effect = nullptr;
      } else {
697
        // If all the predecessors have the same effect, we can use it as our
698
        // current effect.
699 700 701 702 703
        for (size_t i = 0; i < block->PredecessorCount(); ++i) {
          const BlockEffectControlData& data =
              block_effects.For(block->PredecessorAt(i), block);
          if (!effect) effect = data.current_effect;
          if (data.current_effect != effect) {
704 705 706 707 708 709 710 711 712
            effect = nullptr;
            break;
          }
        }
        if (effect == nullptr) {
          DCHECK_NE(IrOpcode::kIfException, control->opcode());
          // The input blocks do not have the same effect. We have
          // to create an effect phi node.
          inputs_buffer.clear();
713
          inputs_buffer.resize(block->PredecessorCount(), jsgraph()->Dead());
714 715 716 717
          inputs_buffer.push_back(control);
          effect = graph()->NewNode(
              common()->EffectPhi(static_cast<int>(block->PredecessorCount())),
              static_cast<int>(inputs_buffer.size()), &(inputs_buffer.front()));
718
          gasm()->AddNode(effect);
719 720 721 722
          // For loops, we update the effect phi node later to break cycles.
          if (control->opcode() == IrOpcode::kLoop) {
            pending_effect_phis.push_back(PendingEffectPhi(effect, block));
          } else {
723
            UpdateEffectPhi(effect, block, &block_effects);
724
          }
725 726
        } else if (control->opcode() == IrOpcode::kIfException) {
          // The IfException is connected into the effect chain, so we need
727 728 729
          // to update the effect here.
          NodeProperties::ReplaceEffectInput(control, effect);
          effect = control;
730 731 732 733 734 735 736 737 738
        }
      }
    }

    // Fixup the Terminate node.
    if (terminate != nullptr) {
      NodeProperties::ReplaceEffectInput(terminate, effect);
    }

739 740 741 742 743 744 745
    // The frame state at block entry is determined by the frame states leaving
    // all predecessors. In case there is no frame state dominating this block,
    // we can rely on a checkpoint being present before the next deoptimization.
    Node* frame_state = nullptr;
    if (block != schedule()->start()) {
      // If all the predecessors have the same effect, we can use it
      // as our current effect.
746 747
      frame_state =
          block_effects.For(block->PredecessorAt(0), block).current_frame_state;
748
      for (size_t i = 1; i < block->PredecessorCount(); i++) {
749 750
        if (block_effects.For(block->PredecessorAt(i), block)
                .current_frame_state != frame_state) {
751
          frame_state = nullptr;
752
          frame_state_zapper_ = graph()->end();
753 754 755 756 757
          break;
        }
      }
    }

758 759
    gasm()->InitializeEffectControl(effect, control);

760
    // Process the ordinary instructions.
761 762 763
    for (; instr != end_instr; instr++) {
      Node* node = *instr;
      ProcessNode(node, &frame_state);
764 765 766 767 768 769 770 771 772 773 774 775
    }

    switch (block->control()) {
      case BasicBlock::kGoto:
      case BasicBlock::kNone:
        break;
      case BasicBlock::kCall:
      case BasicBlock::kTailCall:
      case BasicBlock::kSwitch:
      case BasicBlock::kReturn:
      case BasicBlock::kDeoptimize:
      case BasicBlock::kThrow:
776
      case BasicBlock::kBranch:
777 778
        UpdateEffectControlForNode(block->control_input());
        gasm()->UpdateEffectControlWith(block->control_input());
779
        break;
780 781
    }

782 783
    if (!should_maintain_schedule() &&
        block->control() == BasicBlock::kBranch) {
784 785 786 787 788
      TryCloneBranch(block->control_input(), block, temp_zone(), graph(),
                     common(), &block_effects, source_positions_,
                     node_origins_);
    }

789 790 791 792
    // Store the effect, control and frame state for later use.
    for (BasicBlock* successor : block->successors()) {
      BlockEffectControlData* data = &block_effects.For(block, successor);
      if (data->current_effect == nullptr) {
793
        data->current_effect = gasm()->effect();
794 795
      }
      if (data->current_control == nullptr) {
796
        data->current_control = gasm()->control();
797 798 799
      }
      data->current_frame_state = frame_state;
    }
800 801
  }

802 803 804
  for (BasicBlock* pending_block_control : pending_block_controls) {
    UpdateBlockControl(pending_block_control, &block_effects);
  }
805 806 807
  // Update the incoming edges of the effect phis that could not be processed
  // during the first pass (because they could have incoming back edges).
  for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
808
    UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
809
                    &block_effects);
810
  }
811

812 813 814 815 816 817 818
  schedule_->rpo_order()->clear();
}

// Rewires {node}'s effect and control inputs to the graph assembler's current
// effect and control. Nodes that consume an effect get exactly one effect
// input replaced; nodes with effect outputs but no effect inputs must be
// effect-chain starters (Start).
void EffectControlLinearizer::UpdateEffectControlForNode(Node* node) {
  // If the node takes an effect, replace with the current one.
  if (node->op()->EffectInputCount() > 0) {
    DCHECK_EQ(1, node->op()->EffectInputCount());
    NodeProperties::ReplaceEffectInput(node, gasm()->effect());
  } else {
    // New effect chain is only started with a Start or ValueEffect node.
    DCHECK(node->op()->EffectOutputCount() == 0 ||
           node->opcode() == IrOpcode::kStart);
  }

  // Rewire control inputs.
  for (int i = 0; i < node->op()->ControlInputCount(); i++) {
    NodeProperties::ReplaceControlInput(node, gasm()->control(), i);
  }
}

832
// Processes a single scheduled node: either lowers it through
// TryWireInStateEffect (using *frame_state for eager deopts), or threads it
// onto the current effect/control chain. Also maintains the frame-state
// invariant (zapping after visible effects), handles allocation-region
// markers, checkpoints, and Unreachable nodes.
void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state) {
  SourcePositionTable::Scope scope(source_positions_,
                                   source_positions_->GetSourcePosition(node));
  NodeOriginTable::Scope origin_scope(node_origins_, "process node", node);

  // If basic block is unreachable after this point, update the node's effect
  // and control inputs to mark it as dead, but don't process further.
  if (gasm()->effect() == jsgraph()->Dead()) {
    UpdateEffectControlForNode(node);
    return;
  }

  // If the node needs to be wired into the effect/control chain, do this
  // here. Pass current frame state for lowering to eager deoptimization.
  if (TryWireInStateEffect(node, *frame_state)) {
    return;
  }

  // If the node has a visible effect, then there must be a checkpoint in the
  // effect chain before we are allowed to place another eager deoptimization
  // point. We zap the frame state to ensure this invariant is maintained.
  if (region_observability_ == RegionObservability::kObservable &&
      !node->op()->HasProperty(Operator::kNoWrite)) {
    *frame_state = nullptr;
    frame_state_zapper_ = node;
  }

  // Remove the end markers of 'atomic' allocation region because the
  // region should be wired-in now.
  if (node->opcode() == IrOpcode::kFinishRegion) {
    // Reset the current region observability.
    region_observability_ = RegionObservability::kObservable;
    inside_region_ = false;
    // Update the value uses to the value input of the finish node and
    // the effect uses to the effect input.
    return RemoveRenameNode(node);
  }
  if (node->opcode() == IrOpcode::kBeginRegion) {
    // Determine the observability for this region and use that for all
    // nodes inside the region (i.e. ignore the absence of kNoWrite on
    // StoreField and other operators).
    DCHECK_NE(RegionObservability::kNotObservable, region_observability_);
    region_observability_ = RegionObservabilityOf(node->op());
    inside_region_ = true;
    // Update the value uses to the value input of the finish node and
    // the effect uses to the effect input.
    return RemoveRenameNode(node);
  }
  if (node->opcode() == IrOpcode::kTypeGuard) {
    return RemoveRenameNode(node);
  }

  // Special treatment for checkpoint nodes.
  if (node->opcode() == IrOpcode::kCheckpoint) {
    // Unlink the check point; effect uses will be updated to the incoming
    // effect that is passed. The frame state is preserved for lowering.
    DCHECK_EQ(RegionObservability::kObservable, region_observability_);
    *frame_state = NodeProperties::GetFrameStateInput(node);
    return;
  }

  if (node->opcode() == IrOpcode::kStoreField) {
    // Mark stores outside a region as non-initializing and non-transitioning.
    if (!inside_region_) {
      const FieldAccess access = FieldAccessOf(node->op());
      NodeProperties::ChangeOp(node, simplified()->StoreField(access, false));
    }
  }

  // The IfSuccess nodes should always start a basic block (and basic block
  // start nodes are not handled in the ProcessNode method).
  DCHECK_NE(IrOpcode::kIfSuccess, node->opcode());

  UpdateEffectControlForNode(node);

  gasm()->AddNode(node);

  if (node->opcode() == IrOpcode::kUnreachable) {
    // Break the effect chain on {Unreachable} and reconnect to the graph end.
    // Mark the following code for deletion by connecting to the {Dead} node.
    gasm()->ConnectUnreachableToEnd();
  }
}

916
bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
917
                                                   Node* frame_state) {
918
  Node* result = nullptr;
919
  switch (node->opcode()) {
920
    case IrOpcode::kChangeBitToTagged:
921
      result = LowerChangeBitToTagged(node);
922 923
      break;
    case IrOpcode::kChangeInt31ToTaggedSigned:
924
      result = LowerChangeInt31ToTaggedSigned(node);
925
      break;
926
    case IrOpcode::kChangeInt32ToTagged:
927
      result = LowerChangeInt32ToTagged(node);
928
      break;
929 930 931
    case IrOpcode::kChangeInt64ToTagged:
      result = LowerChangeInt64ToTagged(node);
      break;
932
    case IrOpcode::kChangeUint32ToTagged:
933
      result = LowerChangeUint32ToTagged(node);
934
      break;
935 936 937
    case IrOpcode::kChangeUint64ToTagged:
      result = LowerChangeUint64ToTagged(node);
      break;
938
    case IrOpcode::kChangeFloat64ToTagged:
939
      result = LowerChangeFloat64ToTagged(node);
940
      break;
941
    case IrOpcode::kChangeFloat64ToTaggedPointer:
942
      result = LowerChangeFloat64ToTaggedPointer(node);
943
      break;
944
    case IrOpcode::kChangeTaggedSignedToInt32:
945
      result = LowerChangeTaggedSignedToInt32(node);
946
      break;
947 948 949
    case IrOpcode::kChangeTaggedSignedToInt64:
      result = LowerChangeTaggedSignedToInt64(node);
      break;
950
    case IrOpcode::kChangeTaggedToBit:
951
      result = LowerChangeTaggedToBit(node);
952
      break;
953
    case IrOpcode::kChangeTaggedToInt32:
954
      result = LowerChangeTaggedToInt32(node);
955 956
      break;
    case IrOpcode::kChangeTaggedToUint32:
957
      result = LowerChangeTaggedToUint32(node);
958
      break;
959 960 961
    case IrOpcode::kChangeTaggedToInt64:
      result = LowerChangeTaggedToInt64(node);
      break;
962
    case IrOpcode::kChangeTaggedToFloat64:
963
      result = LowerChangeTaggedToFloat64(node);
964
      break;
965 966 967
    case IrOpcode::kChangeTaggedToTaggedSigned:
      result = LowerChangeTaggedToTaggedSigned(node);
      break;
968
    case IrOpcode::kTruncateTaggedToBit:
969
      result = LowerTruncateTaggedToBit(node);
970
      break;
971 972 973
    case IrOpcode::kTruncateTaggedPointerToBit:
      result = LowerTruncateTaggedPointerToBit(node);
      break;
974
    case IrOpcode::kTruncateTaggedToFloat64:
975
      result = LowerTruncateTaggedToFloat64(node);
976
      break;
977 978 979
    case IrOpcode::kCheckClosure:
      result = LowerCheckClosure(node, frame_state);
      break;
980
    case IrOpcode::kCheckMaps:
981
      LowerCheckMaps(node, frame_state);
982
      break;
983 984 985
    case IrOpcode::kCompareMaps:
      result = LowerCompareMaps(node);
      break;
986
    case IrOpcode::kCheckNumber:
987
      result = LowerCheckNumber(node, frame_state);
988
      break;
989 990 991
    case IrOpcode::kCheckReceiver:
      result = LowerCheckReceiver(node, frame_state);
      break;
992 993
    case IrOpcode::kCheckReceiverOrNullOrUndefined:
      result = LowerCheckReceiverOrNullOrUndefined(node, frame_state);
994
      break;
995 996 997
    case IrOpcode::kCheckSymbol:
      result = LowerCheckSymbol(node, frame_state);
      break;
998
    case IrOpcode::kCheckString:
999
      result = LowerCheckString(node, frame_state);
1000
      break;
1001 1002 1003
    case IrOpcode::kCheckBigInt:
      result = LowerCheckBigInt(node, frame_state);
      break;
1004
    case IrOpcode::kCheckInternalizedString:
1005
      result = LowerCheckInternalizedString(node, frame_state);
1006
      break;
1007
    case IrOpcode::kCheckIf:
1008
      LowerCheckIf(node, frame_state);
1009
      break;
1010
    case IrOpcode::kCheckedInt32Add:
1011
      result = LowerCheckedInt32Add(node, frame_state);
1012 1013
      break;
    case IrOpcode::kCheckedInt32Sub:
1014
      result = LowerCheckedInt32Sub(node, frame_state);
1015
      break;
1016
    case IrOpcode::kCheckedInt32Div:
1017
      result = LowerCheckedInt32Div(node, frame_state);
1018 1019
      break;
    case IrOpcode::kCheckedInt32Mod:
1020
      result = LowerCheckedInt32Mod(node, frame_state);
1021
      break;
1022
    case IrOpcode::kCheckedUint32Div:
1023
      result = LowerCheckedUint32Div(node, frame_state);
1024 1025
      break;
    case IrOpcode::kCheckedUint32Mod:
1026
      result = LowerCheckedUint32Mod(node, frame_state);
1027
      break;
1028
    case IrOpcode::kCheckedInt32Mul:
1029
      result = LowerCheckedInt32Mul(node, frame_state);
1030
      break;
1031
    case IrOpcode::kCheckedInt32ToTaggedSigned:
1032
      result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
1033
      break;
1034 1035 1036 1037 1038 1039
    case IrOpcode::kCheckedInt64ToInt32:
      result = LowerCheckedInt64ToInt32(node, frame_state);
      break;
    case IrOpcode::kCheckedInt64ToTaggedSigned:
      result = LowerCheckedInt64ToTaggedSigned(node, frame_state);
      break;
1040 1041 1042
    case IrOpcode::kCheckedUint32Bounds:
      result = LowerCheckedUint32Bounds(node, frame_state);
      break;
1043
    case IrOpcode::kCheckedUint32ToInt32:
1044
      result = LowerCheckedUint32ToInt32(node, frame_state);
1045
      break;
1046
    case IrOpcode::kCheckedUint32ToTaggedSigned:
1047
      result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
1048
      break;
1049 1050 1051
    case IrOpcode::kCheckedUint64Bounds:
      result = LowerCheckedUint64Bounds(node, frame_state);
      break;
1052 1053 1054 1055 1056 1057
    case IrOpcode::kCheckedUint64ToInt32:
      result = LowerCheckedUint64ToInt32(node, frame_state);
      break;
    case IrOpcode::kCheckedUint64ToTaggedSigned:
      result = LowerCheckedUint64ToTaggedSigned(node, frame_state);
      break;
1058
    case IrOpcode::kCheckedFloat64ToInt32:
1059
      result = LowerCheckedFloat64ToInt32(node, frame_state);
1060
      break;
1061 1062 1063
    case IrOpcode::kCheckedFloat64ToInt64:
      result = LowerCheckedFloat64ToInt64(node, frame_state);
      break;
1064
    case IrOpcode::kCheckedTaggedSignedToInt32:
1065
      if (frame_state == nullptr) {
1066 1067
        FATAL("No frame state (zapped by #%d: %s)", frame_state_zapper_->id(),
              frame_state_zapper_->op()->mnemonic());
1068
      }
1069
      result = LowerCheckedTaggedSignedToInt32(node, frame_state);
1070
      break;
1071 1072 1073
    case IrOpcode::kCheckedTaggedToArrayIndex:
      result = LowerCheckedTaggedToArrayIndex(node, frame_state);
      break;
1074
    case IrOpcode::kCheckedTaggedToInt32:
1075
      result = LowerCheckedTaggedToInt32(node, frame_state);
1076
      break;
1077 1078 1079
    case IrOpcode::kCheckedTaggedToInt64:
      result = LowerCheckedTaggedToInt64(node, frame_state);
      break;
1080
    case IrOpcode::kCheckedTaggedToFloat64:
1081
      result = LowerCheckedTaggedToFloat64(node, frame_state);
1082
      break;
1083
    case IrOpcode::kCheckedTaggedToTaggedSigned:
1084
      result = LowerCheckedTaggedToTaggedSigned(node, frame_state);
1085
      break;
1086
    case IrOpcode::kCheckedTaggedToTaggedPointer:
1087
      result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
1088
      break;
1089 1090 1091
    case IrOpcode::kChangeInt64ToBigInt:
      result = LowerChangeInt64ToBigInt(node);
      break;
1092 1093 1094
    case IrOpcode::kChangeUint64ToBigInt:
      result = LowerChangeUint64ToBigInt(node);
      break;
1095 1096
    case IrOpcode::kTruncateBigIntToWord64:
      result = LowerTruncateBigIntToWord64(node);
1097
      break;
1098
    case IrOpcode::kTruncateTaggedToWord32:
1099
      result = LowerTruncateTaggedToWord32(node);
1100
      break;
1101
    case IrOpcode::kCheckedTruncateTaggedToWord32:
1102
      result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
1103
      break;
1104 1105 1106
    case IrOpcode::kNumberToString:
      result = LowerNumberToString(node);
      break;
1107 1108 1109
    case IrOpcode::kObjectIsArrayBufferView:
      result = LowerObjectIsArrayBufferView(node);
      break;
1110 1111 1112
    case IrOpcode::kObjectIsBigInt:
      result = LowerObjectIsBigInt(node);
      break;
1113 1114 1115
    case IrOpcode::kObjectIsCallable:
      result = LowerObjectIsCallable(node);
      break;
1116 1117 1118
    case IrOpcode::kObjectIsConstructor:
      result = LowerObjectIsConstructor(node);
      break;
1119 1120
    case IrOpcode::kObjectIsDetectableCallable:
      result = LowerObjectIsDetectableCallable(node);
1121
      break;
1122 1123 1124
    case IrOpcode::kObjectIsMinusZero:
      result = LowerObjectIsMinusZero(node);
      break;
1125 1126 1127
    case IrOpcode::kNumberIsMinusZero:
      result = LowerNumberIsMinusZero(node);
      break;
1128 1129 1130
    case IrOpcode::kObjectIsNaN:
      result = LowerObjectIsNaN(node);
      break;
1131 1132 1133
    case IrOpcode::kNumberIsNaN:
      result = LowerNumberIsNaN(node);
      break;
1134 1135 1136
    case IrOpcode::kObjectIsNonCallable:
      result = LowerObjectIsNonCallable(node);
      break;
1137
    case IrOpcode::kObjectIsNumber:
1138
      result = LowerObjectIsNumber(node);
1139 1140
      break;
    case IrOpcode::kObjectIsReceiver:
1141
      result = LowerObjectIsReceiver(node);
1142
      break;
1143
    case IrOpcode::kObjectIsSmi:
1144
      result = LowerObjectIsSmi(node);
1145
      break;
1146
    case IrOpcode::kObjectIsString:
1147
      result = LowerObjectIsString(node);
1148
      break;
1149 1150 1151
    case IrOpcode::kObjectIsSymbol:
      result = LowerObjectIsSymbol(node);
      break;
1152
    case IrOpcode::kObjectIsUndetectable:
1153
      result = LowerObjectIsUndetectable(node);
1154
      break;
1155 1156
    case IrOpcode::kArgumentsLength:
      result = LowerArgumentsLength(node);
1157
      break;
1158 1159 1160
    case IrOpcode::kRestLength:
      result = LowerRestLength(node);
      break;
1161 1162 1163
    case IrOpcode::kToBoolean:
      result = LowerToBoolean(node);
      break;
1164 1165 1166
    case IrOpcode::kTypeOf:
      result = LowerTypeOf(node);
      break;
1167 1168
    case IrOpcode::kNewDoubleElements:
      result = LowerNewDoubleElements(node);
1169
      break;
1170 1171
    case IrOpcode::kNewSmiOrObjectElements:
      result = LowerNewSmiOrObjectElements(node);
1172
      break;
1173 1174
    case IrOpcode::kNewArgumentsElements:
      result = LowerNewArgumentsElements(node);
1175
      break;
1176 1177 1178
    case IrOpcode::kNewConsString:
      result = LowerNewConsString(node);
      break;
1179 1180 1181
    case IrOpcode::kSameValue:
      result = LowerSameValue(node);
      break;
1182 1183 1184
    case IrOpcode::kSameValueNumbersOnly:
      result = LowerSameValueNumbersOnly(node);
      break;
1185 1186 1187
    case IrOpcode::kNumberSameValue:
      result = LowerNumberSameValue(node);
      break;
1188 1189
    case IrOpcode::kDeadValue:
      result = LowerDeadValue(node);
1190
      break;
1191 1192 1193
    case IrOpcode::kStringConcat:
      result = LowerStringConcat(node);
      break;
1194 1195
    case IrOpcode::kStringFromSingleCharCode:
      result = LowerStringFromSingleCharCode(node);
1196
      break;
1197 1198
    case IrOpcode::kStringFromSingleCodePoint:
      result = LowerStringFromSingleCodePoint(node);
1199
      break;
1200 1201 1202
    case IrOpcode::kStringIndexOf:
      result = LowerStringIndexOf(node);
      break;
1203 1204 1205
    case IrOpcode::kStringFromCodePointAt:
      result = LowerStringFromCodePointAt(node);
      break;
1206 1207 1208
    case IrOpcode::kStringLength:
      result = LowerStringLength(node);
      break;
1209 1210 1211
    case IrOpcode::kStringToNumber:
      result = LowerStringToNumber(node);
      break;
1212
    case IrOpcode::kStringCharCodeAt:
1213
      result = LowerStringCharCodeAt(node);
1214
      break;
1215
    case IrOpcode::kStringCodePointAt:
1216
      result = LowerStringCodePointAt(node);
1217
      break;
1218 1219 1220 1221 1222 1223
    case IrOpcode::kStringToLowerCaseIntl:
      result = LowerStringToLowerCaseIntl(node);
      break;
    case IrOpcode::kStringToUpperCaseIntl:
      result = LowerStringToUpperCaseIntl(node);
      break;
1224 1225 1226
    case IrOpcode::kStringSubstring:
      result = LowerStringSubstring(node);
      break;
1227
    case IrOpcode::kStringEqual:
1228
      result = LowerStringEqual(node);
1229 1230
      break;
    case IrOpcode::kStringLessThan:
1231
      result = LowerStringLessThan(node);
1232 1233
      break;
    case IrOpcode::kStringLessThanOrEqual:
1234
      result = LowerStringLessThanOrEqual(node);
1235
      break;
1236 1237
    case IrOpcode::kBigIntAdd:
      result = LowerBigIntAdd(node, frame_state);
1238
      break;
1239 1240 1241
    case IrOpcode::kBigIntSubtract:
      result = LowerBigIntSubtract(node, frame_state);
      break;
1242 1243
    case IrOpcode::kBigIntNegate:
      result = LowerBigIntNegate(node);
1244
      break;
1245 1246 1247
    case IrOpcode::kNumberIsFloat64Hole:
      result = LowerNumberIsFloat64Hole(node);
      break;
1248 1249 1250 1251 1252 1253
    case IrOpcode::kNumberIsFinite:
      result = LowerNumberIsFinite(node);
      break;
    case IrOpcode::kObjectIsFiniteNumber:
      result = LowerObjectIsFiniteNumber(node);
      break;
1254 1255 1256 1257 1258 1259
    case IrOpcode::kNumberIsInteger:
      result = LowerNumberIsInteger(node);
      break;
    case IrOpcode::kObjectIsInteger:
      result = LowerObjectIsInteger(node);
      break;
1260 1261 1262 1263 1264 1265
    case IrOpcode::kNumberIsSafeInteger:
      result = LowerNumberIsSafeInteger(node);
      break;
    case IrOpcode::kObjectIsSafeInteger:
      result = LowerObjectIsSafeInteger(node);
      break;
1266
    case IrOpcode::kCheckFloat64Hole:
1267
      result = LowerCheckFloat64Hole(node, frame_state);
1268
      break;
1269 1270 1271
    case IrOpcode::kCheckNotTaggedHole:
      result = LowerCheckNotTaggedHole(node, frame_state);
      break;
1272
    case IrOpcode::kConvertTaggedHoleToUndefined:
1273
      result = LowerConvertTaggedHoleToUndefined(node);
1274
      break;
1275 1276 1277
    case IrOpcode::kCheckEqualsInternalizedString:
      LowerCheckEqualsInternalizedString(node, frame_state);
      break;
1278 1279 1280
    case IrOpcode::kAllocate:
      result = LowerAllocate(node);
      break;
1281 1282 1283
    case IrOpcode::kCheckEqualsSymbol:
      LowerCheckEqualsSymbol(node, frame_state);
      break;
1284
    case IrOpcode::kPlainPrimitiveToNumber:
1285
      result = LowerPlainPrimitiveToNumber(node);
1286 1287
      break;
    case IrOpcode::kPlainPrimitiveToWord32:
1288
      result = LowerPlainPrimitiveToWord32(node);
1289 1290
      break;
    case IrOpcode::kPlainPrimitiveToFloat64:
1291
      result = LowerPlainPrimitiveToFloat64(node);
1292
      break;
1293
    case IrOpcode::kEnsureWritableFastElements:
1294
      result = LowerEnsureWritableFastElements(node);
1295
      break;
1296
    case IrOpcode::kMaybeGrowFastElements:
1297
      result = LowerMaybeGrowFastElements(node, frame_state);
1298
      break;
1299
    case IrOpcode::kTransitionElementsKind:
1300
      LowerTransitionElementsKind(node);
1301
      break;
1302 1303 1304 1305 1306 1307
    case IrOpcode::kLoadMessage:
      result = LowerLoadMessage(node);
      break;
    case IrOpcode::kStoreMessage:
      LowerStoreMessage(node);
      break;
1308 1309 1310
    case IrOpcode::kFastApiCall:
      result = LowerFastApiCall(node);
      break;
1311 1312 1313
    case IrOpcode::kLoadFieldByIndex:
      result = LowerLoadFieldByIndex(node);
      break;
1314
    case IrOpcode::kLoadTypedElement:
1315
      result = LowerLoadTypedElement(node);
1316
      break;
1317 1318 1319
    case IrOpcode::kLoadDataViewElement:
      result = LowerLoadDataViewElement(node);
      break;
1320 1321 1322
    case IrOpcode::kLoadStackArgument:
      result = LowerLoadStackArgument(node);
      break;
1323
    case IrOpcode::kStoreTypedElement:
1324
      LowerStoreTypedElement(node);
1325
      break;
1326 1327 1328
    case IrOpcode::kStoreDataViewElement:
      LowerStoreDataViewElement(node);
      break;
1329 1330 1331
    case IrOpcode::kStoreSignedSmallElement:
      LowerStoreSignedSmallElement(node);
      break;
1332 1333
    case IrOpcode::kFindOrderedHashMapEntry:
      result = LowerFindOrderedHashMapEntry(node);
1334
      break;
1335 1336
    case IrOpcode::kFindOrderedHashMapEntryForInt32Key:
      result = LowerFindOrderedHashMapEntryForInt32Key(node);
1337
      break;
1338 1339 1340 1341 1342 1343
    case IrOpcode::kTransitionAndStoreNumberElement:
      LowerTransitionAndStoreNumberElement(node);
      break;
    case IrOpcode::kTransitionAndStoreNonNumberElement:
      LowerTransitionAndStoreNonNumberElement(node);
      break;
1344 1345
    case IrOpcode::kTransitionAndStoreElement:
      LowerTransitionAndStoreElement(node);
1346
      break;
1347 1348 1349
    case IrOpcode::kRuntimeAbort:
      LowerRuntimeAbort(node);
      break;
1350 1351 1352
    case IrOpcode::kAssertType:
      result = LowerAssertType(node);
      break;
1353 1354 1355
    case IrOpcode::kConvertReceiver:
      result = LowerConvertReceiver(node);
      break;
1356
    case IrOpcode::kFloat64RoundUp:
1357 1358 1359
      if (!LowerFloat64RoundUp(node).To(&result)) {
        return false;
      }
1360 1361
      break;
    case IrOpcode::kFloat64RoundDown:
1362 1363 1364
      if (!LowerFloat64RoundDown(node).To(&result)) {
        return false;
      }
1365 1366
      break;
    case IrOpcode::kFloat64RoundTruncate:
1367 1368 1369
      if (!LowerFloat64RoundTruncate(node).To(&result)) {
        return false;
      }
1370
      break;
1371
    case IrOpcode::kFloat64RoundTiesEven:
1372 1373 1374
      if (!LowerFloat64RoundTiesEven(node).To(&result)) {
        return false;
      }
1375
      break;
1376 1377 1378
    case IrOpcode::kDateNow:
      result = LowerDateNow(node);
      break;
1379 1380 1381
    case IrOpcode::kFoldConstant:
      result = LowerFoldConstant(node);
      break;
1382 1383 1384
    default:
      return false;
  }
1385 1386

  if ((result ? 1 : 0) != node->op()->ValueOutputCount()) {
1387 1388 1389 1390
    FATAL(
        "Effect control linearizer lowering of '%s':"
        " value output count does not agree.",
        node->op()->mnemonic());
1391 1392
  }

1393 1394
  NodeProperties::ReplaceUses(node, result, gasm()->effect(),
                              gasm()->control());
1395 1396 1397
  return true;
}

1398 1399 1400
#define __ gasm()->

Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
  // Delegates to the shared float64 -> tagged helper, forwarding the
  // minus-zero checking mode recorded on the operator.
  return ChangeFloat64ToTagged(node->InputAt(0),
                               CheckMinusZeroModeOf(node->op()));
}
1405

1406 1407
Node* EffectControlLinearizer::ChangeFloat64ToTagged(
    Node* value, CheckForMinusZeroMode mode) {
  // Tags a float64 as a Smi when it is exactly representable as one;
  // otherwise boxes it in a freshly allocated HeapNumber. With
  // kCheckForMinusZero, -0.0 is boxed (it is not a valid Smi).
  auto done = __ MakeLabel(MachineRepresentation::kTagged);
  auto if_heapnumber = __ MakeDeferredLabel();
  auto if_int32 = __ MakeLabel();

  // {value} is int32-representable iff it round-trips through int32
  // unchanged (NaN and infinities fail this and take the HeapNumber path).
  Node* value32 = __ RoundFloat64ToInt32(value);
  __ GotoIf(__ Float64Equal(value, __ ChangeInt32ToFloat64(value32)),
            &if_int32);
  __ Goto(&if_heapnumber);

  __ Bind(&if_int32);
  {
    if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
      Node* zero = __ Int32Constant(0);
      auto if_zero = __ MakeDeferredLabel();
      auto if_smi = __ MakeLabel();

      __ GotoIf(__ Word32Equal(value32, zero), &if_zero);
      __ Goto(&if_smi);

      __ Bind(&if_zero);
      {
        // In case of 0, we need to check the high bits for the IEEE -0 pattern.
        __ GotoIf(__ Int32LessThan(__ Float64ExtractHighWord32(value), zero),
                  &if_heapnumber);
        __ Goto(&if_smi);
      }

      __ Bind(&if_smi);
    }

    if (SmiValuesAre32Bits()) {
      // 32-bit Smi payload: every int32 can be tagged without overflow.
      Node* value_smi = ChangeInt32ToSmi(value32);
      __ Goto(&done, value_smi);
    } else {
      // 31-bit Smis: tagging may overflow, fall back to a HeapNumber then.
      SmiTagOrOverflow(value32, &if_heapnumber, &done);
    }
  }

  __ Bind(&if_heapnumber);
  {
    Node* value_number = AllocateHeapNumberWithValue(value);
    __ Goto(&done, value_number);
  }

  __ Bind(&done);
  return done.PhiAt(0);
}

1456
Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
  // A tagged-pointer result can never be a Smi, so always box the float64
  // input in a fresh HeapNumber.
  return AllocateHeapNumberWithValue(node->InputAt(0));
}

1461
Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
  // Forwards the bit input to the shared bit -> true/false helper.
  return ChangeBitToTagged(node->InputAt(0));
}
1465

1466
Node* EffectControlLinearizer::ChangeBitToTagged(Node* value) {
  // Converts a bit (0/1) into the canonical false/true Oddball constant.
  auto if_true = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  __ GotoIf(value, &if_true);
  __ Goto(&done, __ FalseConstant());

  __ Bind(&if_true);
  __ Goto(&done, __ TrueConstant());

  __ Bind(&done);
  return done.PhiAt(0);
}

1480
Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
  // An int31 always fits in a Smi, so plain tagging suffices — no overflow
  // check is needed.
  return ChangeInt32ToSmi(node->InputAt(0));
}

1485
Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
  // Forwards to the shared int32 -> Smi/HeapNumber helper.
  return ChangeInt32ToTagged(node->InputAt(0));
}
1489

1490
Node* EffectControlLinearizer::ChangeInt32ToTagged(Node* value) {
  // Tags an int32 as a Smi, falling back to a HeapNumber allocation when the
  // value does not fit in a 31-bit Smi payload.
  if (SmiValuesAre32Bits()) {
    // Every int32 fits in a 32-bit Smi payload; no overflow possible.
    return ChangeInt32ToSmi(value);
  }
  DCHECK(SmiValuesAre31Bits());

  auto if_overflow = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  // Tags and jumps to {done} on success, {if_overflow} otherwise.
  SmiTagOrOverflow(value, &if_overflow, &done);

  __ Bind(&if_overflow);
  Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
  __ Goto(&done, number);

  __ Bind(&done);
  return done.PhiAt(0);
}

1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522
Node* EffectControlLinearizer::LowerChangeInt64ToTagged(Node* node) {
  // Tags an int64 as a Smi when it fits; otherwise boxes it in a HeapNumber.
  Node* value = node->InputAt(0);

  auto if_not_in_smi_range = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  // Only values that round-trip through int32 can possibly be Smis.
  Node* value32 = __ TruncateInt64ToInt32(value);
  __ GotoIfNot(__ Word64Equal(__ ChangeInt32ToInt64(value32), value),
               &if_not_in_smi_range);

  if (SmiValuesAre32Bits()) {
    Node* value_smi = ChangeInt64ToSmi(value);
    __ Goto(&done, value_smi);
  } else {
    // 31-bit Smis: tagging the low word may still overflow.
    SmiTagOrOverflow(value32, &if_not_in_smi_range, &done);
  }

  __ Bind(&if_not_in_smi_range);
  Node* number = AllocateHeapNumberWithValue(__ ChangeInt64ToFloat64(value));
  __ Goto(&done, number);

  __ Bind(&done);
  return done.PhiAt(0);
}

1534
Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
  // Forwards to the shared uint32 -> Smi/HeapNumber helper.
  return ChangeUint32ToTagged(node->InputAt(0));
}
1538

1539
Node* EffectControlLinearizer::ChangeUint32ToTagged(Node* value) {
  // Tags a uint32 as a Smi when it is <= Smi::kMaxValue; larger values are
  // boxed in a HeapNumber.
  auto if_not_in_smi_range = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
  __ GotoIfNot(check, &if_not_in_smi_range);
  __ Goto(&done, ChangeUint32ToSmi(value));

  __ Bind(&if_not_in_smi_range);
  Node* number = AllocateHeapNumberWithValue(__ ChangeUint32ToFloat64(value));

  __ Goto(&done, number);
  __ Bind(&done);

  return done.PhiAt(0);
}

1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575
Node* EffectControlLinearizer::LowerChangeUint64ToTagged(Node* node) {
  // Tags a uint64 as a Smi when it is <= Smi::kMaxValue; otherwise boxes it
  // in a HeapNumber.
  Node* value = node->InputAt(0);

  auto if_not_in_smi_range = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  Node* check =
      __ Uint64LessThanOrEqual(value, __ Int64Constant(Smi::kMaxValue));
  __ GotoIfNot(check, &if_not_in_smi_range);
  __ Goto(&done, ChangeInt64ToSmi(value));

  __ Bind(&if_not_in_smi_range);
  Node* number = AllocateHeapNumberWithValue(__ ChangeInt64ToFloat64(value));

  __ Goto(&done, number);
  __ Bind(&done);

  return done.PhiAt(0);
}

1576
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
  // The input is a tagged signed value (Smi), so a plain untag suffices.
  return ChangeSmiToInt32(node->InputAt(0));
}

1581 1582 1583 1584 1585
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
  // The input is a tagged signed value (Smi); untag straight to int64.
  return ChangeSmiToInt64(node->InputAt(0));
}

1586
Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
  // Produces 1 iff the input is the true Oddball, 0 otherwise.
  return __ TaggedEqual(node->InputAt(0), __ TrueConstant());
}

1591 1592
void EffectControlLinearizer::TruncateTaggedPointerToBit(
    Node* node, GraphAssemblerLabel<1>* done) {
  // Computes the ToBoolean truncation of {node}'s HeapObject input and jumps
  // to {done} with the resulting bit (0 or 1). Callers are expected to have
  // excluded the Smi case already (see LowerTruncateTaggedToBit).
  Node* value = node->InputAt(0);

  auto if_heapnumber = __ MakeDeferredLabel();
  auto if_bigint = __ MakeDeferredLabel();

  Node* zero = __ Int32Constant(0);
  Node* fzero = __ Float64Constant(0.0);

  // Check if {value} is false.
  __ GotoIf(__ TaggedEqual(value, __ FalseConstant()), done, zero);

  // Check if {value} is the empty string.
  __ GotoIf(__ TaggedEqual(value, __ EmptyStringConstant()), done, zero);

  // Load the map of {value}.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

  // Check if the {value} is undetectable and immediately return false.
  // This includes undefined and null.
  Node* value_map_bitfield =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  __ GotoIfNot(
      __ Word32Equal(
          __ Word32And(value_map_bitfield,
                       __ Int32Constant(Map::Bits1::IsUndetectableBit::kMask)),
          zero),
      done, zero);

  // Check if {value} is a HeapNumber.
  __ GotoIf(__ TaggedEqual(value_map, __ HeapNumberMapConstant()),
            &if_heapnumber);

  // Check if {value} is a BigInt.
  __ GotoIf(__ TaggedEqual(value_map, __ BigIntMapConstant()), &if_bigint);

  // All other values that reach here are true.
  __ Goto(done, __ Int32Constant(1));

  __ Bind(&if_heapnumber);
  {
    // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
    // NaN.
    Node* value_value =
        __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
    __ Goto(done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
  }

  __ Bind(&if_bigint);
  {
    // A BigInt is truthy iff the length bits of its bitfield are non-zero.
    Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
    Node* length_is_zero = __ Word32Equal(
        __ Word32And(bitfield, __ Int32Constant(BigInt::LengthBits::kMask)),
        __ Int32Constant(0));
    __ Goto(done, __ Word32Equal(length_is_zero, zero));
  }
}

Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
  // ToBoolean truncation for an arbitrary tagged value: Smis are handled
  // inline, heap objects via TruncateTaggedPointerToBit.
  auto done = __ MakeLabel(MachineRepresentation::kBit);
  auto if_smi = __ MakeDeferredLabel();

  Node* value = node->InputAt(0);
  __ GotoIf(ObjectIsSmi(value), &if_smi);

  TruncateTaggedPointerToBit(node, &done);

  __ Bind(&if_smi);
  {
    // If {value} is a Smi, then we only need to check that it's not zero.
    __ Goto(&done, __ Word32Equal(__ TaggedEqual(value, __ SmiConstant(0)),
                                  __ Int32Constant(0)));
  }

  __ Bind(&done);
  return done.PhiAt(0);
}

1670
Node* EffectControlLinearizer::LowerTruncateTaggedPointerToBit(Node* node) {
  // ToBoolean truncation when the input is already known to be a HeapObject,
  // so no Smi check is emitted.
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  TruncateTaggedPointerToBit(node, &done);

  __ Bind(&done);
  return done.PhiAt(0);
}

1679
Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
  // Untags a Smi directly; otherwise loads the HeapNumber (or Oddball)
  // float64 payload and converts it to int32.
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, ChangeSmiToInt32(value));

  __ Bind(&if_not_smi);
  // The same field offset serves HeapNumber and Oddball inputs.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ ChangeFloat64ToInt32(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}

1700
Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
  // Untags a Smi directly; otherwise loads the HeapNumber (or Oddball)
  // float64 payload and converts it to uint32.
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, ChangeSmiToInt32(value));

  __ Bind(&if_not_smi);
  // The same field offset serves HeapNumber and Oddball inputs.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ ChangeFloat64ToUint32(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}

1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731
Node* EffectControlLinearizer::LowerChangeTaggedToInt64(Node* node) {
  // Untags a Smi directly; otherwise loads the HeapNumber (or Oddball)
  // float64 payload and converts it to int64.
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord64);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, ChangeSmiToInt64(value));

  __ Bind(&if_not_smi);
  // The same field offset serves HeapNumber and Oddball inputs.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ ChangeFloat64ToInt64(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}

1742 1743
Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
  // Change and truncate lower to the same code for float64 outputs.
  return LowerTruncateTaggedToFloat64(node);
}

1746 1747 1748
Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) {
  // Passes Smis through unchanged; converts a heap number input to a Smi by
  // going through int32 and re-tagging.
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  __ Goto(&done, value);

  __ Bind(&if_not_smi);
  // The same field offset serves HeapNumber and Oddball inputs.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ ChangeFloat64ToInt32(vfalse);
  vfalse = ChangeInt32ToSmi(vfalse);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}

1768
Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
  // Converts a Smi or HeapNumber/Oddball input to its float64 value.
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  Node* vtrue = ChangeSmiToInt32(value);
  vtrue = __ ChangeInt32ToFloat64(vtrue);
  __ Goto(&done, vtrue);

  __ Bind(&if_not_smi);
  // The same field offset serves HeapNumber and Oddball inputs.
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}

1790 1791 1792 1793 1794 1795 1796 1797 1798
Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
                                                 Node* frame_state) {
  // Deopts unless {value} is a JSFunction whose feedback cell matches the
  // one recorded on the operator; returns {value} unchanged.
  Handle<FeedbackCell> feedback_cell = FeedbackCellOf(node->op());
  Node* value = node->InputAt(0);

  // Check that {value} is actually a JSFunction.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  // One unsigned range comparison covers the whole JS_FUNCTION_TYPE range.
  Node* check_instance_type = __ Uint32LessThanOrEqual(
      __ Int32Sub(value_instance_type,
                  __ Int32Constant(FIRST_JS_FUNCTION_TYPE)),
      __ Int32Constant(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
  __ DeoptimizeIfNot(DeoptimizeReason::kWrongCallTarget, FeedbackSource(),
                     check_instance_type, frame_state);

  // Check that the {value}s feedback vector cell matches the one
  // we recorded before.
  Node* value_cell =
      __ LoadField(AccessBuilder::ForJSFunctionFeedbackCell(), value);
  Node* check_cell = __ WordEqual(value_cell, __ HeapConstant(feedback_cell));
  __ DeoptimizeIfNot(DeoptimizeReason::kWrongFeedbackCell, FeedbackSource(),
                     check_cell, frame_state);
  return value;
}

1816 1817 1818 1819 1820 1821 1822 1823 1824
void EffectControlLinearizer::MigrateInstanceOrDeopt(
    Node* value, Node* value_map, Node* frame_state,
    FeedbackSource const& feedback_source, DeoptimizeReason reason) {
  // Tries to migrate {value} to its up-to-date map via the runtime. Deopts
  // with {reason} when {value_map} is not deprecated (nothing to migrate),
  // and with kInstanceMigrationFailed when the runtime returns a Smi.
  // If map is not deprecated the migration attempt does not make sense.
  Node* bitfield3 = __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
  Node* is_not_deprecated = __ Word32Equal(
      __ Word32And(bitfield3,
                   __ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
      __ Int32Constant(0));
  __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state);
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  Runtime::FunctionId id = Runtime::kTryMigrateInstance;
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
  Node* result = __ Call(call_descriptor, __ CEntryStubConstant(1), value,
                         __ ExternalConstant(ExternalReference::Create(id)),
                         __ Int32Constant(1), __ NoContextConstant());
  // A Smi result signals that the migration attempt failed.
  Node* check = ObjectIsSmi(result);
  __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, feedback_source,
                  check, frame_state);
}

1838
void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
  // Deopts with kWrongMap unless {value}'s map is one of the operator's
  // expected maps. With kTryMigrateInstance, a failed first round of checks
  // attempts an instance migration and then re-checks, deopting on the final
  // failure instead of retrying.
  CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
  Node* value = node->InputAt(0);

  ZoneHandleSet<Map> const& maps = p.maps();
  size_t const map_count = maps.size();

  if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
    auto done = __ MakeLabel();
    auto migrate = __ MakeDeferredLabel();

    // Load the current map of the {value}.
    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

    // Perform the map checks.
    for (size_t i = 0; i < map_count; ++i) {
      Node* map = __ HeapConstant(maps[i]);
      Node* check = __ TaggedEqual(value_map, map);
      if (i == map_count - 1) {
        // No match against any map: fall through to migration.
        __ BranchWithCriticalSafetyCheck(check, &done, &migrate);
      } else {
        auto next_map = __ MakeLabel();
        __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
        __ Bind(&next_map);
      }
    }

    // Perform the (deferred) instance migration.
    __ Bind(&migrate);
    MigrateInstanceOrDeopt(value, value_map, frame_state, p.feedback(),
                           DeoptimizeReason::kWrongMap);

    // Reload the current map of the {value}.
    value_map = __ LoadField(AccessBuilder::ForMap(), value);

    // Perform the map checks again.
    for (size_t i = 0; i < map_count; ++i) {
      Node* map = __ HeapConstant(maps[i]);
      Node* check = __ TaggedEqual(value_map, map);
      if (i == map_count - 1) {
        // Second round: failure deopts instead of retrying.
        __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
                           frame_state);
      } else {
        auto next_map = __ MakeLabel();
        __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
        __ Bind(&next_map);
      }
    }

    __ Goto(&done);
    __ Bind(&done);
  } else {
    auto done = __ MakeLabel();

    // Load the current map of the {value}.
    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

    for (size_t i = 0; i < map_count; ++i) {
      Node* map = __ HeapConstant(maps[i]);
      Node* check = __ TaggedEqual(value_map, map);

      if (i == map_count - 1) {
        __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
                           frame_state);
      } else {
        auto next_map = __ MakeLabel();
        __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
        __ Bind(&next_map);
      }
    }
    __ Goto(&done);
    __ Bind(&done);
  }
}

1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932
void EffectControlLinearizer::TryMigrateInstance(Node* value, Node* value_map) {
  // Best-effort instance migration: calls Runtime::kTryMigrateInstance when
  // {value_map} is deprecated and otherwise does nothing. Unlike
  // MigrateInstanceOrDeopt, this never deoptimizes and ignores the result.
  auto done = __ MakeLabel();
  // If map is not deprecated the migration attempt does not make sense.
  Node* bitfield3 = __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
  Node* is_not_deprecated = __ Word32Equal(
      __ Word32And(bitfield3,
                   __ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
      __ Int32Constant(0));
  __ GotoIf(is_not_deprecated, &done);
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  Runtime::FunctionId id = Runtime::kTryMigrateInstance;
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
  __ Call(call_descriptor, __ CEntryStubConstant(1), value,
          __ ExternalConstant(ExternalReference::Create(id)),
          __ Int32Constant(1), __ NoContextConstant());
  __ Goto(&done);
  __ Bind(&done);
}

1933
Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
  // Non-deopting counterpart of CheckMaps: yields 1 when {value}'s map is in
  // the expected set, 0 otherwise.
  ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
  size_t const map_count = maps.size();
  Node* value = node->InputAt(0);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Load the current map of the {value}.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

  for (size_t i = 0; i < map_count; ++i) {
    Node* map = __ HeapConstant(maps[i]);
    Node* check = __ TaggedEqual(value_map, map);

    auto next_map = __ MakeLabel();
    auto passed = __ MakeLabel();
    __ BranchWithCriticalSafetyCheck(check, &passed, &next_map);

    __ Bind(&passed);
    __ Goto(&done, __ Int32Constant(1));

    __ Bind(&next_map);
  }
  // Fell through every comparison: no map matched.
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}

1962
Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
  // Deopts with kNotAHeapNumber unless {value} is a Smi or a HeapNumber;
  // returns {value} unchanged.
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel();

  Node* check0 = ObjectIsSmi(value);
  __ GotoIfNot(check0, &if_not_smi);
  __ Goto(&done);

  __ Bind(&if_not_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* check1 = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                     check1, frame_state);
  __ Goto(&done);

  __ Bind(&done);
  return value;
}

1984 1985 1986 1987 1988 1989 1990 1991
Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
                                                  Node* frame_state) {
  // Deopts with kNotAJavaScriptObject unless {value} is a JSReceiver.
  Node* value = node->InputAt(0);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);

  // JSReceiver types sit at the top of the instance-type range, so a single
  // lower-bound comparison suffices.
  static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Node* check = __ Uint32LessThanOrEqual(
      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, FeedbackSource(),
                     check, frame_state);
  return value;
}

2000 2001
Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined(
    Node* node, Node* frame_state) {
  // Deopts unless {value} is a JSReceiver, null or undefined (oddballs are
  // allowed, but booleans are explicitly ruled out).
  Node* value = node->InputAt(0);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);

  // Rule out all primitives except oddballs (true, false, undefined, null).
  static_assert(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
  static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Node* check0 = __ Uint32LessThanOrEqual(__ Uint32Constant(ODDBALL_TYPE),
                                          value_instance_type);
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
                     FeedbackSource(), check0, frame_state);

  // Rule out booleans.
  Node* check1 = __ TaggedEqual(value_map, __ BooleanMapConstant());
  __ DeoptimizeIf(DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
                  FeedbackSource(), check1, frame_state);
  return value;
}

2023 2024 2025 2026 2027 2028
Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
  // Deopts with kNotASymbol unless {value}'s map is the symbol map.
  Node* value = node->InputAt(0);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);

  Node* check =
      __ TaggedEqual(value_map, __ HeapConstant(factory()->symbol_map()));
  __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, FeedbackSource(), check,
                     frame_state);
  return value;
}

2035
Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
  // Deopts with kNotAString unless {value}'s instance type is below
  // FIRST_NONSTRING_TYPE (all string types sort below it).
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);

  Node* check = __ Uint32LessThan(value_instance_type,
                                  __ Uint32Constant(FIRST_NONSTRING_TYPE));
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAString, params.feedback(), check,
                     frame_state);
  return value;
}

2050 2051 2052
Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
                                                            Node* frame_state) {
  // Deopts with kWrongInstanceType unless {value} is an internalized string:
  // both the string and the internalized bits of the instance type must
  // match kInternalizedTag.
  Node* value = node->InputAt(0);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);

  Node* check = __ Word32Equal(
      __ Word32And(value_instance_type,
                   __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
      __ Int32Constant(kInternalizedTag));
  __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, FeedbackSource(),
                     check, frame_state);

  return value;
}

2068
void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
  // Deopts with the operator's recorded reason/feedback when the condition
  // input is false.
  const CheckIfParameters& params = CheckIfParametersOf(node->op());
  __ DeoptimizeIfNot(params.reason(), params.feedback(), node->InputAt(0),
                     frame_state);
}

2074 2075 2076
Node* EffectControlLinearizer::LowerStringConcat(Node* node) {
  // Lowers to a call of the StringAdd stub. Inputs 1 and 2 are the operand
  // strings; input 0 is not used here — presumably the precomputed result
  // length; verify against the StringConcat operator definition.
  Node* lhs = node->InputAt(1);
  Node* rhs = node->InputAt(2);

  Callable const callable =
      CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE);
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
      Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow);

  Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
                        rhs, __ NoContextConstant());

  return value;
}

2091 2092
Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
                                                    Node* frame_state) {
  // int32 addition that deopts with kOverflow instead of wrapping.
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Node* value = __ Int32AddWithOverflow(lhs, rhs);
  // Projection 1 is the overflow bit, projection 0 the sum.
  Node* check = __ Projection(1, value);
  __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
                  frame_state);
  return __ Projection(0, value);
}
2102

2103 2104 2105 2106
Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
                                                    Node* frame_state) {
  // int32 subtraction that deopts with kOverflow instead of wrapping.
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Node* value = __ Int32SubWithOverflow(lhs, rhs);
  // Projection 1 is the overflow bit, projection 0 the difference.
  Node* check = __ Projection(1, value);
  __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
                  frame_state);
  return __ Projection(0, value);
}

2115 2116
Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
                                                    Node* frame_state) {
  // Exact int32 division. Deopts on: a non-zero remainder (kLostPrecision),
  // division by zero, a -0 result (0 / negative), and kMinInt / -1
  // (kOverflow). Power-of-two divisors are lowered to a masked check plus an
  // arithmetic shift.
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);
  Node* zero = __ Int32Constant(0);

  // Check if the {rhs} is a known power of two.
  Int32Matcher m(rhs);
  if (m.IsPowerOf2()) {
    // Since we know that {rhs} is a power of two, we can perform a fast
    // check to see if the relevant least significant bits of the {lhs}
    // are all zero, and if so we know that we can perform a division
    // safely (and fast by doing an arithmetic - aka sign preserving -
    // right shift on {lhs}).
    int32_t divisor = m.ResolvedValue();
    Node* mask = __ Int32Constant(divisor - 1);
    Node* shift = __ Int32Constant(base::bits::WhichPowerOfTwo(divisor));
    Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                       check, frame_state);
    return __ Word32Sar(lhs, shift);
  } else {
    auto if_rhs_positive = __ MakeLabel();
    auto if_rhs_negative = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kWord32);

    // Check if {rhs} is positive (and not zero).
    Node* check_rhs_positive = __ Int32LessThan(zero, rhs);
    __ Branch(check_rhs_positive, &if_rhs_positive, &if_rhs_negative);

    __ Bind(&if_rhs_positive);
    {
      // Fast case, no additional checking required.
      __ Goto(&done, __ Int32Div(lhs, rhs));
    }

    __ Bind(&if_rhs_negative);
    {
      auto if_lhs_minint = __ MakeDeferredLabel();
      auto if_lhs_notminint = __ MakeLabel();

      // Check if {rhs} is zero.
      Node* check_rhs_zero = __ Word32Equal(rhs, zero);
      __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(),
                      check_rhs_zero, frame_state);

      // Check if {lhs} is zero, as that would produce minus zero.
      Node* check_lhs_zero = __ Word32Equal(lhs, zero);
      __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(),
                      check_lhs_zero, frame_state);

      // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
      // to return -kMinInt, which is not representable as Word32.
      Node* check_lhs_minint = __ Word32Equal(lhs, __ Int32Constant(kMinInt));
      __ Branch(check_lhs_minint, &if_lhs_minint, &if_lhs_notminint);

      __ Bind(&if_lhs_minint);
      {
        // Check that {rhs} is not -1, otherwise result would be -kMinInt.
        Node* check_rhs_minusone = __ Word32Equal(rhs, __ Int32Constant(-1));
        __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(),
                        check_rhs_minusone, frame_state);

        // Perform the actual integer division.
        __ Goto(&done, __ Int32Div(lhs, rhs));
      }

      __ Bind(&if_lhs_notminint);
      {
        // Perform the actual integer division.
        __ Goto(&done, __ Int32Div(lhs, rhs));
      }
    }

    __ Bind(&done);
    Node* value = done.PhiAt(0);

    // Check if the remainder is non-zero.
    Node* check = __ Word32Equal(lhs, __ Int32Mul(value, rhs));
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                       check, frame_state);

    return value;
  }
}

2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229
template <size_t VarCount, size_t VarCount2>
void EffectControlLinearizer::SmiTagOrOverflow(
    Node* value, GraphAssemblerLabel<VarCount>* if_overflow,
    GraphAssemblerLabel<VarCount2>* done) {
  // Smi-tags {value}, jumping to {done} with the tagged result or to
  // {if_overflow} when the value does not fit in a 31-bit Smi.
  DCHECK(SmiValuesAre31Bits());
  // Check for overflow at the same time that we are smi tagging.
  // Since smi tagging shifts left by one, it's the same as adding value twice.
  Node* add = __ Int32AddWithOverflow(value, value);
  Node* ovf = __ Projection(1, add);
  __ GotoIf(ovf, if_overflow);
  Node* value_smi = __ Projection(0, add);
  value_smi = ChangeTaggedInt32ToSmi(value_smi);
  __ Goto(done, value_smi);
}

Node* EffectControlLinearizer::SmiTagOrDeopt(Node* value,
                                             const CheckParameters& params,
                                             Node* frame_state) {
  DCHECK(SmiValuesAre31Bits());
  // Smi tagging shifts left by one, i.e. adds {value} to itself; a set
  // overflow flag on that addition means the value would lose precision
  // when tagged, so deoptimize in that case.
  Node* add_with_ovf = __ Int32AddWithOverflow(value, value);
  Node* lost_precision = __ Projection(1, add_with_ovf);
  __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(),
                  lost_precision, frame_state);
  return ChangeTaggedInt32ToSmi(__ Projection(0, add_with_ovf));
}

2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255
Node* EffectControlLinearizer::BuildUint32Mod(Node* lhs, Node* rhs) {
  auto if_power_of_two = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  // {rhs} - 1: for a power-of-two {rhs} this mask selects exactly the bits
  // that make up the remainder.
  Node* mask = __ Int32Sub(rhs, __ Int32Constant(1));

  // {rhs} is a power of two iff rhs & (rhs - 1) == 0.
  Node* is_pow2 = __ Word32Equal(__ Word32And(rhs, mask), __ Int32Constant(0));
  __ GotoIf(is_pow2, &if_power_of_two);

  // Generic case: use the Uint32Mod machine operation.
  __ Goto(&done, __ Uint32Mod(lhs, rhs));

  __ Bind(&if_power_of_two);
  // Power-of-two case: the remainder is just the masked low bits.
  __ Goto(&done, __ Word32And(lhs, mask));

  __ Bind(&done);
  return done.PhiAt(0);
}

2256 2257
// Lowers CheckedInt32Mod: signed modulus with deopts for division by zero
// and a -0 result, plus a power-of-two fast path (via BuildUint32Mod).
Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
                                                    Node* frame_state) {
  // General case for signed integer modulus, with optimization for (unknown)
  // power of 2 right hand side.
  //
  //   if rhs <= 0 then
  //     rhs = -rhs
  //     deopt if rhs == 0
  //   let msk = rhs - 1 in
  //   if lhs < 0 then
  //     let lhs_abs = -lsh in
  //     let res = if rhs & msk == 0 then
  //                 lhs_abs & msk
  //               else
  //                 lhs_abs % rhs in
  //     if lhs < 0 then
  //       deopt if res == 0
  //       -res
  //     else
  //       res
  //   else
  //     if rhs & msk == 0 then
  //       lhs & msk
  //     else
  //       lhs % rhs
  //
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  auto if_rhs_not_positive = __ MakeDeferredLabel();
  auto if_lhs_negative = __ MakeDeferredLabel();
  auto rhs_checked = __ MakeLabel(MachineRepresentation::kWord32);
  auto done = __ MakeLabel(MachineRepresentation::kWord32);

  Node* zero = __ Int32Constant(0);

  // Check if {rhs} is not strictly positive.
  Node* check0 = __ Int32LessThanOrEqual(rhs, zero);
  __ GotoIf(check0, &if_rhs_not_positive);
  __ Goto(&rhs_checked, rhs);

  __ Bind(&if_rhs_not_positive);
  {
    // Negate {rhs}, might still produce a negative result in case of
    // -2^31, but that is handled safely below.
    Node* vtrue0 = __ Int32Sub(zero, rhs);

    // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(),
                    __ Word32Equal(vtrue0, zero), frame_state);
    __ Goto(&rhs_checked, vtrue0);
  }

  __ Bind(&rhs_checked);
  rhs = rhs_checked.PhiAt(0);

  __ GotoIf(__ Int32LessThan(lhs, zero), &if_lhs_negative);
  {
    // The {lhs} is a non-negative integer.
    __ Goto(&done, BuildUint32Mod(lhs, rhs));
  }

  __ Bind(&if_lhs_negative);
  {
    // The {lhs} is a negative integer. This is very unlikely and
    // we intentionally don't use the BuildUint32Mod() here, which
    // would try to figure out whether {rhs} is a power of two,
    // since this is intended to be a slow-path.
    Node* res = __ Uint32Mod(__ Int32Sub(zero, lhs), rhs);

    // Check if we would have to return -0.
    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(),
                    __ Word32Equal(res, zero), frame_state);
    __ Goto(&done, __ Int32Sub(zero, res));
  }

  __ Bind(&done);
  return done.PhiAt(0);
}

2337 2338
// Lowers CheckedUint32Div: unsigned division that deopts on division by
// zero or a non-zero remainder (lost precision). Known power-of-two
// divisors are strength-reduced to a mask check plus a logical shift.
Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
                                                     Node* frame_state) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);
  Node* zero = __ Int32Constant(0);

  // Check if the {rhs} is a known power of two.
  Uint32Matcher m(rhs);
  if (m.IsPowerOf2()) {
    // Since we know that {rhs} is a power of two, we can perform a fast
    // check to see if the relevant least significant bits of the {lhs}
    // are all zero, and if so we know that we can perform a division
    // safely (and fast by doing a logical - aka zero extending - right
    // shift on {lhs}).
    uint32_t divisor = m.ResolvedValue();
    Node* mask = __ Uint32Constant(divisor - 1);
    Node* shift = __ Uint32Constant(base::bits::WhichPowerOfTwo(divisor));
    Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                       check, frame_state);
    return __ Word32Shr(lhs, shift);
  } else {
    // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
    Node* check = __ Word32Equal(rhs, zero);
    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check,
                    frame_state);

    // Perform the actual unsigned integer division.
    Node* value = __ Uint32Div(lhs, rhs);

    // Check if the remainder is non-zero.
    check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, FeedbackSource(),
                       check, frame_state);
    return value;
  }
}

2375 2376
// Lowers CheckedUint32Mod: deopts when {rhs} is zero, then delegates to
// BuildUint32Mod (which has a power-of-two fast path).
Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
                                                     Node* frame_state) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Node* zero = __ Int32Constant(0);

  // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
  Node* check = __ Word32Equal(rhs, zero);
  __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, FeedbackSource(), check,
                  frame_state);

  // Perform the actual unsigned integer modulus.
  return BuildUint32Mod(lhs, rhs);
}

2391 2392
// Lowers CheckedInt32Mul: multiplies with overflow deopt; when requested by
// the operator's mode, also deopts on a -0 result (product 0 with a
// negative operand).
Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
                                                    Node* frame_state) {
  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Node* projection = __ Int32MulWithOverflow(lhs, rhs);
  Node* check = __ Projection(1, projection);
  __ DeoptimizeIf(DeoptimizeReason::kOverflow, FeedbackSource(), check,
                  frame_state);

  Node* value = __ Projection(0, projection);

  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
    auto if_zero = __ MakeDeferredLabel();
    auto check_done = __ MakeLabel();
    Node* zero = __ Int32Constant(0);
    Node* check_zero = __ Word32Equal(value, zero);
    __ GotoIf(check_zero, &if_zero);
    __ Goto(&check_done);

    __ Bind(&if_zero);
    // We may need to return negative zero: the product is zero and one of
    // the inputs is negative, so the exact result would be -0.
    Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, FeedbackSource(), check_or,
                    frame_state);
    __ Goto(&check_done);

    __ Bind(&check_done);
  }

  return value;
}

2425 2426
// Lowers CheckedInt32ToTaggedSigned (31-bit Smi configuration only):
// Smi-tags the int32 input, deopting if it does not fit.
Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
    Node* node, Node* frame_state) {
  DCHECK(SmiValuesAre31Bits());
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());
  return SmiTagOrDeopt(value, params, frame_state);
}

2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457
Node* EffectControlLinearizer::LowerCheckedInt64ToInt32(Node* node,
                                                        Node* frame_state) {
  const CheckParameters& params = CheckParametersOf(node->op());
  Node* input = node->InputAt(0);

  // Truncate to 32 bits, then sign-extend back: if the round trip does not
  // reproduce the original value, the input does not fit into int32.
  Node* truncated = __ TruncateInt64ToInt32(input);
  Node* fits = __ Word64Equal(__ ChangeInt32ToInt64(truncated), input);
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), fits,
                     frame_state);
  return truncated;
}

// Lowers CheckedInt64ToTaggedSigned: deopts when the int64 input does not
// round-trip through int32, then Smi-tags it (directly with 32-bit Smis,
// via SmiTagOrDeopt with 31-bit Smis).
Node* EffectControlLinearizer::LowerCheckedInt64ToTaggedSigned(
    Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());

  Node* value32 = __ TruncateInt64ToInt32(value);
  Node* check = __ Word64Equal(__ ChangeInt32ToInt64(value32), value);
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
                     frame_state);

  if (SmiValuesAre32Bits()) {
    return ChangeInt64ToSmi(value);
  } else {
    return SmiTagOrDeopt(value32, params, frame_state);
  }
}

2462 2463 2464 2465
// Lowers CheckedUint32Bounds: bounds check (index < limit, unsigned) that
// either deopts on failure or, with kAbortOnOutOfBounds, marks the failing
// path unreachable. Returns the (checked) index.
Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
                                                        Node* frame_state) {
  Node* index = node->InputAt(0);
  Node* limit = node->InputAt(1);
  const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());

  Node* check = __ Uint32LessThan(index, limit);
  if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
    __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
                       params.check_parameters().feedback(), check,
                       frame_state);
  } else {
    auto if_abort = __ MakeDeferredLabel();
    auto done = __ MakeLabel();

    __ Branch(check, &done, &if_abort);

    __ Bind(&if_abort);
    __ Unreachable(&done);

    __ Bind(&done);
  }

  return index;
}

2488 2489
// Lowers CheckedUint32ToInt32: deopts when the uint32 value has its sign
// bit set (i.e. would be negative when reinterpreted as int32).
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
                                                         Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());
  Node* unsafe = __ Int32LessThan(value, __ Int32Constant(0));
  __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), unsafe,
                  frame_state);
  return value;
}

2498 2499
// Lowers CheckedUint32ToTaggedSigned: deopts when the uint32 value exceeds
// the maximum Smi value, then Smi-tags it.
Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
    Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());
  Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
                     frame_state);
  return ChangeUint32ToSmi(value);
}

2508 2509 2510 2511
// Lowers CheckedUint64Bounds: 64-bit analogue of LowerCheckedUint32Bounds —
// deopts (or aborts via Unreachable) when index >= limit (unsigned).
Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
                                                        Node* frame_state) {
  Node* const index = node->InputAt(0);
  Node* const limit = node->InputAt(1);
  const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());

  Node* check = __ Uint64LessThan(index, limit);
  if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
    __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
                       params.check_parameters().feedback(), check,
                       frame_state);
  } else {
    auto if_abort = __ MakeDeferredLabel();
    auto done = __ MakeLabel();

    __ Branch(check, &done, &if_abort);

    __ Bind(&if_abort);
    __ Unreachable(&done);

    __ Bind(&done);
  }
  return index;
}

2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555
Node* EffectControlLinearizer::LowerCheckedUint64ToInt32(Node* node,
                                                         Node* frame_state) {
  const CheckParameters& params = CheckParametersOf(node->op());
  Node* input = node->InputAt(0);

  // Deopt unless the unsigned 64-bit value fits into the positive int32
  // range; then the low 32 bits are the result.
  Node* in_range = __ Uint64LessThanOrEqual(input, __ Int64Constant(kMaxInt));
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(),
                     in_range, frame_state);
  return __ TruncateInt64ToInt32(input);
}

Node* EffectControlLinearizer::LowerCheckedUint64ToTaggedSigned(
    Node* node, Node* frame_state) {
  const CheckParameters& params = CheckParametersOf(node->op());
  Node* input = node->InputAt(0);

  // Deopt unless the unsigned 64-bit value is no larger than the maximum
  // Smi value; then it can be Smi-tagged without losing precision.
  Node* fits_smi =
      __ Uint64LessThanOrEqual(input, __ Int64Constant(Smi::kMaxValue));
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(),
                     fits_smi, frame_state);
  return ChangeInt64ToSmi(input);
}

2556
// Converts a float64 to int32, deopting on NaN or any fractional part
// (round-trip comparison) and — when {mode} requests it — on the IEEE -0
// bit pattern.
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
    CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value,
    Node* frame_state) {
  Node* value32 = __ RoundFloat64ToInt32(value);
  Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
                     check_same, frame_state);

  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
    // Check if {value} is -0.
    auto if_zero = __ MakeDeferredLabel();
    auto check_done = __ MakeLabel();

    Node* check_zero = __ Word32Equal(value32, __ Int32Constant(0));
    __ GotoIf(check_zero, &if_zero);
    __ Goto(&check_done);

    __ Bind(&if_zero);
    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
    Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
                                            __ Int32Constant(0));
    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, feedback, check_negative,
                    frame_state);
    __ Goto(&check_done);

    __ Bind(&check_done);
  }
  return value32;
}

2586 2587 2588
// Converts a float64 to an array index (intptr-sized on 64-bit targets),
// deopting on NaN, fractional values, or values outside the safe-integer
// index range.
Node* EffectControlLinearizer::BuildCheckedFloat64ToIndex(
    const FeedbackSource& feedback, Node* value, Node* frame_state) {
  if (machine()->Is64()) {
    Node* value64 =
        __ TruncateFloat64ToInt64(value, TruncateKind::kArchitectureDefault);
    // The TruncateKind above means there will be a precision loss in case
    // INT64_MAX input is passed, but that precision loss would not be
    // detected and would not lead to a deoptimization from the first check.
    // But in this case, we'll deopt anyway because of the following checks.
    Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64));
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
                       check_same, frame_state);
    Node* check_max =
        __ IntLessThan(value64, __ Int64Constant(kMaxSafeInteger));
    __ DeoptimizeIfNot(DeoptimizeReason::kNotAnArrayIndex, feedback, check_max,
                       frame_state);
    Node* check_min =
        __ IntLessThan(__ Int64Constant(-kMaxSafeInteger), value64);
    __ DeoptimizeIfNot(DeoptimizeReason::kNotAnArrayIndex, feedback, check_min,
                       frame_state);
    return value64;
  } else {
    Node* value32 = __ RoundFloat64ToInt32(value);
    Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
    __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
                       check_same, frame_state);
    return value32;
  }
}

2616 2617
// Lowers CheckedFloat64ToInt32 by delegating to the shared checked
// float64 -> int32 conversion builder with the operator's mode/feedback.
Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
                                                          Node* frame_state) {
  const CheckMinusZeroParameters& params =
      CheckMinusZeroParametersOf(node->op());
  Node* value = node->InputAt(0);
  return BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), value,
                                    frame_state);
}

2625
// Converts a float64 to int64, deopting on NaN or fractional values
// (round-trip comparison) and — when {mode} requests it — on the IEEE -0
// bit pattern.
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt64(
    CheckForMinusZeroMode mode, const FeedbackSource& feedback, Node* value,
    Node* frame_state) {
  Node* value64 =
      __ TruncateFloat64ToInt64(value, TruncateKind::kSetOverflowToMin);
  Node* check_same = __ Float64Equal(value, __ ChangeInt64ToFloat64(value64));
  __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
                     check_same, frame_state);

  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
    // Check if {value} is -0.
    auto if_zero = __ MakeDeferredLabel();
    auto check_done = __ MakeLabel();

    Node* check_zero = __ Word64Equal(value64, __ Int64Constant(0));
    __ GotoIf(check_zero, &if_zero);
    __ Goto(&check_done);

    __ Bind(&if_zero);
    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
    Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
                                            __ Int32Constant(0));
    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, feedback, check_negative,
                    frame_state);
    __ Goto(&check_done);

    __ Bind(&check_done);
  }
  return value64;
}

Node* EffectControlLinearizer::LowerCheckedFloat64ToInt64(Node* node,
                                                          Node* frame_state) {
  // Delegate to the shared checked float64 -> int64 conversion builder,
  // forwarding the operator's minus-zero mode and feedback.
  const CheckMinusZeroParameters& params =
      CheckMinusZeroParametersOf(node->op());
  return BuildCheckedFloat64ToInt64(params.mode(), params.feedback(),
                                    node->InputAt(0), frame_state);
}

2665 2666
// Lowers CheckedTaggedSignedToInt32: deopts unless the tagged value is a
// Smi, then untags it to int32.
Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
    Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());
  Node* check = ObjectIsSmi(value);
  __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
                     frame_state);
  return ChangeSmiToInt32(value);
}

2675 2676 2677 2678 2679 2680
// Lowers CheckedTaggedToArrayIndex: converts a tagged value to an intptr
// array index. Smis are untagged directly; HeapNumbers go through the
// checked float64 -> index conversion; strings are converted via the
// runtime's string_to_array_index helper (which signals failure with -1).
// Anything else deopts.
Node* EffectControlLinearizer::LowerCheckedTaggedToArrayIndex(
    Node* node, Node* frame_state) {
  CheckParameters const& params = CheckParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineType::PointerRepresentation());

  __ GotoIfNot(ObjectIsSmi(value), &if_not_smi);
  // In the Smi case, just convert to intptr_t.
  __ Goto(&done, ChangeSmiToIntPtr(value));

  // In the non-Smi case, check the heap numberness, load the number and
  // convert to integer.
  __ Bind(&if_not_smi);
  auto if_not_heap_number = __ MakeDeferredLabel();
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* is_heap_number = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  __ GotoIfNot(is_heap_number, &if_not_heap_number);

  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  number = BuildCheckedFloat64ToIndex(params.feedback(), number, frame_state);
  __ Goto(&done, number);

  // Not a HeapNumber: it must be a string, otherwise deopt.
  __ Bind(&if_not_heap_number);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* is_string = __ Uint32LessThan(value_instance_type,
                                      __ Uint32Constant(FIRST_NONSTRING_TYPE));
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAString, params.feedback(),
                     is_string, frame_state);

  // Call the C helper that parses the string as an array index; it returns
  // -1 when the string is not a valid index.
  MachineSignature::Builder builder(graph()->zone(), 1, 1);
  builder.AddReturn(MachineType::IntPtr());
  builder.AddParam(MachineType::TaggedPointer());
  Node* string_to_array_index_function =
      __ ExternalConstant(ExternalReference::string_to_array_index_function());
  auto call_descriptor =
      Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
  Node* index = __ Call(common()->Call(call_descriptor),
                        string_to_array_index_function, value);

  __ DeoptimizeIf(DeoptimizeReason::kNotAnArrayIndex, params.feedback(),
                  __ Word32Equal(index, __ Int32Constant(-1)), frame_state);

  __ Goto(&done, index);

  __ Bind(&done);
  return done.PhiAt(0);
}

2727 2728
// Lowers CheckedTaggedToInt32: Smis are untagged directly; HeapNumbers go
// through the checked float64 -> int32 conversion; anything else deopts.
Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
                                                         Node* frame_state) {
  const CheckMinusZeroParameters& params =
      CheckMinusZeroParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);
  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  // In the Smi case, just convert to int32.
  __ Goto(&done, ChangeSmiToInt32(value));

  // In the non-Smi case, check the heap numberness, load the number and
  // convert to int32.
  __ Bind(&if_not_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* check_map = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                     check_map, frame_state);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), vfalse,
                                      frame_state);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}

2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773
// Lowers CheckedTaggedToInt64: Smis are untagged directly; HeapNumbers go
// through the checked float64 -> int64 conversion; anything else deopts.
Node* EffectControlLinearizer::LowerCheckedTaggedToInt64(Node* node,
                                                         Node* frame_state) {
  const CheckMinusZeroParameters& params =
      CheckMinusZeroParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord64);

  Node* check = ObjectIsSmi(value);
  __ GotoIfNot(check, &if_not_smi);
  // In the Smi case, just convert to int64.
  __ Goto(&done, ChangeSmiToInt64(value));

  // In the non-Smi case, check the heap numberness, load the number and
  // convert to int64.
  __ Bind(&if_not_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* check_map = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
                     check_map, frame_state);
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = BuildCheckedFloat64ToInt64(params.mode(), params.feedback(), vfalse,
                                      frame_state);
  __ Goto(&done, vfalse);

  __ Bind(&done);
  return done.PhiAt(0);
}

2786
// Checks that the tagged (non-Smi) {value} is a HeapNumber — or, depending
// on {mode}, a boolean or any oddball — deopting otherwise, and loads its
// float64 payload. The STATIC_ASSERTs document why loading through the
// HeapNumber value offset is also valid for oddballs.
Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
    CheckTaggedInputMode mode, const FeedbackSource& feedback, Node* value,
    Node* frame_state) {
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* check_number = __ TaggedEqual(value_map, __ HeapNumberMapConstant());
  switch (mode) {
    case CheckTaggedInputMode::kNumber: {
      __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, feedback,
                         check_number, frame_state);
      break;
    }
    case CheckTaggedInputMode::kNumberOrBoolean: {
      auto check_done = __ MakeLabel();

      __ GotoIf(check_number, &check_done);
      __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrBoolean, feedback,
                         __ TaggedEqual(value_map, __ BooleanMapConstant()),
                         frame_state);
      STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                        Oddball::kToNumberRawOffset);
      __ Goto(&check_done);

      __ Bind(&check_done);
      break;
    }
    case CheckTaggedInputMode::kNumberOrOddball: {
      auto check_done = __ MakeLabel();

      __ GotoIf(check_number, &check_done);
      // For oddballs also contain the numeric value, let us just check that
      // we have an oddball here.
      Node* instance_type =
          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
      Node* check_oddball =
          __ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
      __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrOddball, feedback,
                         check_oddball, frame_state);
      STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                        Oddball::kToNumberRawOffset);
      __ Goto(&check_done);

      __ Bind(&check_done);
      break;
    }
  }
  return __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
}

2834 2835
// Lowers CheckedTaggedToFloat64: Smis are untagged and widened to float64;
// non-Smis go through the checked HeapNumber/oddball load.
Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
                                                           Node* frame_state) {
  CheckTaggedInputParameters const& p =
      CheckTaggedInputParametersOf(node->op());
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);

  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  // In the Smi case, just convert to int32 and then float64.
  // Otherwise, check heap numberness and load the number.
  Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
      p.mode(), p.feedback(), value, frame_state);
  __ Goto(&done, number);

  __ Bind(&if_smi);
  Node* from_smi = ChangeSmiToInt32(value);
  from_smi = __ ChangeInt32ToFloat64(from_smi);
  __ Goto(&done, from_smi);

  __ Bind(&done);
  return done.PhiAt(0);
}

2861 2862
// Lowers CheckedTaggedToTaggedSigned: deopts unless the value is a Smi and
// passes it through unchanged.
Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
    Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());

  Node* check = ObjectIsSmi(value);
  __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
                     frame_state);

  return value;
}

2873 2874
// Lowers CheckedTaggedToTaggedPointer: deopts when the value IS a Smi and
// passes it through unchanged otherwise.
Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
    Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());

  Node* check = ObjectIsSmi(value);
  __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), check,
                  frame_state);
  return value;
}
2883

2884
// Lowers CheckBigInt: deopts when the value is a Smi or its map is not the
// BigInt map; returns the value unchanged otherwise.
Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
  Node* value = node->InputAt(0);
  const CheckParameters& params = CheckParametersOf(node->op());

  // Check for Smi.
  Node* smi_check = ObjectIsSmi(value);
  __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), smi_check,
                  frame_state);

  // Check for BigInt.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* bi_check = __ TaggedEqual(value_map, __ BigIntMapConstant());
  __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
                     bi_check, frame_state);

  return value;
}

2902
// Lowers ChangeInt64ToBigInt (64-bit targets only): allocates a canonical
// zero-length BigInt for 0, otherwise a one-digit BigInt carrying the sign
// bit in its bitfield and the branchlessly-computed absolute value as digit.
Node* EffectControlLinearizer::LowerChangeInt64ToBigInt(Node* node) {
  DCHECK(machine()->Is64());

  auto done = __ MakeLabel(MachineRepresentation::kTagged);
  Node* value = node->InputAt(0);

  // BigInts with value 0 must be of size 0 (canonical form).
  __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &done,
            BuildAllocateBigInt(nullptr, nullptr));

  // Shift sign bit into BigInt's sign bit position.
  Node* sign =
      __ Word64Shr(value, __ IntPtrConstant(63 - BigInt::SignBits::kShift));
  Node* bitfield =
      __ Word32Or(__ Int32Constant(BigInt::LengthBits::encode(1)), sign);

  // We use (value XOR (value >>> 63)) - (value >>> 63) to compute the
  // absolute value, in a branchless fashion.
  Node* sign_mask = __ Word64Sar(value, __ Int64Constant(63));
  Node* absolute_value = __ Int64Sub(__ Word64Xor(value, sign_mask), sign_mask);
  __ Goto(&done, BuildAllocateBigInt(bitfield, absolute_value));

  __ Bind(&done);
  return done.PhiAt(0);
}

// Lowers ChangeUint64ToBigInt (64-bit targets only): allocates a canonical
// zero-length BigInt for 0, otherwise a one-digit, non-negative BigInt.
Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
  DCHECK(machine()->Is64());

  auto done = __ MakeLabel(MachineRepresentation::kTagged);
  Node* value = node->InputAt(0);

  // BigInts with value 0 must be of size 0 (canonical form).
  __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &done,
            BuildAllocateBigInt(nullptr, nullptr));

  const auto bitfield = BigInt::LengthBits::encode(1);
  __ Goto(&done, BuildAllocateBigInt(__ Int32Constant(bitfield), value));

  __ Bind(&done);
  return done.PhiAt(0);
}

2945
// Lowers TruncateBigIntToWord64 (64-bit targets only): a zero bitfield
// means the canonical zero BigInt; otherwise the least significant digit is
// loaded and negated when the sign bit is set.
Node* EffectControlLinearizer::LowerTruncateBigIntToWord64(Node* node) {
  DCHECK(machine()->Is64());

  auto done = __ MakeLabel(MachineRepresentation::kWord64);
  auto if_neg = __ MakeLabel();
  auto if_not_zero = __ MakeLabel();

  Node* value = node->InputAt(0);

  Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
  __ GotoIfNot(__ Word32Equal(bitfield, __ Int32Constant(0)), &if_not_zero);
  __ Goto(&done, __ Int64Constant(0));

  __ Bind(&if_not_zero);
  {
    Node* lsd =
        __ LoadField(AccessBuilder::ForBigIntLeastSignificantDigit64(), value);
    Node* sign =
        __ Word32And(bitfield, __ Int32Constant(BigInt::SignBits::kMask));
    __ GotoIf(__ Word32Equal(sign, __ Int32Constant(1)), &if_neg);
    __ Goto(&done, lsd);

    __ Bind(&if_neg);
    __ Goto(&done, __ Int64Sub(__ Int64Constant(0), lsd));
  }

  __ Bind(&done);
  return done.PhiAt(0);
}

2975 2976
Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
  Node* value = node->InputAt(0);
2977

2978 2979
  auto if_not_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);
2980

2981
  Node* check = ObjectIsSmi(value);
2982
  __ GotoIfNot(check, &if_not_smi);
2983 2984 2985
  __ Goto(&done, ChangeSmiToInt32(value));

  __ Bind(&if_not_smi);
2986 2987
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
2988 2989 2990
  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  vfalse = __ TruncateFloat64ToWord32(vfalse);
  __ Goto(&done, vfalse);
2991

2992 2993
  __ Bind(&done);
  return done.PhiAt(0);
2994 2995
}

2996 2997
Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
    Node* node, Node* frame_state) {
2998 2999
  const CheckTaggedInputParameters& params =
      CheckTaggedInputParametersOf(node->op());
3000 3001
  Node* value = node->InputAt(0);

3002 3003
  auto if_not_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);
3004

3005
  Node* check = ObjectIsSmi(value);
3006
  __ GotoIfNot(check, &if_not_smi);
3007
  // In the Smi case, just convert to int32.
3008
  __ Goto(&done, ChangeSmiToInt32(value));
3009 3010 3011

  // Otherwise, check that it's a heap number or oddball and truncate the value
  // to int32.
3012
  __ Bind(&if_not_smi);
3013 3014
  Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
      params.mode(), params.feedback(), value, frame_state);
3015 3016 3017 3018 3019 3020 3021
  number = __ TruncateFloat64ToWord32(number);
  __ Goto(&done, number);

  __ Bind(&done);
  return done.PhiAt(0);
}

3022 3023
Node* EffectControlLinearizer::LowerAllocate(Node* node) {
  Node* size = node->InputAt(0);
3024 3025
  AllocationType allocation = AllocationTypeOf(node->op());
  Node* new_node = __ Allocate(allocation, size);
3026 3027 3028
  return new_node;
}

3029 3030 3031 3032
Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
  Node* argument = node->InputAt(0);

  Callable const callable =
3033
      Builtins::CallableFor(isolate(), Builtin::kNumberToString);
3034 3035
  Operator::Properties properties = Operator::kEliminatable;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
3036
  auto call_descriptor = Linkage::GetStubCallDescriptor(
3037 3038
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
3039
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), argument,
3040 3041 3042
                 __ NoContextConstant());
}

3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055
Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* vfalse = __ Uint32LessThan(
3056 3057 3058 3059
      __ Int32Sub(value_instance_type,
                  __ Int32Constant(FIRST_JS_ARRAY_BUFFER_VIEW_TYPE)),
      __ Int32Constant(LAST_JS_ARRAY_BUFFER_VIEW_TYPE -
                       FIRST_JS_ARRAY_BUFFER_VIEW_TYPE + 1));
3060 3061 3062 3063 3064 3065 3066 3067 3068
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}

3069 3070 3071 3072 3073 3074 3075 3076 3077
Node* EffectControlLinearizer::LowerObjectIsBigInt(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
3078
  Node* vfalse = __ TaggedEqual(value_map, __ BigIntMapConstant());
3079 3080 3081 3082 3083 3084 3085 3086 3087
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}

3088 3089 3090
Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
  Node* value = node->InputAt(0);

3091 3092
  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3093 3094 3095 3096 3097 3098 3099

  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
3100 3101 3102 3103
  Node* vfalse = __ Word32Equal(
      __ Int32Constant(Map::Bits1::IsCallableBit::kMask),
      __ Word32And(value_bit_field,
                   __ Int32Constant(Map::Bits1::IsCallableBit::kMask)));
3104 3105 3106 3107 3108 3109 3110 3111 3112
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}

3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124
Node* EffectControlLinearizer::LowerObjectIsConstructor(Node* node) {
  Node* value = node->InputAt(0);

  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
3125
  Node* vfalse = __ Word32Equal(
3126
      __ Int32Constant(Map::Bits1::IsConstructorBit::kMask),
3127
      __ Word32And(value_bit_field,
3128
                   __ Int32Constant(Map::Bits1::IsConstructorBit::kMask)));
3129 3130 3131 3132 3133 3134 3135 3136 3137
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}

3138
Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
3139 3140
  Node* value = node->InputAt(0);

3141 3142
  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3143

3144 3145
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);
3146

3147 3148 3149 3150
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  Node* vfalse = __ Word32Equal(
3151
      __ Int32Constant(Map::Bits1::IsCallableBit::kMask),
3152
      __ Word32And(value_bit_field,
3153 3154
                   __ Int32Constant((Map::Bits1::IsCallableBit::kMask) |
                                    (Map::Bits1::IsUndetectableBit::kMask))));
3155
  __ Goto(&done, vfalse);
3156

3157 3158
  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));
3159

3160 3161
  __ Bind(&done);
  return done.PhiAt(0);
3162 3163
}

3164 3165 3166 3167 3168 3169 3170
Node* EffectControlLinearizer::LowerNumberIsFloat64Hole(Node* node) {
  Node* value = node->InputAt(0);
  Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
                               __ Int32Constant(kHoleNanUpper32));
  return check;
}

3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184
Node* EffectControlLinearizer::LowerNumberIsFinite(Node* node) {
  Node* number = node->InputAt(0);
  Node* diff = __ Float64Sub(number, number);
  Node* check = __ Float64Equal(diff, diff);
  return check;
}

// Tests whether {object} is a number with a finite value: Smis always
// qualify; HeapNumbers qualify iff value - value compares equal to itself.
Node* EffectControlLinearizer::LowerObjectIsFiniteNumber(Node* node) {
  Node* object = node->InputAt(0);
  Node* zero = __ Int32Constant(0);
  Node* one = __ Int32Constant(1);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {object} is a Smi.
  __ GotoIf(ObjectIsSmi(object), &done, one);

  // Check if {object} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
               zero);

  // {object} is a HeapNumber.
  Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
  Node* diff = __ Float64Sub(value, value);
  Node* check = __ Float64Equal(diff, diff);
  __ Goto(&done, check);

  __ Bind(&done);
  return done.PhiAt(0);
}

3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222
Node* EffectControlLinearizer::LowerNumberIsInteger(Node* node) {
  Node* number = node->InputAt(0);
  Node* trunc = BuildFloat64RoundTruncate(number);
  Node* diff = __ Float64Sub(number, trunc);
  Node* check = __ Float64Equal(diff, __ Float64Constant(0));
  return check;
}

// Tests whether {object} is a number with an integral value: Smis always
// qualify; HeapNumbers qualify iff truncation leaves the value unchanged.
Node* EffectControlLinearizer::LowerObjectIsInteger(Node* node) {
  Node* object = node->InputAt(0);
  Node* zero = __ Int32Constant(0);
  Node* one = __ Int32Constant(1);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {object} is a Smi.
  __ GotoIf(ObjectIsSmi(object), &done, one);

  // Check if {object} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
               zero);

  // {object} is a HeapNumber.
  Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
  Node* trunc = BuildFloat64RoundTruncate(value);
  Node* diff = __ Float64Sub(value, trunc);
  Node* check = __ Float64Equal(diff, __ Float64Constant(0));
  __ Goto(&done, check);

  __ Bind(&done);
  return done.PhiAt(0);
}

3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265
Node* EffectControlLinearizer::LowerNumberIsSafeInteger(Node* node) {
  Node* number = node->InputAt(0);
  Node* zero = __ Int32Constant(0);
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  Node* trunc = BuildFloat64RoundTruncate(number);
  Node* diff = __ Float64Sub(number, trunc);
  Node* check = __ Float64Equal(diff, __ Float64Constant(0));
  __ GotoIfNot(check, &done, zero);
  Node* in_range = __ Float64LessThanOrEqual(
      __ Float64Abs(trunc), __ Float64Constant(kMaxSafeInteger));
  __ Goto(&done, in_range);

  __ Bind(&done);
  return done.PhiAt(0);
}

// Tests whether {object} is a safe-integer number: Smis always qualify;
// HeapNumbers must hold an integral value with |value| <= kMaxSafeInteger.
Node* EffectControlLinearizer::LowerObjectIsSafeInteger(Node* node) {
  Node* object = node->InputAt(0);
  Node* zero = __ Int32Constant(0);
  Node* one = __ Int32Constant(1);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {object} is a Smi.
  __ GotoIf(ObjectIsSmi(object), &done, one);

  // Check if {object} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
               zero);

  // {object} is a HeapNumber.
  Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
  Node* trunc = BuildFloat64RoundTruncate(value);
  Node* diff = __ Float64Sub(value, trunc);
  Node* check = __ Float64Equal(diff, __ Float64Constant(0));
  __ GotoIfNot(check, &done, zero);
  Node* in_range = __ Float64LessThanOrEqual(
      __ Float64Abs(trunc), __ Float64Constant(kMaxSafeInteger));
  __ Goto(&done, in_range);

  __ Bind(&done);
  return done.PhiAt(0);
}

namespace {

// There is no (currently) available constexpr version of base::bit_cast, so
// we have to make do with constructing the -0.0 bits manually (by setting the
// sign bit to 1 and everything else to 0).
// TODO(leszeks): Revisit when upgrading to C++20.
constexpr int32_t kMinusZeroLoBits = static_cast<int32_t>(0);
constexpr int32_t kMinusZeroHiBits = static_cast<int32_t>(1) << 31;
constexpr int64_t kMinusZeroBits =
    (static_cast<uint64_t>(kMinusZeroHiBits) << 32) | kMinusZeroLoBits;

}  // namespace

3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306
Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) {
  Node* value = node->InputAt(0);
  Node* zero = __ Int32Constant(0);

  auto done = __ MakeLabel(MachineRepresentation::kBit);

  // Check if {value} is a Smi.
  __ GotoIf(ObjectIsSmi(value), &done, zero);

  // Check if {value} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
3307
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
3308 3309 3310 3311
               zero);

  // Check if {value} contains -0.
  Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322
  if (machine()->Is64()) {
    Node* value64 = __ BitcastFloat64ToInt64(value_value);
    __ Goto(&done, __ Word64Equal(value64, __ Int64Constant(kMinusZeroBits)));
  } else {
    Node* value_lo = __ Float64ExtractLowWord32(value_value);
    __ GotoIfNot(__ Word32Equal(value_lo, __ Int32Constant(kMinusZeroLoBits)),
                 &done, zero);
    Node* value_hi = __ Float64ExtractHighWord32(value_value);
    __ Goto(&done,
            __ Word32Equal(value_hi, __ Int32Constant(kMinusZeroHiBits)));
  }
3323 3324 3325 3326 3327

  __ Bind(&done);
  return done.PhiAt(0);
}

3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348
Node* EffectControlLinearizer::LowerNumberIsMinusZero(Node* node) {
  Node* value = node->InputAt(0);

  if (machine()->Is64()) {
    Node* value64 = __ BitcastFloat64ToInt64(value);
    return __ Word64Equal(value64, __ Int64Constant(kMinusZeroBits));
  } else {
    auto done = __ MakeLabel(MachineRepresentation::kBit);

    Node* value_lo = __ Float64ExtractLowWord32(value);
    __ GotoIfNot(__ Word32Equal(value_lo, __ Int32Constant(kMinusZeroLoBits)),
                 &done, __ Int32Constant(0));
    Node* value_hi = __ Float64ExtractHighWord32(value);
    __ Goto(&done,
            __ Word32Equal(value_hi, __ Int32Constant(kMinusZeroHiBits)));

    __ Bind(&done);
    return done.PhiAt(0);
  }
}

3349 3350 3351 3352
Node* EffectControlLinearizer::LowerObjectIsNaN(Node* node) {
  Node* value = node->InputAt(0);
  Node* zero = __ Int32Constant(0);

3353
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3354 3355 3356 3357 3358 3359

  // Check if {value} is a Smi.
  __ GotoIf(ObjectIsSmi(value), &done, zero);

  // Check if {value} is a HeapNumber.
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
3360
  __ GotoIfNot(__ TaggedEqual(value_map, __ HeapNumberMapConstant()), &done,
3361
               zero);
3362 3363 3364 3365 3366 3367 3368 3369 3370 3371

  // Check if {value} contains a NaN.
  Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
  __ Goto(&done,
          __ Word32Equal(__ Float64Equal(value_value, value_value), zero));

  __ Bind(&done);
  return done.PhiAt(0);
}

3372 3373 3374 3375 3376 3377 3378
Node* EffectControlLinearizer::LowerNumberIsNaN(Node* node) {
  Node* number = node->InputAt(0);
  Node* diff = __ Float64Equal(number, number);
  Node* check = __ Word32Equal(diff, __ Int32Constant(0));
  return check;
}

3379 3380 3381
Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
  Node* value = node->InputAt(0);

3382 3383
  auto if_primitive = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3384 3385 3386 3387 3388 3389 3390

  Node* check0 = ObjectIsSmi(value);
  __ GotoIf(check0, &if_primitive);

  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
3391
  static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3392 3393
  Node* check1 = __ Uint32LessThanOrEqual(
      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
3394
  __ GotoIfNot(check1, &if_primitive);
3395 3396 3397

  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
3398 3399 3400 3401
  Node* check2 = __ Word32Equal(
      __ Int32Constant(0),
      __ Word32And(value_bit_field,
                   __ Int32Constant(Map::Bits1::IsCallableBit::kMask)));
3402 3403 3404 3405 3406 3407 3408 3409 3410
  __ Goto(&done, check2);

  __ Bind(&if_primitive);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}

3411
Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
3412 3413
  Node* value = node->InputAt(0);

3414 3415
  auto if_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3416

3417 3418
  __ GotoIf(ObjectIsSmi(value), &if_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
3419
  __ Goto(&done, __ TaggedEqual(value_map, __ HeapNumberMapConstant()));
3420

3421 3422
  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(1));
3423

3424 3425
  __ Bind(&done);
  return done.PhiAt(0);
3426 3427
}

3428
Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
3429 3430
  Node* value = node->InputAt(0);

3431 3432
  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3433

3434
  __ GotoIf(ObjectIsSmi(value), &if_smi);
3435

3436
  static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3437 3438 3439 3440 3441 3442
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* result = __ Uint32LessThanOrEqual(
      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
  __ Goto(&done, result);
3443

3444 3445
  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));
3446

3447 3448
  __ Bind(&done);
  return done.PhiAt(0);
3449 3450
}

3451
Node* EffectControlLinearizer::LowerObjectIsSmi(Node* node) {
3452
  Node* value = node->InputAt(0);
3453
  return ObjectIsSmi(value);
3454 3455
}

3456
Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
3457 3458
  Node* value = node->InputAt(0);

3459 3460
  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3461

3462 3463 3464 3465 3466 3467 3468 3469
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* vfalse = __ Uint32LessThan(value_instance_type,
                                   __ Uint32Constant(FIRST_NONSTRING_TYPE));
  __ Goto(&done, vfalse);
3470

3471 3472
  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));
3473

3474 3475
  __ Bind(&done);
  return done.PhiAt(0);
3476 3477
}

3478 3479 3480
Node* EffectControlLinearizer::LowerObjectIsSymbol(Node* node) {
  Node* value = node->InputAt(0);

3481 3482
  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499

  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* vfalse =
      __ Word32Equal(value_instance_type, __ Uint32Constant(SYMBOL_TYPE));
  __ Goto(&done, vfalse);

  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));

  __ Bind(&done);
  return done.PhiAt(0);
}

3500
Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
3501 3502
  Node* value = node->InputAt(0);

3503 3504
  auto if_smi = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);
3505

3506 3507
  Node* check = ObjectIsSmi(value);
  __ GotoIf(check, &if_smi);
3508

3509 3510 3511 3512
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
  Node* value_bit_field =
      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
  Node* vfalse = __ Word32Equal(
3513 3514 3515
      __ Word32Equal(
          __ Int32Constant(0),
          __ Word32And(value_bit_field,
3516
                       __ Int32Constant(Map::Bits1::IsUndetectableBit::kMask))),
3517 3518
      __ Int32Constant(0));
  __ Goto(&done, vfalse);
3519

3520 3521
  __ Bind(&if_smi);
  __ Goto(&done, __ Int32Constant(0));
3522

3523 3524
  __ Bind(&done);
  return done.PhiAt(0);
3525 3526
}

3527 3528
Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
  Node* obj = node->InputAt(0);
3529
  Callable const callable = Builtins::CallableFor(isolate(), Builtin::kTypeof);
3530 3531
  Operator::Properties const properties = Operator::kEliminatable;
  CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
3532
  auto call_descriptor = Linkage::GetStubCallDescriptor(
3533 3534
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
3535
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
3536 3537 3538
                 __ NoContextConstant());
}

3539 3540 3541
Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
  Node* obj = node->InputAt(0);
  Callable const callable =
3542
      Builtins::CallableFor(isolate(), Builtin::kToBoolean);
3543
  Operator::Properties const properties = Operator::kEliminatable;
3544
  CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
3545
  auto call_descriptor = Linkage::GetStubCallDescriptor(
3546 3547
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
3548
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj);
3549 3550
}

3551
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
3552
  Node* arguments_length = ChangeIntPtrToSmi(
3553 3554
      __ Load(MachineType::Pointer(), __ LoadFramePointer(),
              __ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
3555 3556
  arguments_length =
      __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
3557
  return arguments_length;
3558 3559 3560 3561 3562 3563
}

// Computes the length of the rest-parameter array: actual argument count
// (minus the receiver) minus the formal parameter count, clamped at zero.
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
  int formal_parameter_count = FormalParameterCountOf(node->op());
  DCHECK_LE(0, formal_parameter_count);

  auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
  Node* frame = __ LoadFramePointer();

  Node* arguments_length = ChangeIntPtrToSmi(
      __ Load(MachineType::Pointer(), frame,
              __ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
  arguments_length =
      __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
  Node* rest_length =
      __ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
  __ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
            __ SmiConstant(0));
  __ Goto(&done, rest_length);

  __ Bind(&done);
  return done.PhiAt(0);
}

3582
Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
3583
  AllocationType const allocation = AllocationTypeOf(node->op());
3584 3585
  Node* length = node->InputAt(0);

3586
  auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
3587
  Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
3588
  __ GotoIf(zero_length, &done,
3589
            __ HeapConstant(factory()->empty_fixed_array()));
3590

3591
  // Compute the effective size of the backing store.
3592 3593
  Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kDoubleSizeLog2)),
                         __ IntPtrConstant(FixedDoubleArray::kHeaderSize));
3594 3595

  // Allocate the result and initialize the header.
3596
  Node* result = __ Allocate(allocation, size);
3597 3598 3599
  __ StoreField(AccessBuilder::ForMap(), result,
                __ FixedDoubleArrayMapConstant());
  __ StoreField(AccessBuilder::ForFixedArrayLength(), result,
3600
                ChangeIntPtrToSmi(length));
3601 3602

  // Initialize the backing store with holes.
3603 3604
  STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
                                    Oddball::kToNumberRawOffset);
3605 3606
  Node* the_hole =
      __ LoadField(AccessBuilder::ForHeapNumberValue(), __ TheHoleConstant());
3607 3608
  auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
  __ Goto(&loop, __ IntPtrConstant(0));
3609 3610 3611 3612
  __ Bind(&loop);
  {
    // Check if we've initialized everything.
    Node* index = loop.PhiAt(0);
3613
    Node* check = __ UintLessThan(index, length);
3614
    __ GotoIfNot(check, &done, result);
3615

3616 3617 3618 3619
    ElementAccess const access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
                                  Type::NumberOrHole(), MachineType::Float64(),
                                  kNoWriteBarrier};
    __ StoreElement(access, result, index, the_hole);
3620 3621

    // Advance the {index}.
3622
    index = __ IntAdd(index, __ IntPtrConstant(1));
3623 3624 3625
    __ Goto(&loop, index);
  }

3626 3627
  __ Bind(&done);
  return done.PhiAt(0);
3628 3629
}

3630
Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
3631
  AllocationType const allocation = AllocationTypeOf(node->op());
3632 3633
  Node* length = node->InputAt(0);

3634
  auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
3635
  Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0));
3636
  __ GotoIf(zero_length, &done,
3637
            __ HeapConstant(factory()->empty_fixed_array()));
3638

3639
  // Compute the effective size of the backing store.
3640 3641
  Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kTaggedSizeLog2)),
                         __ IntPtrConstant(FixedArray::kHeaderSize));
3642 3643

  // Allocate the result and initialize the header.
3644
  Node* result = __ Allocate(allocation, size);
3645 3646
  __ StoreField(AccessBuilder::ForMap(), result, __ FixedArrayMapConstant());
  __ StoreField(AccessBuilder::ForFixedArrayLength(), result,
3647
                ChangeIntPtrToSmi(length));
3648 3649 3650

  // Initialize the backing store with holes.
  Node* the_hole = __ TheHoleConstant();
3651 3652
  auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
  __ Goto(&loop, __ IntPtrConstant(0));
3653 3654 3655 3656
  __ Bind(&loop);
  {
    // Check if we've initialized everything.
    Node* index = loop.PhiAt(0);
3657
    Node* check = __ UintLessThan(index, length);
3658
    __ GotoIfNot(check, &done, result);
3659 3660

    // Storing "the_hole" doesn't need a write barrier.
3661 3662 3663
    ElementAccess const access = {kTaggedBase, FixedArray::kHeaderSize,
                                  Type::Any(), MachineType::AnyTagged(),
                                  kNoWriteBarrier};
3664
    __ StoreElement(access, result, index, the_hole);
3665 3666

    // Advance the {index}.
3667
    index = __ IntAdd(index, __ IntPtrConstant(1));
3668 3669 3670
    __ Goto(&loop, index);
  }

3671 3672
  __ Bind(&done);
  return done.PhiAt(0);
3673 3674
}

3675
Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
3676 3677 3678
  const NewArgumentsElementsParameters& parameters =
      NewArgumentsElementsParametersOf(node->op());
  CreateArgumentsType type = parameters.arguments_type();
3679 3680
  Operator::Properties const properties = node->op()->properties();
  CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
3681 3682
  Node* frame = __ LoadFramePointer();
  Node* arguments_count = NodeProperties::GetValueInput(node, 0);
3683
  Builtin builtin_name;
3684 3685
  switch (type) {
    case CreateArgumentsType::kMappedArguments:
3686
      builtin_name = Builtin::kNewSloppyArgumentsElements;
3687 3688
      break;
    case CreateArgumentsType::kUnmappedArguments:
3689
      builtin_name = Builtin::kNewStrictArgumentsElements;
3690 3691
      break;
    case CreateArgumentsType::kRestParameter:
3692
      builtin_name = Builtin::kNewRestArgumentsElements;
3693 3694 3695
      break;
  }
  Callable const callable = Builtins::CallableFor(isolate(), builtin_name);
3696
  auto call_descriptor = Linkage::GetStubCallDescriptor(
3697 3698
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
3699
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), frame,
3700 3701
                 __ IntPtrConstant(parameters.formal_parameter_count()),
                 arguments_count);
3702 3703
}

3704 3705 3706 3707 3708
Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
  Node* length = node->InputAt(0);
  Node* first = node->InputAt(1);
  Node* second = node->InputAt(2);

3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723
  // Determine the instance types of {first} and {second}.
  Node* first_map = __ LoadField(AccessBuilder::ForMap(), first);
  Node* first_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), first_map);
  Node* second_map = __ LoadField(AccessBuilder::ForMap(), second);
  Node* second_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), second_map);

  // Determine the proper map for the resulting ConsString.
  // If both {first} and {second} are one-byte strings, we
  // create a new ConsOneByteString, otherwise we create a
  // new ConsString instead.
  auto if_onebyte = __ MakeLabel();
  auto if_twobyte = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
3724 3725
  static_assert(kOneByteStringTag != 0);
  static_assert(kTwoByteStringTag == 0);
3726 3727 3728 3729 3730 3731
  Node* instance_type = __ Word32And(first_instance_type, second_instance_type);
  Node* encoding =
      __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask));
  __ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)),
            &if_twobyte, &if_onebyte);
  __ Bind(&if_onebyte);
3732
  __ Goto(&done, __ HeapConstant(factory()->cons_one_byte_string_map()));
3733
  __ Bind(&if_twobyte);
3734
  __ Goto(&done, __ HeapConstant(factory()->cons_string_map()));
3735 3736
  __ Bind(&done);
  Node* result_map = done.PhiAt(0);
3737 3738

  // Allocate the resulting ConsString.
3739
  Node* result =
3740
      __ Allocate(AllocationType::kYoung, __ IntPtrConstant(ConsString::kSize));
3741
  __ StoreField(AccessBuilder::ForMap(), result, result_map);
3742
  __ StoreField(AccessBuilder::ForNameRawHashField(), result,
3743
                __ Int32Constant(Name::kEmptyHashField));
3744 3745 3746 3747 3748 3749
  __ StoreField(AccessBuilder::ForStringLength(), result, length);
  __ StoreField(AccessBuilder::ForConsStringFirst(), result, first);
  __ StoreField(AccessBuilder::ForConsStringSecond(), result, second);
  return result;
}

3750 3751 3752 3753 3754
Node* EffectControlLinearizer::LowerSameValue(Node* node) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Callable const callable =
3755
      Builtins::CallableFor(isolate(), Builtin::kSameValue);
3756 3757
  Operator::Properties properties = Operator::kEliminatable;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
3758
  auto call_descriptor = Linkage::GetStubCallDescriptor(
3759 3760
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
3761
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
3762 3763 3764
                 __ NoContextConstant());
}

3765 3766 3767 3768 3769
Node* EffectControlLinearizer::LowerSameValueNumbersOnly(Node* node) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Callable const callable =
3770
      Builtins::CallableFor(isolate(), Builtin::kSameValueNumbersOnly);
3771 3772 3773 3774 3775 3776 3777 3778 3779
  Operator::Properties properties = Operator::kEliminatable;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
                 __ NoContextConstant());
}

3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804
Node* EffectControlLinearizer::LowerNumberSameValue(Node* node) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  auto is_float64_equal = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kBit);

  __ GotoIf(__ Float64Equal(lhs, rhs), &is_float64_equal);

  // Return true iff both {lhs} and {rhs} are NaN.
  __ GotoIf(__ Float64Equal(lhs, lhs), &done, __ Int32Constant(0));
  __ GotoIf(__ Float64Equal(rhs, rhs), &done, __ Int32Constant(0));
  __ Goto(&done, __ Int32Constant(1));

  __ Bind(&is_float64_equal);
  // Even if the values are float64-equal, we still need to distinguish
  // zero and minus zero.
  Node* lhs_hi = __ Float64ExtractHighWord32(lhs);
  Node* rhs_hi = __ Float64ExtractHighWord32(rhs);
  __ Goto(&done, __ Word32Equal(lhs_hi, rhs_hi));

  __ Bind(&done);
  return done.PhiAt(0);
}

3805 3806 3807
Node* EffectControlLinearizer::LowerDeadValue(Node* node) {
  Node* input = NodeProperties::GetValueInput(node, 0);
  if (input->opcode() != IrOpcode::kUnreachable) {
3808 3809 3810 3811 3812
    // There is no fundamental reason not to connect to end here, except it
    // integrates into the way the graph is constructed in a simpler way at
    // this point.
    // TODO(jgruber): Connect to end here as well.
    Node* unreachable = __ UnreachableWithoutConnectToEnd();
3813 3814
    NodeProperties::ReplaceValueInput(node, unreachable, 0);
  }
3815
  return gasm()->AddNode(node);
3816 3817
}

3818 3819 3820 3821
Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
  Node* string = node->InputAt(0);

  Callable const callable =
3822
      Builtins::CallableFor(isolate(), Builtin::kStringToNumber);
3823 3824
  Operator::Properties properties = Operator::kEliminatable;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
3825
  auto call_descriptor = Linkage::GetStubCallDescriptor(
3826 3827
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
3828
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
3829 3830 3831
                 __ NoContextConstant());
}

3832 3833
Node* EffectControlLinearizer::StringCharCodeAt(Node* receiver,
                                                Node* position) {
3834 3835 3836
  // We need a loop here to properly deal with indirect strings
  // (SlicedString, ConsString and ThinString).
  auto loop = __ MakeLoopLabel(MachineRepresentation::kTagged,
3837
                               MachineType::PointerRepresentation());
3838
  auto loop_next = __ MakeLabel(MachineRepresentation::kTagged,
3839
                                MachineType::PointerRepresentation());
3840 3841 3842 3843
  auto loop_done = __ MakeLabel(MachineRepresentation::kWord32);
  __ Goto(&loop, receiver, position);
  __ Bind(&loop);
  {
3844 3845
    receiver = loop.PhiAt(0);
    position = loop.PhiAt(1);
3846 3847 3848 3849 3850 3851 3852
    Node* receiver_map = __ LoadField(AccessBuilder::ForMap(), receiver);
    Node* receiver_instance_type =
        __ LoadField(AccessBuilder::ForMapInstanceType(), receiver_map);
    Node* receiver_representation = __ Word32And(
        receiver_instance_type, __ Int32Constant(kStringRepresentationMask));

    // Dispatch on the current {receiver}s string representation.
3853 3854
    auto if_lessthanoreq_cons = __ MakeLabel();
    auto if_greaterthan_cons = __ MakeLabel();
3855 3856 3857 3858 3859 3860
    auto if_seqstring = __ MakeLabel();
    auto if_consstring = __ MakeLabel();
    auto if_thinstring = __ MakeLabel();
    auto if_externalstring = __ MakeLabel();
    auto if_slicedstring = __ MakeLabel();
    auto if_runtime = __ MakeDeferredLabel();
3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884

    __ Branch(__ Int32LessThanOrEqual(receiver_representation,
                                      __ Int32Constant(kConsStringTag)),
              &if_lessthanoreq_cons, &if_greaterthan_cons);

    __ Bind(&if_lessthanoreq_cons);
    {
      __ Branch(__ Word32Equal(receiver_representation,
                               __ Int32Constant(kConsStringTag)),
                &if_consstring, &if_seqstring);
    }

    __ Bind(&if_greaterthan_cons);
    {
      __ GotoIf(__ Word32Equal(receiver_representation,
                               __ Int32Constant(kThinStringTag)),
                &if_thinstring);
      __ GotoIf(__ Word32Equal(receiver_representation,
                               __ Int32Constant(kExternalStringTag)),
                &if_externalstring);
      __ Branch(__ Word32Equal(receiver_representation,
                               __ Int32Constant(kSlicedStringTag)),
                &if_slicedstring, &if_runtime);
    }
3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900

    __ Bind(&if_seqstring);
    {
      Node* receiver_is_onebyte = __ Word32Equal(
          __ Word32Equal(__ Word32And(receiver_instance_type,
                                      __ Int32Constant(kStringEncodingMask)),
                         __ Int32Constant(kTwoByteStringTag)),
          __ Int32Constant(0));
      Node* result = LoadFromSeqString(receiver, position, receiver_is_onebyte);
      __ Goto(&loop_done, result);
    }

    __ Bind(&if_consstring);
    {
      Node* receiver_second =
          __ LoadField(AccessBuilder::ForConsStringSecond(), receiver);
3901
      __ GotoIfNot(__ TaggedEqual(receiver_second, __ EmptyStringConstant()),
3902 3903 3904 3905 3906 3907
                   &if_runtime);
      Node* receiver_first =
          __ LoadField(AccessBuilder::ForConsStringFirst(), receiver);
      __ Goto(&loop_next, receiver_first, position);
    }

3908 3909 3910 3911 3912 3913 3914
    __ Bind(&if_thinstring);
    {
      Node* receiver_actual =
          __ LoadField(AccessBuilder::ForThinStringActual(), receiver);
      __ Goto(&loop_next, receiver_actual, position);
    }

3915 3916
    __ Bind(&if_externalstring);
    {
3917
      // We need to bailout to the runtime for uncached external strings.
3918 3919
      __ GotoIf(__ Word32Equal(
                    __ Word32And(receiver_instance_type,
3920 3921
                                 __ Int32Constant(kUncachedExternalStringMask)),
                    __ Int32Constant(kUncachedExternalStringTag)),
3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936
                &if_runtime);

      Node* receiver_data = __ LoadField(
          AccessBuilder::ForExternalStringResourceData(), receiver);

      auto if_onebyte = __ MakeLabel();
      auto if_twobyte = __ MakeLabel();
      __ Branch(
          __ Word32Equal(__ Word32And(receiver_instance_type,
                                      __ Int32Constant(kStringEncodingMask)),
                         __ Int32Constant(kTwoByteStringTag)),
          &if_twobyte, &if_onebyte);

      __ Bind(&if_onebyte);
      {
3937
        Node* result = __ Load(MachineType::Uint8(), receiver_data, position);
3938 3939 3940 3941 3942
        __ Goto(&loop_done, result);
      }

      __ Bind(&if_twobyte);
      {
3943 3944
        Node* result = __ Load(MachineType::Uint16(), receiver_data,
                               __ WordShl(position, __ IntPtrConstant(1)));
3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955
        __ Goto(&loop_done, result);
      }
    }

    __ Bind(&if_slicedstring);
    {
      Node* receiver_offset =
          __ LoadField(AccessBuilder::ForSlicedStringOffset(), receiver);
      Node* receiver_parent =
          __ LoadField(AccessBuilder::ForSlicedStringParent(), receiver);
      __ Goto(&loop_next, receiver_parent,
3956
              __ IntAdd(position, ChangeSmiToIntPtr(receiver_offset)));
3957 3958 3959 3960 3961 3962
    }

    __ Bind(&if_runtime);
    {
      Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
      Runtime::FunctionId id = Runtime::kStringCharCodeAt;
3963
      auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
3964
          graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
3965
      Node* result = __ Call(call_descriptor, __ CEntryStubConstant(1),
3966
                             receiver, ChangeIntPtrToSmi(position),
3967 3968
                             __ ExternalConstant(ExternalReference::Create(id)),
                             __ Int32Constant(2), __ NoContextConstant());
3969 3970 3971 3972 3973 3974 3975 3976
      __ Goto(&loop_done, ChangeSmiToInt32(result));
    }

    __ Bind(&loop_next);
    __ Goto(&loop, loop_next.PhiAt(0), loop_next.PhiAt(1));
  }
  __ Bind(&loop_done);
  return loop_done.PhiAt(0);
3977 3978
}

3979 3980 3981 3982 3983 3984
Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
  Node* receiver = node->InputAt(0);
  Node* position = node->InputAt(1);
  return StringCharCodeAt(receiver, position);
}

3985
Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
3986 3987 3988
  Node* receiver = node->InputAt(0);
  Node* position = node->InputAt(1);

3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013
  auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
  Node* first_code_unit = StringCharCodeAt(receiver, position);

  __ GotoIfNot(
      __ Word32Equal(__ Word32And(first_code_unit, __ Int32Constant(0xFC00)),
                     __ Int32Constant(0xD800)),
      &return_result, BranchHint::kFalse, first_code_unit);

  auto length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
  auto next_index = __ IntAdd(position, __ IntPtrConstant(1));
  __ GotoIfNot(__ IntLessThan(next_index, length), &return_result,
               first_code_unit);
  Node* second_code_unit = StringCharCodeAt(receiver, next_index);
  __ GotoIfNot(
      __ Word32Equal(__ Word32And(second_code_unit, __ Int32Constant(0xFC00)),
                     __ Int32Constant(0xDC00)),
      &return_result, first_code_unit);

  auto surrogate_offset = __ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
  auto result = __ Int32Add(__ Word32Shl(first_code_unit, __ Int32Constant(10)),
                            __ Int32Add(second_code_unit, surrogate_offset));
  __ Goto(&return_result, result);

  __ Bind(&return_result);
  return return_result.PhiAt(0);
4014 4015
}

4016 4017
Node* EffectControlLinearizer::LoadFromSeqString(Node* receiver, Node* position,
                                                 Node* is_one_byte) {
4018 4019
  auto one_byte_load = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);
4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033
  __ GotoIf(is_one_byte, &one_byte_load);
  Node* two_byte_result = __ LoadElement(
      AccessBuilder::ForSeqTwoByteStringCharacter(), receiver, position);
  __ Goto(&done, two_byte_result);

  __ Bind(&one_byte_load);
  Node* one_byte_element = __ LoadElement(
      AccessBuilder::ForSeqOneByteStringCharacter(), receiver, position);
  __ Goto(&done, one_byte_element);

  __ Bind(&done);
  return done.PhiAt(0);
}

4034
Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
4035
  Node* value = node->InputAt(0);
4036
  Node* code = __ Word32And(value, __ Uint32Constant(0xFFFF));
4037

4038
  auto if_not_one_byte = __ MakeDeferredLabel();
4039
  auto cache_miss = __ MakeDeferredLabel();
4040
  auto done = __ MakeLabel(MachineRepresentation::kTagged);
4041

4042 4043 4044 4045 4046
  // Check if the {code} is a one byte character
  Node* check1 = __ Uint32LessThanOrEqual(
      code, __ Uint32Constant(String::kMaxOneByteCharCode));
  __ GotoIfNot(check1, &if_not_one_byte);
  {
4047 4048
    // Load the isolate wide single character string cache.
    Node* cache = __ HeapConstant(factory()->single_character_string_cache());
4049

4050
    // Compute the {cache} index for {code}.
4051
    Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
4052

4053 4054
    // Check if we have an entry for the {code} in the single character string
    // cache already.
4055
    Node* entry =
4056
        __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
4057

4058 4059 4060 4061
    Node* check2 = __ TaggedEqual(entry, __ UndefinedConstant());
    __ GotoIf(check2, &cache_miss);

    // Use the {entry} from the {cache}.
4062
    __ Goto(&done, entry);
4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086

    __ Bind(&cache_miss);
    {
      // Allocate a new SeqOneByteString for {code}.
      Node* vtrue2 =
          __ Allocate(AllocationType::kYoung,
                      __ IntPtrConstant(SeqOneByteString::SizeFor(1)));
      __ StoreField(AccessBuilder::ForMap(), vtrue2,
                    __ HeapConstant(factory()->one_byte_string_map()));
      __ StoreField(AccessBuilder::ForNameRawHashField(), vtrue2,
                    __ Int32Constant(Name::kEmptyHashField));
      __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
                    __ Int32Constant(1));
      __ Store(
          StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
          vtrue2,
          __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
          code);

      // Remember it in the {cache}.
      __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
                      vtrue2);
      __ Goto(&done, vtrue2);
    }
4087
  }
4088

4089
  __ Bind(&if_not_one_byte);
4090
  {
4091
    // Allocate a new SeqTwoByteString for {code}.
4092 4093 4094
    Node* vfalse1 =
        __ Allocate(AllocationType::kYoung,
                    __ IntPtrConstant(SeqTwoByteString::SizeFor(1)));
4095 4096
    __ StoreField(AccessBuilder::ForMap(), vfalse1,
                  __ HeapConstant(factory()->string_map()));
4097
    __ StoreField(AccessBuilder::ForNameRawHashField(), vfalse1,
4098 4099 4100
                  __ Int32Constant(Name::kEmptyHashField));
    __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
                  __ Int32Constant(1));
4101 4102 4103 4104 4105 4106
    __ Store(
        StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
        vfalse1,
        __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
        code);
    __ Goto(&done, vfalse1);
4107
  }
4108

4109 4110
  __ Bind(&done);
  return done.PhiAt(0);
4111 4112
}

#ifdef V8_INTL_SUPPORT

// Lowers StringToLowerCaseIntl to a call to the StringToLowerCaseIntl
// builtin.
Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
  Node* receiver = node->InputAt(0);

  Callable callable =
      Builtins::CallableFor(isolate(), Builtin::kStringToLowerCaseIntl);
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
                 __ NoContextConstant());
}

// Lowers StringToUpperCaseIntl to a call to the
// %StringToUpperCaseIntl runtime function.
Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
  Node* receiver = node->InputAt(0);
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  Runtime::FunctionId id = Runtime::kStringToUpperCaseIntl;
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
  return __ Call(call_descriptor, __ CEntryStubConstant(1), receiver,
                 __ ExternalConstant(ExternalReference::Create(id)),
                 __ Int32Constant(1), __ NoContextConstant());
}

#else

// Without Intl support these operators are never created, so the
// lowerings must be unreachable.
Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
  UNREACHABLE();
}

Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
  UNREACHABLE();
}

#endif  // V8_INTL_SUPPORT

4152
Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
4153 4154 4155
  Node* value = node->InputAt(0);
  Node* code = value;

4156 4157
  auto if_not_single_code = __ MakeDeferredLabel();
  auto if_not_one_byte = __ MakeDeferredLabel();
4158
  auto cache_miss = __ MakeDeferredLabel();
4159
  auto done = __ MakeLabel(MachineRepresentation::kTagged);
4160 4161

  // Check if the {code} is a single code unit
4162
  Node* check0 = __ Uint32LessThanOrEqual(code, __ Uint32Constant(0xFFFF));
4163
  __ GotoIfNot(check0, &if_not_single_code);
4164 4165 4166

  {
    // Check if the {code} is a one byte character
4167 4168
    Node* check1 = __ Uint32LessThanOrEqual(
        code, __ Uint32Constant(String::kMaxOneByteCharCode));
4169
    __ GotoIfNot(check1, &if_not_one_byte);
4170
    {
4171 4172
      // Load the isolate wide single character string cache.
      Node* cache = __ HeapConstant(factory()->single_character_string_cache());
4173

4174
      // Compute the {cache} index for {code}.
4175
      Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
4176

4177 4178
      // Check if we have an entry for the {code} in the single character string
      // cache already.
4179
      Node* entry =
4180
          __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
4181

4182 4183 4184 4185
      Node* check2 = __ TaggedEqual(entry, __ UndefinedConstant());
      __ GotoIf(check2, &cache_miss);

      // Use the {entry} from the {cache}.
4186
      __ Goto(&done, entry);
4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210

      __ Bind(&cache_miss);
      {
        // Allocate a new SeqOneByteString for {code}.
        Node* vtrue2 =
            __ Allocate(AllocationType::kYoung,
                        __ IntPtrConstant(SeqOneByteString::SizeFor(1)));
        __ StoreField(AccessBuilder::ForMap(), vtrue2,
                      __ HeapConstant(factory()->one_byte_string_map()));
        __ StoreField(AccessBuilder::ForNameRawHashField(), vtrue2,
                      __ Int32Constant(Name::kEmptyHashField));
        __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
                      __ Int32Constant(1));
        __ Store(
            StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
            vtrue2,
            __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
            code);

        // Remember it in the {cache}.
        __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
                        vtrue2);
        __ Goto(&done, vtrue2);
      }
4211 4212
    }

4213
    __ Bind(&if_not_one_byte);
4214 4215
    {
      // Allocate a new SeqTwoByteString for {code}.
4216 4217
      Node* vfalse1 =
          __ Allocate(AllocationType::kYoung,
4218
                      __ IntPtrConstant(SeqTwoByteString::SizeFor(1)));
4219 4220
      __ StoreField(AccessBuilder::ForMap(), vfalse1,
                    __ HeapConstant(factory()->string_map()));
4221
      __ StoreField(AccessBuilder::ForNameRawHashField(), vfalse1,
4222 4223
                    __ IntPtrConstant(Name::kEmptyHashField));
      __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
4224
                    __ Int32Constant(1));
4225 4226 4227 4228 4229 4230
      __ Store(
          StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
          vfalse1,
          __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
          code);
      __ Goto(&done, vfalse1);
4231 4232 4233
    }
  }

4234
  __ Bind(&if_not_single_code);
4235 4236
  // Generate surrogate pair string
  {
4237 4238
    // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
    Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
4239

4240 4241 4242
    // lead = (codepoint >> 10) + LEAD_OFFSET
    Node* lead =
        __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
4243

4244 4245 4246
    // trail = (codepoint & 0x3FF) + 0xDC00;
    Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
                              __ Int32Constant(0xDC00));
4247

4248
    // codpoint = (trail << 16) | lead;
4249
#if V8_TARGET_BIG_ENDIAN
4250
    code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
4251
#else
4252
    code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
4253
#endif
4254 4255

    // Allocate a new SeqTwoByteString for {code}.
4256 4257 4258
    Node* vfalse0 =
        __ Allocate(AllocationType::kYoung,
                    __ IntPtrConstant(SeqTwoByteString::SizeFor(2)));
4259 4260
    __ StoreField(AccessBuilder::ForMap(), vfalse0,
                  __ HeapConstant(factory()->string_map()));
4261
    __ StoreField(AccessBuilder::ForNameRawHashField(), vfalse0,
4262 4263 4264
                  __ Int32Constant(Name::kEmptyHashField));
    __ StoreField(AccessBuilder::ForStringLength(), vfalse0,
                  __ Int32Constant(2));
4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276
    __ Store(
        StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
        vfalse0,
        __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
        code);
    __ Goto(&done, vfalse0);
  }

  __ Bind(&done);
  return done.PhiAt(0);
}

4277 4278 4279 4280 4281
Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
  Node* subject = node->InputAt(0);
  Node* search_string = node->InputAt(1);
  Node* position = node->InputAt(2);

4282
  Callable callable = Builtins::CallableFor(isolate(), Builtin::kStringIndexOf);
4283 4284
  Operator::Properties properties = Operator::kEliminatable;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
4285
  auto call_descriptor = Linkage::GetStubCallDescriptor(
4286 4287
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
4288 4289
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), subject,
                 search_string, position, __ NoContextConstant());
4290 4291
}

4292 4293 4294 4295 4296
Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) {
  Node* string = node->InputAt(0);
  Node* index = node->InputAt(1);

  Callable callable =
4297
      Builtins::CallableFor(isolate(), Builtin::kStringFromCodePointAt);
4298 4299 4300 4301 4302 4303 4304 4305 4306
  Operator::Properties properties = Operator::kEliminatable;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
                 index, __ NoContextConstant());
}

4307 4308 4309 4310 4311 4312
Node* EffectControlLinearizer::LowerStringLength(Node* node) {
  Node* subject = node->InputAt(0);

  return __ LoadField(AccessBuilder::ForStringLength(), subject);
}

4313 4314 4315 4316
Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
                                                     Node* node) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);
4317

4318 4319
  Operator::Properties properties = Operator::kEliminatable;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
4320
  auto call_descriptor = Linkage::GetStubCallDescriptor(
4321 4322
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
4323
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
4324
                 __ NoContextConstant());
4325 4326
}

4327 4328
Node* EffectControlLinearizer::LowerStringSubstring(Node* node) {
  Node* receiver = node->InputAt(0);
4329 4330
  Node* start = ChangeInt32ToIntPtr(node->InputAt(1));
  Node* end = ChangeInt32ToIntPtr(node->InputAt(2));
4331 4332

  Callable callable =
4333
      Builtins::CallableFor(isolate(), Builtin::kStringSubstring);
4334 4335 4336
  Operator::Properties properties = Operator::kEliminatable;
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
4337 4338
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
4339 4340 4341 4342
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
                 start, end, __ NoContextConstant());
}

4343
Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
4344
  return LowerStringComparison(
4345
      Builtins::CallableFor(isolate(), Builtin::kStringEqual), node);
4346 4347
}

4348
Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
4349
  return LowerStringComparison(
4350
      Builtins::CallableFor(isolate(), Builtin::kStringLessThan), node);
4351 4352
}

4353
Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
4354
  return LowerStringComparison(
4355
      Builtins::CallableFor(isolate(), Builtin::kStringLessThanOrEqual), node);
4356 4357
}

4358
Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
4359 4360 4361 4362
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Callable const callable =
4363
      Builtins::CallableFor(isolate(), Builtin::kBigIntAddNoThrow);
4364 4365 4366 4367
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
      Operator::kFoldable | Operator::kNoThrow);
4368 4369
  Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
                        rhs, __ NoContextConstant());
4370

4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383
  // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
  __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
                  ObjectIsSmi(value), frame_state);

  return value;
}

// Lowers BigIntSubtract to a call to the BigIntSubtractNoThrow builtin,
// deoptimizing (BigIntTooBig) on the Smi failure sentinel; mirrors
// LowerBigIntAdd.
Node* EffectControlLinearizer::LowerBigIntSubtract(Node* node,
                                                   Node* frame_state) {
  Node* lhs = node->InputAt(0);
  Node* rhs = node->InputAt(1);

  Callable const callable =
      Builtins::CallableFor(isolate(), Builtin::kBigIntSubtractNoThrow);
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
      Operator::kFoldable | Operator::kNoThrow);
  Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs,
                        rhs, __ NoContextConstant());

  // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
  __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{},
                  ObjectIsSmi(value), frame_state);

  return value;
}

4399
Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
4400
  Callable const callable =
4401
      Builtins::CallableFor(isolate(), Builtin::kBigIntUnaryMinus);
4402 4403 4404 4405
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
      Operator::kFoldable | Operator::kNoThrow);
4406 4407
  Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()),
                        node->InputAt(0), __ NoContextConstant());
4408 4409 4410 4411

  return value;
}

4412 4413
Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
                                                     Node* frame_state) {
4414 4415
  // If we reach this point w/o eliminating the {node} that's marked
  // with allow-return-hole, we cannot do anything, so just deoptimize
4416
  // in case of the hole NaN.
4417 4418
  CheckFloat64HoleParameters const& params =
      CheckFloat64HoleParametersOf(node->op());
4419
  Node* value = node->InputAt(0);
4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438

  auto if_nan = __ MakeDeferredLabel();
  auto done = __ MakeLabel();

  // First check whether {value} is a NaN at all...
  __ Branch(__ Float64Equal(value, value), &done, &if_nan);

  __ Bind(&if_nan);
  {
    // ...and only if {value} is a NaN, perform the expensive bit
    // check. See http://crbug.com/v8/8264 for details.
    Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
                                 __ Int32Constant(kHoleNanUpper32));
    __ DeoptimizeIf(DeoptimizeReason::kHole, params.feedback(), check,
                    frame_state);
    __ Goto(&done);
  }

  __ Bind(&done);
4439
  return value;
4440 4441
}

4442 4443 4444
Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
                                                       Node* frame_state) {
  Node* value = node->InputAt(0);
4445
  Node* check = __ TaggedEqual(value, __ TheHoleConstant());
4446
  __ DeoptimizeIf(DeoptimizeReason::kHole, FeedbackSource(), check,
4447
                  frame_state);
4448
  return value;
4449 4450
}

4451
Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
4452 4453
  Node* value = node->InputAt(0);

4454 4455
  auto if_is_hole = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);
4456

4457
  Node* check = __ TaggedEqual(value, __ TheHoleConstant());
4458 4459
  __ GotoIf(check, &if_is_hole);
  __ Goto(&done, value);
4460

4461 4462
  __ Bind(&if_is_hole);
  __ Goto(&done, __ UndefinedConstant());
4463

4464 4465
  __ Bind(&done);
  return done.PhiAt(0);
4466 4467
}

4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478
void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
    Node* node, Node* frame_state) {
  Node* exp = node->InputAt(0);
  Node* val = node->InputAt(1);

  auto if_same = __ MakeLabel();
  auto if_notsame = __ MakeDeferredLabel();
  auto if_thinstring = __ MakeLabel();
  auto if_notthinstring = __ MakeLabel();

  // Check if {exp} and {val} are the same, which is the likely case.
4479
  __ Branch(__ TaggedEqual(exp, val), &if_same, &if_notsame);
4480 4481 4482 4483

  __ Bind(&if_notsame);
  {
    // Now {val} could still be a non-internalized String that matches {exp}.
4484
    __ DeoptimizeIf(DeoptimizeReason::kWrongName, FeedbackSource(),
4485
                    ObjectIsSmi(val), frame_state);
4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502
    Node* val_map = __ LoadField(AccessBuilder::ForMap(), val);
    Node* val_instance_type =
        __ LoadField(AccessBuilder::ForMapInstanceType(), val_map);

    // Check for the common case of ThinString first.
    __ GotoIf(__ Word32Equal(val_instance_type,
                             __ Int32Constant(THIN_ONE_BYTE_STRING_TYPE)),
              &if_thinstring);
    __ Branch(
        __ Word32Equal(val_instance_type, __ Int32Constant(THIN_STRING_TYPE)),
        &if_thinstring, &if_notthinstring);

    __ Bind(&if_notthinstring);
    {
      // Check that the {val} is a non-internalized String, if it's anything
      // else it cannot match the recorded feedback {exp} anyways.
      __ DeoptimizeIfNot(
4503
          DeoptimizeReason::kWrongName, FeedbackSource(),
4504 4505 4506 4507 4508 4509 4510
          __ Word32Equal(__ Word32And(val_instance_type,
                                      __ Int32Constant(kIsNotStringMask |
                                                       kIsNotInternalizedMask)),
                         __ Int32Constant(kStringTag | kNotInternalizedTag)),
          frame_state);

      // Try to find the {val} in the string table.
4511
      MachineSignature::Builder builder(graph()->zone(), 1, 2);
4512
      builder.AddReturn(MachineType::AnyTagged());
4513
      builder.AddParam(MachineType::Pointer());
4514
      builder.AddParam(MachineType::AnyTagged());
4515 4516
      Node* try_string_to_index_or_lookup_existing = __ ExternalConstant(
          ExternalReference::try_string_to_index_or_lookup_existing());
4517 4518
      Node* const isolate_ptr =
          __ ExternalConstant(ExternalReference::isolate_address(isolate()));
4519
      auto call_descriptor =
4520
          Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
4521 4522
      Node* val_internalized =
          __ Call(common()->Call(call_descriptor),
4523
                  try_string_to_index_or_lookup_existing, isolate_ptr, val);
4524 4525

      // Now see if the results match.
4526
      __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(),
4527
                         __ TaggedEqual(exp, val_internalized), frame_state);
4528 4529 4530 4531 4532 4533 4534 4535
      __ Goto(&if_same);
    }

    __ Bind(&if_thinstring);
    {
      // The {val} is a ThinString, let's check the actual value.
      Node* val_actual =
          __ LoadField(AccessBuilder::ForThinStringActual(), val);
4536
      __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(),
4537
                         __ TaggedEqual(exp, val_actual), frame_state);
4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548
      __ Goto(&if_same);
    }
  }

  __ Bind(&if_same);
}

// Checks that {val} is reference-equal to the symbol {exp}; symbols are
// unique, so identity is sufficient. Deoptimizes with kWrongName otherwise.
void EffectControlLinearizer::LowerCheckEqualsSymbol(Node* node,
                                                     Node* frame_state) {
  Node* exp = node->InputAt(0);
  Node* val = node->InputAt(1);
  Node* check = __ TaggedEqual(exp, val);
  __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, FeedbackSource(), check,
                     frame_state);
}

4554
Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
4555
  Node* result =
4556
      __ Allocate(AllocationType::kYoung, __ IntPtrConstant(HeapNumber::kSize));
4557 4558 4559
  __ StoreField(AccessBuilder::ForMap(), result, __ HeapNumberMapConstant());
  __ StoreField(AccessBuilder::ForHeapNumberValue(), result, value);
  return result;
4560 4561
}

4562
Node* EffectControlLinearizer::ChangeIntPtrToSmi(Node* value) {
4563 4564
  // Do shift on 32bit values if Smis are stored in the lower word.
  if (machine()->Is64() && SmiValuesAre31Bits()) {
4565
    return ChangeTaggedInt32ToSmi(__ Word32Shl(value, SmiShiftBitsConstant()));
4566
  }
4567
  return __ WordShl(value, SmiShiftBitsConstant());
4568 4569
}

4570 4571 4572 4573 4574 4575 4576 4577
Node* EffectControlLinearizer::ChangeTaggedInt32ToSmi(Node* value) {
  DCHECK(SmiValuesAre31Bits());
  // In pointer compression, we smi-corrupt. Then, the upper bits are not
  // important.
  return COMPRESS_POINTERS_BOOL ? __ BitcastWord32ToWord64(value)
                                : ChangeInt32ToIntPtr(value);
}

4578
Node* EffectControlLinearizer::ChangeInt32ToIntPtr(Node* value) {
4579
  if (machine()->Is64()) {
4580
    value = __ ChangeInt32ToInt64(value);
4581
  }
4582
  return value;
4583 4584
}

4585 4586 4587 4588 4589 4590 4591
Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
  if (machine()->Is64()) {
    value = __ TruncateInt64ToInt32(value);
  }
  return value;
}

4592
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
4593 4594
  // Do shift on 32bit values if Smis are stored in the lower word.
  if (machine()->Is64() && SmiValuesAre31Bits()) {
4595
    return ChangeIntPtrToSmi(value);
4596
  }
4597 4598 4599
  return ChangeIntPtrToSmi(ChangeInt32ToIntPtr(value));
}

4600 4601 4602 4603 4604
Node* EffectControlLinearizer::ChangeInt64ToSmi(Node* value) {
  DCHECK(machine()->Is64());
  return ChangeIntPtrToSmi(value);
}

4605
Node* EffectControlLinearizer::ChangeUint32ToUintPtr(Node* value) {
4606
  if (machine()->Is64()) {
4607
    value = __ ChangeUint32ToUint64(value);
4608
  }
4609 4610 4611 4612
  return value;
}

// Tags an unsigned 32-bit integer as a Smi. The caller must ensure the
// value fits in Smi range.
Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
  // Do shift on 32bit values if Smis are stored in the lower word.
  if (machine()->Is64() && SmiValuesAre31Bits()) {
    Node* smi_value = __ Word32Shl(value, SmiShiftBitsConstant());
    // In pointer compression, we smi-corrupt. Then, the upper bits are not
    // important.
    return COMPRESS_POINTERS_BOOL ? __ BitcastWord32ToWord64(smi_value)
                                  : __ ChangeUint32ToUint64(smi_value);
  } else {
    return __ WordShl(ChangeUint32ToUintPtr(value), SmiShiftBitsConstant());
  }
}

4625
Node* EffectControlLinearizer::ChangeSmiToIntPtr(Node* value) {
4626
  if (machine()->Is64() && SmiValuesAre31Bits()) {
4627 4628 4629 4630
    // First sign-extend the upper half, then shift away the Smi tag.
    return __ WordSarShiftOutZeros(
        __ ChangeInt32ToInt64(__ TruncateInt64ToInt32(value)),
        SmiShiftBitsConstant());
4631
  }
4632
  return __ WordSarShiftOutZeros(value, SmiShiftBitsConstant());
4633 4634
}

4635
Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
4636 4637
  // Do shift on 32bit values if Smis are stored in the lower word.
  if (machine()->Is64() && SmiValuesAre31Bits()) {
4638 4639
    return __ Word32SarShiftOutZeros(__ TruncateInt64ToInt32(value),
                                     SmiShiftBitsConstant());
4640
  }
4641
  if (machine()->Is64()) {
4642
    return __ TruncateInt64ToInt32(ChangeSmiToIntPtr(value));
4643
  }
4644
  return ChangeSmiToIntPtr(value);
4645
}
4646

4647 4648 4649 4650 4651
Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
  CHECK(machine()->Is64());
  return ChangeSmiToIntPtr(value);
}

4652
Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
4653 4654 4655 4656
  return __ Word32Equal(__ Word32And(value, __ Int32Constant(kSmiTagMask)),
                        __ Int32Constant(kSmiTag));
}

4657
Node* EffectControlLinearizer::SmiMaxValueConstant() {
4658
  return __ Int32Constant(Smi::kMaxValue);
4659 4660 4661
}

// Returns the number of bits by which a value is shifted when (un)tagging a
// Smi; 32-bit wide when Smis live in the lower word, intptr-wide otherwise.
Node* EffectControlLinearizer::SmiShiftBitsConstant() {
  if (machine()->Is64() && SmiValuesAre31Bits()) {
    return __ Int32Constant(kSmiShiftSize + kSmiTagSize);
  }
  return __ IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}

4668
Node* EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node) {
4669
  Node* value = node->InputAt(0);
4670
  return __ PlainPrimitiveToNumber(TNode<Object>::UncheckedCast(value));
4671 4672
}

4673
Node* EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node) {
4674 4675
  Node* value = node->InputAt(0);

4676 4677 4678
  auto if_not_smi = __ MakeDeferredLabel();
  auto if_to_number_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kWord32);
4679

4680
  Node* check0 = ObjectIsSmi(value);
4681
  __ GotoIfNot(check0, &if_not_smi);
4682
  __ Goto(&done, ChangeSmiToInt32(value));
4683

4684
  __ Bind(&if_not_smi);
4685 4686
  Node* to_number =
      __ PlainPrimitiveToNumber(TNode<Object>::UncheckedCast(value));
4687

4688 4689 4690 4691
  Node* check1 = ObjectIsSmi(to_number);
  __ GotoIf(check1, &if_to_number_smi);
  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
  __ Goto(&done, __ TruncateFloat64ToWord32(number));
4692

4693 4694
  __ Bind(&if_to_number_smi);
  __ Goto(&done, ChangeSmiToInt32(to_number));
4695

4696 4697
  __ Bind(&done);
  return done.PhiAt(0);
4698 4699
}

4700
Node* EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node) {
4701 4702
  Node* value = node->InputAt(0);

4703 4704 4705
  auto if_not_smi = __ MakeDeferredLabel();
  auto if_to_number_smi = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);
4706

4707
  Node* check0 = ObjectIsSmi(value);
4708
  __ GotoIfNot(check0, &if_not_smi);
4709 4710
  Node* from_smi = ChangeSmiToInt32(value);
  __ Goto(&done, __ ChangeInt32ToFloat64(from_smi));
4711

4712
  __ Bind(&if_not_smi);
4713 4714
  Node* to_number =
      __ PlainPrimitiveToNumber(TNode<Object>::UncheckedCast(value));
4715 4716
  Node* check1 = ObjectIsSmi(to_number);
  __ GotoIf(check1, &if_to_number_smi);
4717

4718 4719
  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
  __ Goto(&done, number);
4720

4721 4722 4723 4724
  __ Bind(&if_to_number_smi);
  Node* number_from_smi = ChangeSmiToInt32(to_number);
  number_from_smi = __ ChangeInt32ToFloat64(number_from_smi);
  __ Goto(&done, number_from_smi);
4725

4726 4727
  __ Bind(&done);
  return done.PhiAt(0);
4728 4729
}

4730
Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
4731 4732 4733
  Node* object = node->InputAt(0);
  Node* elements = node->InputAt(1);

4734 4735
  auto if_not_fixed_array = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);
4736

4737
  // Load the current map of {elements}.
4738
  Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements);
4739 4740

  // Check if {elements} is not a copy-on-write FixedArray.
4741
  Node* check = __ TaggedEqual(elements_map, __ FixedArrayMapConstant());
4742
  __ GotoIfNot(check, &if_not_fixed_array);
4743
  // Nothing to do if the {elements} are not copy-on-write.
4744
  __ Goto(&done, elements);
4745

4746
  __ Bind(&if_not_fixed_array);
4747
  // We need to take a copy of the {elements} and set them up for {object}.
4748
  Operator::Properties properties = Operator::kEliminatable;
4749
  Callable callable =
4750
      Builtins::CallableFor(isolate(), Builtin::kCopyFastSmiOrObjectElements);
4751
  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
4752
  auto call_descriptor = Linkage::GetStubCallDescriptor(
4753 4754
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), flags, properties);
4755 4756
  Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
                         object, __ NoContextConstant());
4757
  __ Goto(&done, result);
4758

4759 4760
  __ Bind(&done);
  return done.PhiAt(0);
4761 4762
}

4763 4764
Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
                                                          Node* frame_state) {
4765
  GrowFastElementsParameters params = GrowFastElementsParametersOf(node->op());
4766 4767 4768
  Node* object = node->InputAt(0);
  Node* elements = node->InputAt(1);
  Node* index = node->InputAt(2);
4769
  Node* elements_length = node->InputAt(3);
4770

4771 4772 4773
  auto done = __ MakeLabel(MachineRepresentation::kTagged);
  auto if_grow = __ MakeDeferredLabel();
  auto if_not_grow = __ MakeLabel();
4774

4775 4776 4777 4778
  // Check if we need to grow the {elements} backing store.
  Node* check = __ Uint32LessThan(index, elements_length);
  __ GotoIfNot(check, &if_grow);
  __ Goto(&done, elements);
4779

4780 4781 4782 4783
  __ Bind(&if_grow);
  // We need to grow the {elements} for {object}.
  Operator::Properties properties = Operator::kEliminatable;
  Callable callable =
4784
      (params.mode() == GrowFastElementsMode::kDoubleElements)
4785
          ? Builtins::CallableFor(isolate(), Builtin::kGrowFastDoubleElements)
4786
          : Builtins::CallableFor(isolate(),
4787
                                  Builtin::kGrowFastSmiOrObjectElements);
4788
  CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
4789
  auto call_descriptor = Linkage::GetStubCallDescriptor(
4790 4791
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), call_flags, properties);
4792 4793 4794
  Node* new_elements =
      __ Call(call_descriptor, __ HeapConstant(callable.code()), object,
              ChangeInt32ToSmi(index), __ NoContextConstant());
4795

4796
  // Ensure that we were able to grow the {elements}.
4797
  __ DeoptimizeIf(DeoptimizeReason::kCouldNotGrowElements, params.feedback(),
4798
                  ObjectIsSmi(new_elements), frame_state);
4799
  __ Goto(&done, new_elements);
4800

4801 4802
  __ Bind(&done);
  return done.PhiAt(0);
4803 4804
}

4805
void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
4806 4807
  ElementsTransition const transition = ElementsTransitionOf(node->op());
  Node* object = node->InputAt(0);
4808

4809 4810
  auto if_map_same = __ MakeDeferredLabel();
  auto done = __ MakeLabel();
4811 4812 4813

  Node* source_map = __ HeapConstant(transition.source());
  Node* target_map = __ HeapConstant(transition.target());
4814 4815

  // Load the current map of {object}.
4816
  Node* object_map = __ LoadField(AccessBuilder::ForMap(), object);
4817 4818

  // Check if {object_map} is the same as {source_map}.
4819
  Node* check = __ TaggedEqual(object_map, source_map);
4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832
  __ GotoIf(check, &if_map_same);
  __ Goto(&done);

  __ Bind(&if_map_same);
  switch (transition.mode()) {
    case ElementsTransition::kFastTransition:
      // In-place migration of {object}, just store the {target_map}.
      __ StoreField(AccessBuilder::ForMap(), object, target_map);
      break;
    case ElementsTransition::kSlowTransition: {
      // Instance migration, call out to the runtime for {object}.
      Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
      Runtime::FunctionId id = Runtime::kTransitionElementsKind;
4833
      auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
4834
          graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
4835
      __ Call(call_descriptor, __ CEntryStubConstant(1), object, target_map,
4836
              __ ExternalConstant(ExternalReference::Create(id)),
4837 4838
              __ Int32Constant(2), __ NoContextConstant());
      break;
4839 4840
    }
  }
4841
  __ Goto(&done);
4842

4843
  __ Bind(&done);
4844 4845
}

4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859
Node* EffectControlLinearizer::LowerLoadMessage(Node* node) {
  Node* offset = node->InputAt(0);
  Node* object_pattern =
      __ LoadField(AccessBuilder::ForExternalIntPtr(), offset);
  return __ BitcastWordToTagged(object_pattern);
}

// Stores a tagged message object into the external slot at the given
// offset; the value is written as a raw word via an intptr field store.
void EffectControlLinearizer::LowerStoreMessage(Node* node) {
  Node* destination = node->InputAt(0);
  Node* message = node->InputAt(1);
  Node* raw_word = __ BitcastTaggedToWord(message);
  __ StoreField(AccessBuilder::ForExternalIntPtr(), destination, raw_word);
}

4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935
Node* EffectControlLinearizer::AdaptFastCallTypedArrayArgument(
    Node* node, ElementsKind expected_elements_kind,
    GraphAssemblerLabel<0>* bailout) {
  Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
  Node* value_instance_type =
      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
  Node* value_is_typed_array = __ Word32Equal(
      value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE));
  __ GotoIfNot(value_is_typed_array, bailout);

  Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), value_map);
  Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
  Node* andit = __ Word32And(bit_field2, mask);
  Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
  Node* kind = __ Word32Shr(andit, shift);

  Node* value_is_expected_elements_kind =
      __ Word32Equal(kind, __ Int32Constant(expected_elements_kind));
  __ GotoIfNot(value_is_expected_elements_kind, bailout);

  Node* buffer =
      __ LoadField(AccessBuilder::ForJSArrayBufferViewBuffer(), node);
  Node* buffer_bit_field =
      __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), buffer);

  // Go to the slow path if the {buffer} was detached.
  Node* buffer_is_not_detached = __ Word32Equal(
      __ Word32And(buffer_bit_field,
                   __ Int32Constant(JSArrayBuffer::WasDetachedBit::kMask)),
      __ ZeroConstant());
  __ GotoIfNot(buffer_is_not_detached, bailout);

  // Go to the slow path if the {buffer} is shared.
  Node* buffer_is_not_shared = __ Word32Equal(
      __ Word32And(buffer_bit_field,
                   __ Int32Constant(JSArrayBuffer::IsSharedBit::kMask)),
      __ ZeroConstant());
  __ GotoIfNot(buffer_is_not_shared, bailout);

  // Unpack the store and length, and store them to a struct
  // FastApiTypedArray.
  Node* external_pointer =
      __ LoadField(AccessBuilder::ForJSTypedArrayExternalPointer(), node);

  // Load the base pointer for the buffer. This will always be Smi
  // zero unless we allow on-heap TypedArrays, which is only the case
  // for Chrome. Node and Electron both set this limit to 0. Setting
  // the base to Smi zero here allows the BuildTypedArrayDataPointer
  // to optimize away the tricky part of the access later.
  Node* base_pointer =
      __ LoadField(AccessBuilder::ForJSTypedArrayBasePointer(), node);
  if (JSTypedArray::kMaxSizeInHeap == 0) {
    base_pointer = jsgraph()->ZeroConstant();
  }
  Node* data_ptr = BuildTypedArrayDataPointer(base_pointer, external_pointer);
  Node* length_in_bytes =
      __ LoadField(AccessBuilder::ForJSTypedArrayLength(), node);

  // We hard-code int32_t here, because all specializations of
  // FastApiTypedArray have the same size.
  constexpr int kAlign = alignof(FastApiTypedArray<int32_t>);
  constexpr int kSize = sizeof(FastApiTypedArray<int32_t>);
  static_assert(kAlign == alignof(FastApiTypedArray<double>),
                "Alignment mismatch between different specializations of "
                "FastApiTypedArray");
  static_assert(kSize == sizeof(FastApiTypedArray<double>),
                "Size mismatch between different specializations of "
                "FastApiTypedArray");
  static_assert(
      kSize == sizeof(uintptr_t) + sizeof(size_t),
      "The size of "
      "FastApiTypedArray isn't equal to the sum of its expected members.");
  Node* stack_slot = __ StackSlot(kSize, kAlign);

  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                               kNoWriteBarrier),
4936
           stack_slot, 0, length_in_bytes);
4937 4938
  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                               kNoWriteBarrier),
4939
           stack_slot, sizeof(size_t), data_ptr);
4940 4941 4942 4943 4944 4945 4946
  static_assert(sizeof(uintptr_t) == sizeof(size_t),
                "The buffer length can't "
                "fit the PointerRepresentation used to store it.");

  return stack_slot;
}

4947 4948
Node* EffectControlLinearizer::AdaptFastCallArgument(
    Node* node, CTypeInfo arg_type, GraphAssemblerLabel<0>* if_error) {
4949 4950
  int kAlign = alignof(uintptr_t);
  int kSize = sizeof(uintptr_t);
4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976
  switch (arg_type.GetSequenceType()) {
    case CTypeInfo::SequenceType::kScalar: {
      switch (arg_type.GetType()) {
        case CTypeInfo::Type::kV8Value: {
          Node* stack_slot = __ StackSlot(kSize, kAlign);
          __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                       kNoWriteBarrier),
                   stack_slot, 0, node);

          return stack_slot;
        }
        case CTypeInfo::Type::kFloat32: {
          return __ TruncateFloat64ToFloat32(node);
        }
        default: {
          return node;
        }
      }
    }
    case CTypeInfo::SequenceType::kIsSequence: {
      CHECK_EQ(arg_type.GetType(), CTypeInfo::Type::kVoid);

      // Check that the value is a HeapObject.
      Node* value_is_smi = ObjectIsSmi(node);
      __ GotoIf(value_is_smi, if_error);

4977 4978 4979 4980 4981
      Node* stack_slot = __ StackSlot(kSize, kAlign);
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               stack_slot, 0, node);

4982 4983 4984 4985 4986 4987 4988 4989
      // Check that the value is a JSArray.
      Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
      Node* value_instance_type =
          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
      Node* value_is_js_array =
          __ Word32Equal(value_instance_type, __ Int32Constant(JS_ARRAY_TYPE));
      __ GotoIfNot(value_is_js_array, if_error);

4990 4991
      return stack_slot;
    }
4992 4993 4994 4995 4996 4997 4998 4999 5000
    case CTypeInfo::SequenceType::kIsTypedArray: {
      // Check that the value is a HeapObject.
      Node* value_is_smi = ObjectIsSmi(node);
      __ GotoIf(value_is_smi, if_error);

      return AdaptFastCallTypedArrayArgument(
          node, fast_api_call::GetTypedArrayElementsKind(arg_type.GetType()),
          if_error);
    }
5001
    default: {
5002
      UNREACHABLE();
5003 5004 5005
    }
  }
}
5006

5007 5008 5009 5010 5011 5012 5013
EffectControlLinearizer::AdaptOverloadedFastCallResult
EffectControlLinearizer::AdaptOverloadedFastCallArgument(
    Node* node, const FastApiCallFunctionVector& c_functions,
    const fast_api_call::OverloadsResolutionResult& overloads_resolution_result,
    GraphAssemblerLabel<0>* if_error) {
  static constexpr int kReceiver = 1;

5014 5015
  auto merge = __ MakeLabel(MachineRepresentation::kTagged,
                            MachineRepresentation::kTagged);
5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027

  for (size_t func_index = 0; func_index < c_functions.size(); func_index++) {
    const CFunctionInfo* c_signature = c_functions[func_index].signature;
    CTypeInfo arg_type = c_signature->ArgumentInfo(
        overloads_resolution_result.distinguishable_arg_index + kReceiver);

    auto next = __ MakeLabel();

    // Check that the value is a HeapObject.
    Node* value_is_smi = ObjectIsSmi(node);
    __ GotoIf(value_is_smi, if_error);

5028 5029
    ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;

5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041
    switch (arg_type.GetSequenceType()) {
      case CTypeInfo::SequenceType::kIsSequence: {
        CHECK_EQ(arg_type.GetType(), CTypeInfo::Type::kVoid);

        // Check that the value is a JSArray.
        Node* value_map = __ LoadField(AccessBuilder::ForMap(), node);
        Node* value_instance_type =
            __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
        Node* value_is_js_array = __ Word32Equal(
            value_instance_type, __ Int32Constant(JS_ARRAY_TYPE));
        __ GotoIfNot(value_is_js_array, &next);

5042 5043 5044 5045 5046 5047 5048 5049
        int kAlign = alignof(uintptr_t);
        int kSize = sizeof(uintptr_t);
        Node* stack_slot = __ StackSlot(kSize, kAlign);

        __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                     kNoWriteBarrier),
                 stack_slot, 0, node);

5050 5051
        Node* target_address = __ ExternalConstant(ExternalReference::Create(
            c_functions[func_index].address, ref_type));
5052
        __ Goto(&merge, target_address, stack_slot);
5053 5054 5055 5056 5057 5058
        break;
      }

      case CTypeInfo::SequenceType::kIsTypedArray: {
        // Check that the value is a TypedArray with a type that matches the
        // type declared in the c-function.
5059 5060
        Node* stack_slot = AdaptFastCallTypedArrayArgument(
            node,
5061
            fast_api_call::GetTypedArrayElementsKind(
5062 5063
                overloads_resolution_result.element_type),
            &next);
5064 5065
        Node* target_address = __ ExternalConstant(ExternalReference::Create(
            c_functions[func_index].address, ref_type));
5066
        __ Goto(&merge, target_address, stack_slot);
5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079
        break;
      }

      default: {
        UNREACHABLE();
      }
    }

    __ Bind(&next);
  }
  __ Goto(if_error);

  __ Bind(&merge);
5080
  return {merge.PhiAt(0), merge.PhiAt(1)};
5081 5082
}

5083 5084 5085 5086 5087
Node* EffectControlLinearizer::WrapFastCall(
    const CallDescriptor* call_descriptor, int inputs_size, Node** inputs,
    Node* target, const CFunctionInfo* c_signature, int c_arg_count,
    Node* stack_slot) {
  // CPU profiler support
5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112
  Node* target_address = __ ExternalConstant(
      ExternalReference::fast_api_call_target_address(isolate()));
  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                               kNoWriteBarrier),
           target_address, 0, target);

  // Disable JS execution
  Node* javascript_execution_assert = __ ExternalConstant(
      ExternalReference::javascript_execution_assert(isolate()));
  static_assert(sizeof(bool) == 1, "Wrong assumption about boolean size.");

  if (FLAG_debug_code) {
    auto do_store = __ MakeLabel();
    Node* old_scope_value =
        __ Load(MachineType::Int8(), javascript_execution_assert, 0);
    __ GotoIf(__ Word32Equal(old_scope_value, __ Int32Constant(1)), &do_store);

    // We expect that JS execution is enabled, otherwise assert.
    __ Unreachable(&do_store);
    __ Bind(&do_store);
  }
  __ Store(StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
           javascript_execution_assert, 0, __ Int32Constant(0));

  // Update effect and control
5113
  if (stack_slot != nullptr) {
5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136
    inputs[c_arg_count + 1] = stack_slot;
    inputs[c_arg_count + 2] = __ effect();
    inputs[c_arg_count + 3] = __ control();
  } else {
    inputs[c_arg_count + 1] = __ effect();
    inputs[c_arg_count + 2] = __ control();
  }

  // Create the fast call
  Node* call = __ Call(call_descriptor, inputs_size, inputs);

  // Reenable JS execution
  __ Store(StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
           javascript_execution_assert, 0, __ Int32Constant(1));

  // Reset the CPU profiler target address.
  __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                               kNoWriteBarrier),
           target_address, 0, __ IntPtrConstant(0));

  return call;
}

5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161
Node* EffectControlLinearizer::GenerateSlowApiCall(Node* node) {
  FastApiCallNode n(node);
  FastApiCallParameters const& params = n.Parameters();
  const CFunctionInfo* c_signature = params.c_functions()[0].signature;
  const int c_arg_count = c_signature->ArgumentCount();

  Node** const slow_inputs = graph()->zone()->NewArray<Node*>(
      n.SlowCallArgumentCount() + FastApiCallNode::kEffectAndControlInputCount);

  int fast_call_params = c_arg_count;
  CHECK_EQ(node->op()->ValueInputCount() - fast_call_params,
           n.SlowCallArgumentCount());
  int index = 0;
  for (; index < n.SlowCallArgumentCount(); ++index) {
    slow_inputs[index] = n.SlowCallArgument(index);
  }

  slow_inputs[index] = __ effect();
  slow_inputs[index + 1] = __ control();
  Node* slow_call_result = __ Call(
      params.descriptor(), index + FastApiCallNode::kEffectAndControlInputCount,
      slow_inputs);
  return slow_call_result;
}

5162 5163 5164
Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
  FastApiCallNode n(node);
  FastApiCallParameters const& params = n.Parameters();
5165 5166 5167 5168 5169

  static constexpr int kReceiver = 1;

  const FastApiCallFunctionVector& c_functions = params.c_functions();
  const CFunctionInfo* c_signature = params.c_functions()[0].signature;
5170 5171 5172 5173 5174 5175 5176
  const int c_arg_count = c_signature->ArgumentCount();
  CallDescriptor* js_call_descriptor = params.descriptor();
  int js_arg_count = static_cast<int>(js_call_descriptor->ParameterCount());
  const int value_input_count = node->op()->ValueInputCount();
  CHECK_EQ(FastApiCallNode::ArityForArgc(c_arg_count, js_arg_count),
           value_input_count);

5177 5178 5179 5180
  // Hint to fast path.
  auto if_success = __ MakeLabel();
  auto if_error = __ MakeDeferredLabel();

5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212
  // Overload resolution
  bool generate_fast_call = false;
  int distinguishable_arg_index = INT_MIN;
  fast_api_call::OverloadsResolutionResult overloads_resolution_result =
      fast_api_call::OverloadsResolutionResult::Invalid();

  if (c_functions.size() == 1) {
    generate_fast_call = true;
  } else {
    DCHECK_EQ(c_functions.size(), 2);
    overloads_resolution_result = fast_api_call::ResolveOverloads(
        graph()->zone(), c_functions, c_arg_count);
    if (overloads_resolution_result.is_valid()) {
      generate_fast_call = true;
      distinguishable_arg_index =
          overloads_resolution_result.distinguishable_arg_index;
    }
  }

  if (!generate_fast_call) {
    // Only generate the slow call.
    return GenerateSlowApiCall(node);
  }

  // Generate fast call.

  const int kFastTargetAddressInputIndex = 0;
  const int kFastTargetAddressInputCount = 1;

  Node** const inputs = graph()->zone()->NewArray<Node*>(
      kFastTargetAddressInputCount + c_arg_count + n.FastCallExtraInputCount());

5213 5214
  ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;

5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225
  // The inputs to {Call} node for the fast call look like:
  // [fast callee, receiver, ... C arguments, [optional Options], effect,
  //  control].
  //
  // The first input node represents the target address for the fast call.
  // If the function is not overloaded (c_functions.size() == 1) this is the
  // address associated to the first and only element in the c_functions vector.
  // If there are multiple overloads the value of this input will be set later
  // with a Phi node created by AdaptOverloadedFastCallArgument.
  inputs[kFastTargetAddressInputIndex] =
      (c_functions.size() == 1) ? __ ExternalConstant(ExternalReference::Create(
5226
                                      c_functions[0].address, ref_type))
5227 5228 5229
                                : nullptr;

  for (int i = 0; i < c_arg_count; ++i) {
5230
    Node* value = NodeProperties::GetValueInput(node, i);
5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247

    if (i == distinguishable_arg_index + kReceiver) {
      // This only happens when the FastApiCall node represents multiple
      // overloaded functions and {i} is the index of the distinguishable
      // argument.
      AdaptOverloadedFastCallResult nodes = AdaptOverloadedFastCallArgument(
          value, c_functions, overloads_resolution_result, &if_error);
      inputs[i + kFastTargetAddressInputCount] = nodes.argument;

      // Replace the target address node with a Phi node that represents the
      // choice between the target addreseses of overloaded functions.
      inputs[kFastTargetAddressInputIndex] = nodes.target_address;
    } else {
      CTypeInfo type = c_signature->ArgumentInfo(i);
      inputs[i + kFastTargetAddressInputCount] =
          AdaptFastCallArgument(value, type, &if_error);
    }
5248
  }
5249
  DCHECK_NOT_NULL(inputs[0]);
5250

5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291
  MachineSignature::Builder builder(
      graph()->zone(), 1, c_arg_count + (c_signature->HasOptions() ? 1 : 0));
  MachineType return_type =
      MachineType::TypeForCType(c_signature->ReturnInfo());
  builder.AddReturn(return_type);
  for (int i = 0; i < c_arg_count; ++i) {
    CTypeInfo type = c_signature->ArgumentInfo(i);
    MachineType machine_type =
        type.GetSequenceType() == CTypeInfo::SequenceType::kScalar
            ? MachineType::TypeForCType(type)
            : MachineType::AnyTagged();
    builder.AddParam(machine_type);
  }

  Node* stack_slot = nullptr;
  if (c_signature->HasOptions()) {
    int kAlign = alignof(v8::FastApiCallbackOptions);
    int kSize = sizeof(v8::FastApiCallbackOptions);
    // If this check fails, you've probably added new fields to
    // v8::FastApiCallbackOptions, which means you'll need to write code
    // that initializes and reads from them too.
    CHECK_EQ(kSize, sizeof(uintptr_t) * 2);
    stack_slot = __ StackSlot(kSize, kAlign);

    __ Store(
        StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
        stack_slot,
        static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)),
        __ Int32Constant(0));
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             stack_slot,
             static_cast<int>(offsetof(v8::FastApiCallbackOptions, data)),
             n.SlowCallArgument(FastApiCallNode::kSlowCallDataArgumentIndex));

    builder.AddParam(MachineType::Pointer());  // stack_slot
  }

  CallDescriptor* call_descriptor =
      Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());

5292 5293 5294
  Node* c_call_result = WrapFastCall(
      call_descriptor, c_arg_count + n.FastCallExtraInputCount() + 1, inputs,
      inputs[0], c_signature, c_arg_count, stack_slot);
5295

5296
  Node* fast_call_result = nullptr;
5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313
  switch (c_signature->ReturnInfo().GetType()) {
    case CTypeInfo::Type::kVoid:
      fast_call_result = __ UndefinedConstant();
      break;
    case CTypeInfo::Type::kBool:
      static_assert(sizeof(bool) == 1, "unsupported bool size");
      fast_call_result = ChangeBitToTagged(
          __ Word32And(c_call_result, __ Int32Constant(0xFF)));
      break;
    case CTypeInfo::Type::kInt32:
      fast_call_result = ChangeInt32ToTagged(c_call_result);
      break;
    case CTypeInfo::Type::kUint32:
      fast_call_result = ChangeUint32ToTagged(c_call_result);
      break;
    case CTypeInfo::Type::kInt64:
    case CTypeInfo::Type::kUint64:
5314
      UNREACHABLE();
5315
    case CTypeInfo::Type::kFloat32:
5316 5317 5318 5319
      fast_call_result =
          ChangeFloat64ToTagged(__ ChangeFloat32ToFloat64(c_call_result),
                                CheckForMinusZeroMode::kCheckForMinusZero);
      break;
5320
    case CTypeInfo::Type::kFloat64:
5321 5322 5323
      fast_call_result = ChangeFloat64ToTagged(
          c_call_result, CheckForMinusZeroMode::kCheckForMinusZero);
      break;
5324
    case CTypeInfo::Type::kV8Value:
5325
    case CTypeInfo::Type::kApiObject:
5326
      UNREACHABLE();
5327 5328 5329 5330 5331
    case CTypeInfo::Type::kAny:
      fast_call_result =
          ChangeFloat64ToTagged(__ ChangeInt64ToFloat64(c_call_result),
                                CheckForMinusZeroMode::kCheckForMinusZero);
      break;
5332
  }
5333

5334
  auto merge = __ MakeLabel(MachineRepresentation::kTagged);
5335 5336 5337 5338 5339 5340 5341 5342 5343
  if (c_signature->HasOptions()) {
    DCHECK_NOT_NULL(stack_slot);
    Node* load = __ Load(
        MachineType::Int32(), stack_slot,
        static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)));

    Node* is_zero = __ Word32Equal(load, __ Int32Constant(0));
    __ Branch(is_zero, &if_success, &if_error);
  } else {
5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360
    __ Goto(&if_success);
  }

  // We need to generate a fallback (both fast and slow call) in case:
  // 1) the generated code might fail, in case e.g. a Smi was passed where
  // a JSObject was expected and an error must be thrown or
  // 2) the embedder requested fallback possibility via providing options arg.
  // None of the above usually holds true for Wasm functions with primitive
  // types only, so we avoid generating an extra branch here.
  DCHECK_IMPLIES(c_signature->HasOptions(), if_error.IsUsed());
  if (if_error.IsUsed()) {
    // Generate direct slow call.
    __ Bind(&if_error);
    {
      Node* slow_call_result = GenerateSlowApiCall(node);
      __ Goto(&merge, slow_call_result);
    }
5361
  }
5362 5363 5364 5365 5366 5367

  __ Bind(&if_success);
  __ Goto(&merge, fast_call_result);

  __ Bind(&merge);
  return merge.PhiAt(0);
5368 5369
}

5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384
// Lowers a LoadFieldByIndex node: loads a named property from {object} given
// an encoded field {index}. The low bit of {index} marks a double field; the
// sign of the (shifted) index selects in-object vs. out-of-object storage.
// Returns a tagged value; double fields are boxed into a fresh HeapNumber.
Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* zero = __ IntPtrConstant(0);
  Node* one = __ IntPtrConstant(1);

  // Sign-extend the {index} on 64-bit architectures.
  if (machine()->Is64()) {
    index = __ ChangeInt32ToInt64(index);
  }

  auto if_double = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kTagged);

  // Check if field is a mutable double field: the low bit of {index} is the
  // "double field" tag.
  __ GotoIfNot(__ IntPtrEqual(__ WordAnd(index, one), zero), &if_double);

  // The field is a proper Tagged field on {object}. The {index} is shifted
  // to the left by one in the code below.
  {
    // Check if field is in-object or out-of-object.
    auto if_outofobject = __ MakeLabel();
    __ GotoIf(__ IntLessThan(index, zero), &if_outofobject);

    // The field is located in the {object} itself.
    {
      // offset = (index >> 1) * kTaggedSize + header; the -1 in the shift
      // compensates for the tag bit still present in {index}.
      Node* offset =
          __ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2 - 1)),
                    __ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
      Node* result = __ Load(MachineType::AnyTagged(), object, offset);
      __ Goto(&done, result);
    }

    // The field is located in the properties backing store of {object}.
    // The {index} is equal to the negated out of property index plus 1.
    __ Bind(&if_outofobject);
    {
      Node* properties = __ LoadField(
          AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), object);
      Node* offset =
          __ IntAdd(__ WordShl(__ IntSub(zero, index),
                               __ IntPtrConstant(kTaggedSizeLog2 - 1)),
                    __ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
                                      kHeapObjectTag));
      Node* result = __ Load(MachineType::AnyTagged(), properties, offset);
      __ Goto(&done, result);
    }
  }

  // The field is a Double field, either unboxed in the object on 64-bit
  // architectures, or a mutable HeapNumber.
  __ Bind(&if_double);
  {
    auto loaded_field = __ MakeLabel(MachineRepresentation::kTagged);
    auto done_double = __ MakeLabel(MachineRepresentation::kFloat64);

    // Drop the "double field" tag bit to recover the plain field index.
    index = __ WordSar(index, one);

    // Check if field is in-object or out-of-object.
    auto if_outofobject = __ MakeLabel();
    __ GotoIf(__ IntLessThan(index, zero), &if_outofobject);

    // The field is located in the {object} itself.
    {
      Node* offset =
          __ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2)),
                    __ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
      Node* field = __ Load(MachineType::AnyTagged(), object, offset);
      __ Goto(&loaded_field, field);
    }

    __ Bind(&if_outofobject);
    {
      Node* properties = __ LoadField(
          AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer(), object);
      Node* offset =
          __ IntAdd(__ WordShl(__ IntSub(zero, index),
                               __ IntPtrConstant(kTaggedSizeLog2)),
                    __ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
                                      kHeapObjectTag));
      Node* field = __ Load(MachineType::AnyTagged(), properties, offset);
      __ Goto(&loaded_field, field);
    }

    __ Bind(&loaded_field);
    {
      Node* field = loaded_field.PhiAt(0);
      // We may have transitioned in-place away from double, so check that
      // this is a HeapNumber -- otherwise the load is fine and we don't need
      // to copy anything anyway.
      __ GotoIf(ObjectIsSmi(field), &done, field);
      Node* field_map = __ LoadField(AccessBuilder::ForMap(), field);
      __ GotoIfNot(__ TaggedEqual(field_map, __ HeapNumberMapConstant()), &done,
                   field);

      // It is a HeapNumber: extract its raw float64 payload.
      Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), field);
      __ Goto(&done_double, value);
    }

    __ Bind(&done_double);
    {
      // Box the float64 into a freshly allocated HeapNumber.
      Node* result = AllocateHeapNumberWithValue(done_double.PhiAt(0));
      __ Goto(&done, result);
    }
  }

  __ Bind(&done);
  return done.PhiAt(0);
}

5480 5481 5482
// Emits graph nodes that byte-swap {value} according to the element width of
// {type}. Used by the DataView lowerings to convert between the requested
// endianness and the target's native endianness.
Node* EffectControlLinearizer::BuildReverseBytes(ExternalArrayType type,
                                                 Node* value) {
  switch (type) {
    // Single-byte elements: byte order is irrelevant.
    case kExternalInt8Array:
    case kExternalUint8Array:
    case kExternalUint8ClampedArray:
      return value;

    case kExternalInt16Array:
      // Reverse the whole 32-bit word, then arithmetic-shift so the two
      // meaningful bytes land in the low half, sign-extended.
      return __ Word32Sar(__ Word32ReverseBytes(value), __ Int32Constant(16));

    case kExternalUint16Array:
      // Same as Int16, but with a logical shift (zero extension).
      return __ Word32Shr(__ Word32ReverseBytes(value), __ Int32Constant(16));

    case kExternalInt32Array:  // Fall through.
    case kExternalUint32Array:
      return __ Word32ReverseBytes(value);

    case kExternalFloat32Array:
      // Reinterpret as an integer, reverse, and reinterpret back.
      return __ BitcastInt32ToFloat32(
          __ Word32ReverseBytes(__ BitcastFloat32ToInt32(value)));

    case kExternalFloat64Array: {
      if (machine()->Is64()) {
        return __ BitcastInt64ToFloat64(
            __ Word64ReverseBytes(__ BitcastFloat64ToInt64(value)));
      } else {
        // On 32-bit targets: reverse each 32-bit half, then swap the halves.
        Node* low = __ Word32ReverseBytes(__ Float64ExtractLowWord32(value));
        Node* high = __ Word32ReverseBytes(__ Float64ExtractHighWord32(value));
        Node* reversed = __ Float64Constant(0.0);
        reversed = __ Float64InsertLowWord32(reversed, high);
        reversed = __ Float64InsertHighWord32(reversed, low);
        return reversed;
      }
    }

    case kExternalBigInt64Array:
    case kExternalBigUint64Array:
      // BigInt-backed DataView accesses are not lowered through this path.
      UNREACHABLE();
  }
}
5532

5533
// Lowers a LoadDataViewElement node: performs an unaligned load from the
// DataView's backing {storage} and, if the requested endianness differs from
// the target's, byte-swaps the loaded value.
Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
  ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
  Node* object = node->InputAt(0);
  Node* storage = node->InputAt(1);
  Node* index = node->InputAt(2);
  Node* is_little_endian = node->InputAt(3);

  // Keep the {object} (either the JSArrayBuffer or the JSDataView) alive so
  // the GC will not release the JSArrayBuffer (if there's any) while we are
  // still operating on it.
  __ Retain(object);

  MachineType const machine_type =
      AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;

  Node* raw_value = __ LoadUnaligned(machine_type, storage, index);
  auto big_endian_case = __ MakeLabel();
  auto done = __ MakeLabel(machine_type.representation());

  __ GotoIfNot(is_little_endian, &big_endian_case);
  {  // Requested a little-endian load.
#if V8_TARGET_LITTLE_ENDIAN
    __ Goto(&done, raw_value);
#else
    __ Goto(&done, BuildReverseBytes(element_type, raw_value));
#endif  // V8_TARGET_LITTLE_ENDIAN
  }

  __ Bind(&big_endian_case);
  {  // Requested a big-endian load.
#if V8_TARGET_LITTLE_ENDIAN
    __ Goto(&done, BuildReverseBytes(element_type, raw_value));
#else
    __ Goto(&done, raw_value);
#endif  // V8_TARGET_LITTLE_ENDIAN
  }

  // Return the (possibly byte-swapped) loaded value.
  __ Bind(&done);
  return done.PhiAt(0);
}
5574

5575 5576
// Lowers a StoreDataViewElement node: byte-swaps {value} if the requested
// endianness differs from the target's, then performs an unaligned store
// into the DataView's backing {storage}.
void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
  ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
  Node* object = node->InputAt(0);
  Node* storage = node->InputAt(1);
  Node* index = node->InputAt(2);
  Node* value = node->InputAt(3);
  Node* is_little_endian = node->InputAt(4);

  // Keep the {object} (either the JSArrayBuffer or the JSDataView) alive so
  // the GC will not release the JSArrayBuffer (if there's any) while we are
  // still operating on it.
  __ Retain(object);

  MachineType const machine_type =
      AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;

  auto big_endian_case = __ MakeLabel();
  auto done = __ MakeLabel(machine_type.representation());

  __ GotoIfNot(is_little_endian, &big_endian_case);
  {  // Requested a little-endian store.
#if V8_TARGET_LITTLE_ENDIAN
    __ Goto(&done, value);
#else
    __ Goto(&done, BuildReverseBytes(element_type, value));
#endif  // V8_TARGET_LITTLE_ENDIAN
  }

  __ Bind(&big_endian_case);
  {  // Requested a big-endian store.
#if V8_TARGET_LITTLE_ENDIAN
    __ Goto(&done, BuildReverseBytes(element_type, value));
#else
    __ Goto(&done, value);
#endif  // V8_TARGET_LITTLE_ENDIAN
  }

  __ Bind(&done);
  __ StoreUnaligned(machine_type.representation(), storage, index,
                    done.PhiAt(0));
}
5616

5617 5618 5619 5620 5621 5622 5623
// Compute the data pointer, handling the case where the {external} pointer
// is the effective data pointer (i.e. the {base} is Smi zero).
Node* EffectControlLinearizer::BuildTypedArrayDataPointer(Node* base,
                                                          Node* external) {
  // With a Smi-zero {base}, {external} already is the full data pointer.
  if (IntPtrMatcher(base).Is(0)) return external;

  Node* raw_base = __ BitcastTaggedToWord(base);
  if (COMPRESS_POINTERS_BOOL) {
    // Zero-extend Tagged_t to UintPtr according to current compression
    // scheme so that the addition with |external_pointer| (which already
    // contains compensated offset value) will decompress the tagged value.
    // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
    // details.
    raw_base = ChangeUint32ToUintPtr(raw_base);
  }
  return __ IntPtrAdd(raw_base, external);
}

5637
// Lowers a LoadTypedElement node into a plain element load from the typed
// array's data pointer, keeping the backing {buffer} alive across the access.
Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
  ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
  Node* buffer = node->InputAt(0);
  Node* base = node->InputAt(1);
  Node* external = node->InputAt(2);
  Node* index = node->InputAt(3);

  // Keep the {buffer} alive so that the GC will not release the ArrayBuffer
  // (if there's any) while we still operate on it.
  __ Retain(buffer);

  Node* data = BuildTypedArrayDataPointer(base, external);

  // Perform the actual typed element load.
  return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
                        data, index);
}

5655 5656 5657 5658 5659 5660 5661 5662 5663 5664
// Lowers a LoadStackArgument node: loads the raw word at stack-argument
// position {index} off {base} and bitcasts it back to a tagged value.
Node* EffectControlLinearizer::LowerLoadStackArgument(Node* node) {
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  Node* raw_argument =
      __ LoadElement(AccessBuilder::ForStackArgument(), base, index);
  return __ BitcastWordToTagged(raw_argument);
}

5665
// Lowers a StoreTypedElement node into a plain element store through the
// typed array's data pointer, keeping the backing {buffer} alive across the
// access.
void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
  ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
  Node* buffer = node->InputAt(0);
  Node* base = node->InputAt(1);
  Node* external = node->InputAt(2);
  Node* index = node->InputAt(3);
  Node* value = node->InputAt(4);

  // Keep the {buffer} alive so that the GC will not release the ArrayBuffer
  // (if there's any) while we still operate on it.
  __ Retain(buffer);

  Node* data = BuildTypedArrayDataPointer(base, external);

  // Perform the actual typed element store.
  __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true), data,
                  index, value);
}

5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699
// Transitions {array}'s elements kind from {from} to {to}. Simple map-change
// transitions just store the new map; anything else calls out to the
// Runtime::kTransitionElementsKind runtime function.
void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array,
                                                   ElementsKind from,
                                                   ElementsKind to) {
  DCHECK(IsMoreGeneralElementsKindTransition(from, to));
  DCHECK(to == HOLEY_ELEMENTS || to == HOLEY_DOUBLE_ELEMENTS);

  // The target map comes from the node's parameters, selected by the
  // destination kind.
  Handle<Map> target(to == HOLEY_ELEMENTS ? FastMapParameterOf(node->op())
                                          : DoubleMapParameterOf(node->op()));
  Node* target_map = __ HeapConstant(target);

  if (IsSimpleMapChangeTransition(from, to)) {
    // A simple transition only needs to install the new map.
    __ StoreField(AccessBuilder::ForMap(), array, target_map);
    return;
  }

  // Instance migration, call out to the runtime for {array}.
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  Runtime::FunctionId id = Runtime::kTransitionElementsKind;
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
  __ Call(call_descriptor, __ CEntryStubConstant(1), array, target_map,
          __ ExternalConstant(ExternalReference::Create(id)),
          __ Int32Constant(2), __ NoContextConstant());
}

// Emits a node computing (kind > reference_kind) as a 32-bit comparison.
Node* EffectControlLinearizer::IsElementsKindGreaterThan(
    Node* kind, ElementsKind reference_kind) {
  // kind > reference_kind  <=>  reference_kind < kind.
  return __ Int32LessThan(__ Int32Constant(reference_kind), kind);
}

// Lowers a TransitionAndStoreElement node: possibly transitions the elements
// kind of {array} based on the type of {value}, then stores value at
// array[index] in the representation matching the (updated) kind.
void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
  Node* array = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  // Possibly transition array based on input and store.
  //
  //   -- TRANSITION PHASE -----------------
  //   kind = ElementsKind(array)
  //   if value is not smi {
  //     if kind == HOLEY_SMI_ELEMENTS {
  //       if value is heap number {
  //         Transition array to HOLEY_DOUBLE_ELEMENTS
  //         kind = HOLEY_DOUBLE_ELEMENTS
  //       } else {
  //         Transition array to HOLEY_ELEMENTS
  //         kind = HOLEY_ELEMENTS
  //       }
  //     } else if kind == HOLEY_DOUBLE_ELEMENTS {
  //       if value is not heap number {
  //         Transition array to HOLEY_ELEMENTS
  //         kind = HOLEY_ELEMENTS
  //       }
  //     }
  //   }
  //
  //   -- STORE PHASE ----------------------
  //   [make sure {kind} is up-to-date]
  //   if kind == HOLEY_DOUBLE_ELEMENTS {
  //     if value is smi {
  //       float_value = convert smi to float
  //       Store array[index] = float_value
  //     } else {
  //       float_value = value
  //       Store array[index] = float_value
  //     }
  //   } else {
  //     // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
  //     Store array[index] = value
  //   }
  //
  Node* map = __ LoadField(AccessBuilder::ForMap(), array);
  Node* kind;
  {
    // Decode the elements kind from the map's bit_field2.
    Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
    Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
    Node* andit = __ Word32And(bit_field2, mask);
    Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
    kind = __ Word32Shr(andit, shift);
  }

  // {do_store} carries the (possibly updated) kind as a phi input.
  auto do_store = __ MakeLabel(MachineRepresentation::kWord32);
  // We can store a smi anywhere.
  __ GotoIf(ObjectIsSmi(value), &do_store, kind);

  // {value} is a HeapObject.
  auto transition_smi_array = __ MakeDeferredLabel();
  auto transition_double_to_fast = __ MakeDeferredLabel();
  {
    __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
                 &transition_smi_array);
    __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &do_store,
                 kind);

    // We have double elements kind. Only a HeapNumber can be stored
    // without effecting a transition.
    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
    Node* heap_number_map = __ HeapNumberMapConstant();
    Node* check = __ TaggedEqual(value_map, heap_number_map);
    __ GotoIfNot(check, &transition_double_to_fast);
    __ Goto(&do_store, kind);
  }

  __ Bind(&transition_smi_array);  // deferred code.
  {
    // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS or
    // to HOLEY_ELEMENTS.
    auto if_value_not_heap_number = __ MakeLabel();
    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
    Node* heap_number_map = __ HeapNumberMapConstant();
    Node* check = __ TaggedEqual(value_map, heap_number_map);
    __ GotoIfNot(check, &if_value_not_heap_number);
    {
      // {value} is a HeapNumber.
      TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
                           HOLEY_DOUBLE_ELEMENTS);
      __ Goto(&do_store, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS));
    }
    __ Bind(&if_value_not_heap_number);
    {
      TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
      __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
    }
  }

  __ Bind(&transition_double_to_fast);  // deferred code.
  {
    TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
    __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
  }

  // Make sure kind is up-to-date.
  __ Bind(&do_store);
  kind = do_store.PhiAt(0);

  Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
  auto if_kind_is_double = __ MakeLabel();
  auto done = __ MakeLabel();
  __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
            &if_kind_is_double);
  {
    // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
    __ StoreElement(AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS),
                    elements, index, value);
    __ Goto(&done);
  }
  __ Bind(&if_kind_is_double);
  {
    // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
    auto do_double_store = __ MakeLabel();
    __ GotoIfNot(ObjectIsSmi(value), &do_double_store);
    {
      // {value} is a Smi: convert it to float64 before storing.
      Node* int_value = ChangeSmiToInt32(value);
      Node* float_value = __ ChangeInt32ToFloat64(int_value);
      __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
                      index, float_value);
      __ Goto(&done);
    }
    __ Bind(&do_double_store);
    {
      // {value} is a HeapNumber: store its float64 payload, NaN-silenced.
      Node* float_value =
          __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
      __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
                      index, __ Float64SilenceNaN(float_value));
      __ Goto(&done);
    }
  }

  __ Bind(&done);
}

5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877
// Lowers a TransitionAndStoreNumberElement node: {value} is an untagged
// Float64, so the array must end up with HOLEY_DOUBLE_ELEMENTS before the
// store; starting from HOLEY_SMI_ELEMENTS a transition is performed, and any
// other kind is treated as unreachable.
void EffectControlLinearizer::LowerTransitionAndStoreNumberElement(Node* node) {
  Node* array = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);  // This is a Float64, not tagged.

  // Possibly transition array based on input and store.
  //
  //   -- TRANSITION PHASE -----------------
  //   kind = ElementsKind(array)
  //   if kind == HOLEY_SMI_ELEMENTS {
  //     Transition array to HOLEY_DOUBLE_ELEMENTS
  //   } else if kind != HOLEY_DOUBLE_ELEMENTS {
  //     This is UNREACHABLE, execute a debug break.
  //   }
  //
  //   -- STORE PHASE ----------------------
  //   Store array[index] = value (it's a float)
  //
  Node* map = __ LoadField(AccessBuilder::ForMap(), array);
  Node* kind;
  {
    // Decode the elements kind from the map's bit_field2.
    Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
    Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
    Node* andit = __ Word32And(bit_field2, mask);
    Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
    kind = __ Word32Shr(andit, shift);
  }

  auto do_store = __ MakeLabel();

  // {value} is a float64.
  auto transition_smi_array = __ MakeDeferredLabel();
  {
    __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
                 &transition_smi_array);
    // We expect that our input array started at HOLEY_SMI_ELEMENTS, and
    // climbs the lattice up to HOLEY_DOUBLE_ELEMENTS. Force a debug break
    // if this assumption is broken. It also would be the case that
    // loop peeling can break this assumption.
    __ GotoIf(__ Word32Equal(kind, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
              &do_store);
    __ Unreachable(&do_store);
  }

  __ Bind(&transition_smi_array);  // deferred code.
  {
    // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS.
    TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
                         HOLEY_DOUBLE_ELEMENTS);
    __ Goto(&do_store);
  }

  __ Bind(&do_store);

  // Store the NaN-silenced float64 into the double-elements backing store.
  Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
  __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements, index,
                  __ Float64SilenceNaN(value));
}

// Lowers a TransitionAndStoreNonNumberElement node: {value} is a non-number
// HeapObject, so the array must end up with HOLEY_ELEMENTS; transitions from
// HOLEY_SMI_ELEMENTS or HOLEY_DOUBLE_ELEMENTS are performed as needed before
// the tagged store.
void EffectControlLinearizer::LowerTransitionAndStoreNonNumberElement(
    Node* node) {
  Node* array = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  // Possibly transition array based on input and store.
  //
  //   -- TRANSITION PHASE -----------------
  //   kind = ElementsKind(array)
  //   if kind == HOLEY_SMI_ELEMENTS {
  //     Transition array to HOLEY_ELEMENTS
  //   } else if kind == HOLEY_DOUBLE_ELEMENTS {
  //     Transition array to HOLEY_ELEMENTS
  //   }
  //
  //   -- STORE PHASE ----------------------
  //   // kind is HOLEY_ELEMENTS
  //   Store array[index] = value
  //
  Node* map = __ LoadField(AccessBuilder::ForMap(), array);
  Node* kind;
  {
    // Decode the elements kind from the map's bit_field2.
    Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
    Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
    Node* andit = __ Word32And(bit_field2, mask);
    Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
    kind = __ Word32Shr(andit, shift);
  }

  auto do_store = __ MakeLabel();

  auto transition_smi_array = __ MakeDeferredLabel();
  auto transition_double_to_fast = __ MakeDeferredLabel();
  {
    __ GotoIfNot(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
                 &transition_smi_array);
    __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
              &transition_double_to_fast);
    __ Goto(&do_store);
  }

  __ Bind(&transition_smi_array);  // deferred code.
  {
    // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_ELEMENTS.
    TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
    __ Goto(&do_store);
  }

  __ Bind(&transition_double_to_fast);  // deferred code.
  {
    TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
    __ Goto(&do_store);
  }

  __ Bind(&do_store);

  Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
  // Our ElementsKind is HOLEY_ELEMENTS.
  ElementAccess access = AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS);
  Type value_type = ValueTypeParameterOf(node->op());
  if (value_type.Is(Type::BooleanOrNullOrUndefined())) {
    // Booleans, null and undefined are stored without a write barrier.
    access.type = value_type;
    access.write_barrier_kind = kNoWriteBarrier;
  }
  __ StoreElement(access, elements, index, value);
}

// Lowers a StoreSignedSmallElement node: stores an int32 {value} that is
// statically known to fit in a Smi into {array}[{index}], converting the
// value to whichever representation the array's elements kind requires.
// Removed stray line-number residue that had been interleaved in the source.
void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
  Node* array = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);  // int32

  // Store a signed small in an output array.
  //
  //   kind = ElementsKind(array)
  //
  //   -- STORE PHASE ----------------------
  //   if kind == HOLEY_DOUBLE_ELEMENTS {
  //     float_value = convert int32 to float
  //     Store array[index] = float_value
  //   } else {
  //     // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
  //     smi_value = convert int32 to smi
  //     Store array[index] = smi_value
  //   }
  //
  Node* map = __ LoadField(AccessBuilder::ForMap(), array);
  Node* kind;
  {
    // Decode the ElementsKind from the map's bit_field2.
    Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
    Node* mask = __ Int32Constant(Map::Bits2::ElementsKindBits::kMask);
    Node* andit = __ Word32And(bit_field2, mask);
    Node* shift = __ Int32Constant(Map::Bits2::ElementsKindBits::kShift);
    kind = __ Word32Shr(andit, shift);
  }

  Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
  auto if_kind_is_double = __ MakeLabel();
  auto done = __ MakeLabel();
  __ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
            &if_kind_is_double);
  {
    // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
    // In this case, we know our value is a signed small, and we can optimize
    // the ElementAccess information.
    ElementAccess access = AccessBuilder::ForFixedArrayElement();
    access.type = Type::SignedSmall();
    access.machine_type = MachineType::TaggedSigned();
    // Smi stores never need a write barrier.
    access.write_barrier_kind = kNoWriteBarrier;
    Node* smi_value = ChangeInt32ToSmi(value);
    __ StoreElement(access, elements, index, smi_value);
    __ Goto(&done);
  }
  __ Bind(&if_kind_is_double);
  {
    // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
    Node* float_value = __ ChangeInt32ToFloat64(value);
    __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
                    index, float_value);
    __ Goto(&done);
  }

  __ Bind(&done);
}

6041
void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
6042
  AbortReason reason = AbortReasonOf(node->op());
6043 6044
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  Runtime::FunctionId id = Runtime::kAbort;
6045
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
6046
      graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
6047
  __ Call(call_descriptor, __ CEntryStubConstant(1),
6048
          __ SmiConstant(static_cast<int>(reason)),
6049
          __ ExternalConstant(ExternalReference::Create(id)),
6050 6051 6052
          __ Int32Constant(1), __ NoContextConstant());
}

6053
template <typename... Args>
6054
Node* EffectControlLinearizer::CallBuiltin(Builtin builtin,
6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065
                                           Operator::Properties properties,
                                           Args... args) {
  Callable const callable = Builtins::CallableFor(isolate(), builtin);
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph()->zone(), callable.descriptor(),
      callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
      properties);
  return __ Call(call_descriptor, __ HeapConstant(callable.code()), args...,
                 __ NoContextConstant());
}

6066 6067 6068
Node* EffectControlLinearizer::LowerAssertType(Node* node) {
  DCHECK_EQ(node->opcode(), IrOpcode::kAssertType);
  Type type = OpParameter<Type>(node->op());
6069
  CHECK(type.CanBeAsserted());
6070
  Node* const input = node->InputAt(0);
6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081
  Node* allocated_type;
  {
    DCHECK(isolate()->CurrentLocalHeap()->is_main_thread());
    base::Optional<UnparkedScope> unparked_scope;
    if (isolate()->CurrentLocalHeap()->IsParked()) {
      unparked_scope.emplace(isolate()->main_thread_local_isolate());
    }
    allocated_type = __ HeapConstant(type.AllocateOnHeap(factory()));
  }
  CallBuiltin(Builtin::kCheckTurbofanType, node->op()->properties(), input,
              allocated_type, __ SmiConstant(node->id()));
6082 6083 6084 6085 6086 6087 6088
  return input;
}

// Lowers FoldConstant: calls the CheckSameObject builtin to verify that
// {original} and {constant} are the same object, then yields {constant}.
// Removed stray line-number residue that had been interleaved in the source.
Node* EffectControlLinearizer::LowerFoldConstant(Node* node) {
  DCHECK_EQ(node->opcode(), IrOpcode::kFoldConstant);
  Node* original = node->InputAt(0);
  Node* constant = node->InputAt(1);
  CallBuiltin(Builtin::kCheckSameObject, node->op()->properties(), original,
              constant);
  return constant;
}

6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108
Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
  ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op());
  Node* value = node->InputAt(0);
  Node* global_proxy = node->InputAt(1);

  switch (mode) {
    case ConvertReceiverMode::kNullOrUndefined: {
      return global_proxy;
    }
    case ConvertReceiverMode::kNotNullOrUndefined: {
      auto convert_to_object = __ MakeDeferredLabel();
      auto done_convert = __ MakeLabel(MachineRepresentation::kTagged);

      // Check if {value} is already a JSReceiver.
      __ GotoIf(ObjectIsSmi(value), &convert_to_object);
6109
      static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
6110 6111 6112 6113 6114 6115 6116 6117
      Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
      Node* value_instance_type =
          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
      Node* check = __ Uint32LessThan(
          value_instance_type, __ Uint32Constant(FIRST_JS_RECEIVER_TYPE));
      __ GotoIf(check, &convert_to_object);
      __ Goto(&done_convert, value);

6118
      // Wrap the primitive {value} into a JSPrimitiveWrapper.
6119 6120
      __ Bind(&convert_to_object);
      Operator::Properties properties = Operator::kEliminatable;
6121
      Callable callable = Builtins::CallableFor(isolate(), Builtin::kToObject);
6122
      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
6123
      auto call_descriptor = Linkage::GetStubCallDescriptor(
6124 6125
          graph()->zone(), callable.descriptor(),
          callable.descriptor().GetStackParameterCount(), flags, properties);
6126 6127
      Node* native_context = __ LoadField(
          AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
6128 6129
      Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
                             value, native_context);
6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141
      __ Goto(&done_convert, result);

      __ Bind(&done_convert);
      return done_convert.PhiAt(0);
    }
    case ConvertReceiverMode::kAny: {
      auto convert_to_object = __ MakeDeferredLabel();
      auto convert_global_proxy = __ MakeDeferredLabel();
      auto done_convert = __ MakeLabel(MachineRepresentation::kTagged);

      // Check if {value} is already a JSReceiver, or null/undefined.
      __ GotoIf(ObjectIsSmi(value), &convert_to_object);
6142
      static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
6143 6144 6145 6146 6147 6148 6149 6150
      Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
      Node* value_instance_type =
          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
      Node* check = __ Uint32LessThan(
          value_instance_type, __ Uint32Constant(FIRST_JS_RECEIVER_TYPE));
      __ GotoIf(check, &convert_to_object);
      __ Goto(&done_convert, value);

6151
      // Wrap the primitive {value} into a JSPrimitiveWrapper.
6152
      __ Bind(&convert_to_object);
6153 6154 6155
      __ GotoIf(__ TaggedEqual(value, __ UndefinedConstant()),
                &convert_global_proxy);
      __ GotoIf(__ TaggedEqual(value, __ NullConstant()),
6156 6157
                &convert_global_proxy);
      Operator::Properties properties = Operator::kEliminatable;
6158
      Callable callable = Builtins::CallableFor(isolate(), Builtin::kToObject);
6159
      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
6160
      auto call_descriptor = Linkage::GetStubCallDescriptor(
6161 6162
          graph()->zone(), callable.descriptor(),
          callable.descriptor().GetStackParameterCount(), flags, properties);
6163 6164
      Node* native_context = __ LoadField(
          AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
6165 6166
      Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
                             value, native_context);
6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180
      __ Goto(&done_convert, result);

      // Replace the {value} with the {global_proxy}.
      __ Bind(&convert_global_proxy);
      __ Goto(&done_convert, global_proxy);

      __ Bind(&done_convert);
      return done_convert.PhiAt(0);
    }
  }

  UNREACHABLE();
}

6181
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
6182 6183
  // Nothing to be done if a fast hardware instruction is available.
  if (machine()->Float64RoundUp().IsSupported()) {
6184
    return Nothing<Node*>();
6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211
  }

  Node* const input = node->InputAt(0);

  // General case for ceil.
  //
  //   if 0.0 < input then
  //     if 2^52 <= input then
  //       input
  //     else
  //       let temp1 = (2^52 + input) - 2^52 in
  //       if temp1 < input then
  //         temp1 + 1
  //       else
  //         temp1
  //   else
  //     if input == 0 then
  //       input
  //     else
  //       if input <= -2^52 then
  //         input
  //       else
  //         let temp1 = -0 - input in
  //         let temp2 = (2^52 + temp1) - 2^52 in
  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
  //         -0 - temp3

6212 6213 6214 6215 6216 6217
  auto if_not_positive = __ MakeDeferredLabel();
  auto if_greater_than_two_52 = __ MakeDeferredLabel();
  auto if_less_than_minus_two_52 = __ MakeDeferredLabel();
  auto if_zero = __ MakeDeferredLabel();
  auto done_temp3 = __ MakeLabel(MachineRepresentation::kFloat64);
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);
6218

6219 6220 6221
  Node* const zero = __ Float64Constant(0.0);
  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
  Node* const one = __ Float64Constant(1.0);
6222

6223
  Node* check0 = __ Float64LessThan(zero, input);
6224
  __ GotoIfNot(check0, &if_not_positive);
6225 6226 6227
  {
    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
    __ GotoIf(check1, &if_greater_than_two_52);
6228
    {
6229
      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
6230
      __ GotoIfNot(__ Float64LessThan(temp1, input), &done, temp1);
6231
      __ Goto(&done, __ Float64Add(temp1, one));
6232 6233
    }

6234 6235
    __ Bind(&if_greater_than_two_52);
    __ Goto(&done, input);
6236 6237
  }

6238
  __ Bind(&if_not_positive);
6239
  {
6240 6241
    Node* check1 = __ Float64Equal(input, zero);
    __ GotoIf(check1, &if_zero);
6242

6243 6244 6245
    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
    __ GotoIf(check2, &if_less_than_minus_two_52);
6246 6247

    {
6248 6249 6250 6251
      Node* const minus_zero = __ Float64Constant(-0.0);
      Node* temp1 = __ Float64Sub(minus_zero, input);
      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
      Node* check3 = __ Float64LessThan(temp1, temp2);
6252
      __ GotoIfNot(check3, &done_temp3, temp2);
6253 6254 6255 6256 6257
      __ Goto(&done_temp3, __ Float64Sub(temp2, one));

      __ Bind(&done_temp3);
      Node* temp3 = done_temp3.PhiAt(0);
      __ Goto(&done, __ Float64Sub(minus_zero, temp3));
6258
    }
6259 6260
    __ Bind(&if_less_than_minus_two_52);
    __ Goto(&done, input);
6261

6262 6263
    __ Bind(&if_zero);
    __ Goto(&done, input);
6264
  }
6265 6266
  __ Bind(&done);
  return Just(done.PhiAt(0));
6267 6268
}

6269
Node* EffectControlLinearizer::BuildFloat64RoundDown(Node* value) {
6270 6271
  if (machine()->Float64RoundDown().IsSupported()) {
    return __ Float64RoundDown(value);
6272
  }
6273

6274
  Node* const input = value;
6275

6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300
  // General case for floor.
  //
  //   if 0.0 < input then
  //     if 2^52 <= input then
  //       input
  //     else
  //       let temp1 = (2^52 + input) - 2^52 in
  //       if input < temp1 then
  //         temp1 - 1
  //       else
  //         temp1
  //   else
  //     if input == 0 then
  //       input
  //     else
  //       if input <= -2^52 then
  //         input
  //       else
  //         let temp1 = -0 - input in
  //         let temp2 = (2^52 + temp1) - 2^52 in
  //         if temp2 < temp1 then
  //           -1 - temp2
  //         else
  //           -0 - temp2

6301 6302 6303 6304 6305 6306
  auto if_not_positive = __ MakeDeferredLabel();
  auto if_greater_than_two_52 = __ MakeDeferredLabel();
  auto if_less_than_minus_two_52 = __ MakeDeferredLabel();
  auto if_temp2_lt_temp1 = __ MakeLabel();
  auto if_zero = __ MakeDeferredLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);
6307 6308 6309 6310 6311

  Node* const zero = __ Float64Constant(0.0);
  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);

  Node* check0 = __ Float64LessThan(zero, input);
6312
  __ GotoIfNot(check0, &if_not_positive);
6313 6314 6315
  {
    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
    __ GotoIf(check1, &if_greater_than_two_52);
6316
    {
6317 6318
      Node* const one = __ Float64Constant(1.0);
      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
6319
      __ GotoIfNot(__ Float64LessThan(input, temp1), &done, temp1);
6320 6321
      __ Goto(&done, __ Float64Sub(temp1, one));
    }
6322

6323 6324 6325
    __ Bind(&if_greater_than_two_52);
    __ Goto(&done, input);
  }
6326

6327 6328 6329 6330
  __ Bind(&if_not_positive);
  {
    Node* check1 = __ Float64Equal(input, zero);
    __ GotoIf(check1, &if_zero);
6331

6332 6333 6334
    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
    __ GotoIf(check2, &if_less_than_minus_two_52);
6335

6336 6337 6338 6339 6340 6341 6342 6343 6344 6345
    {
      Node* const minus_zero = __ Float64Constant(-0.0);
      Node* temp1 = __ Float64Sub(minus_zero, input);
      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
      Node* check3 = __ Float64LessThan(temp2, temp1);
      __ GotoIf(check3, &if_temp2_lt_temp1);
      __ Goto(&done, __ Float64Sub(minus_zero, temp2));

      __ Bind(&if_temp2_lt_temp1);
      __ Goto(&done, __ Float64Sub(__ Float64Constant(-1.0), temp2));
6346
    }
6347 6348
    __ Bind(&if_less_than_minus_two_52);
    __ Goto(&done, input);
6349

6350 6351
    __ Bind(&if_zero);
    __ Goto(&done, input);
6352
  }
6353 6354
  __ Bind(&done);
  return done.PhiAt(0);
6355 6356
}

6357
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundDown(Node* node) {
6358 6359
  // Nothing to be done if a fast hardware instruction is available.
  if (machine()->Float64RoundDown().IsSupported()) {
6360
    return Nothing<Node*>();
6361 6362
  }

6363
  Node* const input = node->InputAt(0);
6364
  return Just(BuildFloat64RoundDown(input));
6365 6366
}

6367
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node) {
6368 6369
  // Nothing to be done if a fast hardware instruction is available.
  if (machine()->Float64RoundTiesEven().IsSupported()) {
6370
    return Nothing<Node*>();
6371 6372
  }

6373 6374
  Node* const input = node->InputAt(0);

6375
  // Generate case for round ties to even:
6376
  //
6377 6378 6379 6380 6381 6382
  //   let value = floor(input) in
  //   let temp1 = input - value in
  //   if temp1 < 0.5 then
  //     value
  //   else if 0.5 < temp1 then
  //     value + 1.0
6383
  //   else
6384 6385 6386
  //     let temp2 = value % 2.0 in
  //     if temp2 == 0.0 then
  //       value
6387
  //     else
6388
  //       value + 1.0
6389

6390 6391
  auto if_is_half = __ MakeLabel();
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);
6392

6393 6394
  Node* value = BuildFloat64RoundDown(input);
  Node* temp1 = __ Float64Sub(input, value);
6395

6396 6397 6398
  Node* const half = __ Float64Constant(0.5);
  Node* check0 = __ Float64LessThan(temp1, half);
  __ GotoIf(check0, &done, value);
6399

6400 6401
  Node* const one = __ Float64Constant(1.0);
  Node* check1 = __ Float64LessThan(half, temp1);
6402
  __ GotoIfNot(check1, &if_is_half);
6403
  __ Goto(&done, __ Float64Add(value, one));
6404

6405 6406 6407 6408 6409
  __ Bind(&if_is_half);
  Node* temp2 = __ Float64Mod(value, __ Float64Constant(2.0));
  Node* check2 = __ Float64Equal(temp2, __ Float64Constant(0.0));
  __ GotoIf(check2, &done, value);
  __ Goto(&done, __ Float64Add(value, one));
6410

6411 6412
  __ Bind(&done);
  return Just(done.PhiAt(0));
6413 6414
}

6415
Node* EffectControlLinearizer::BuildFloat64RoundTruncate(Node* input) {
6416
  if (machine()->Float64RoundTruncate().IsSupported()) {
6417
    return __ Float64RoundTruncate(input);
6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444
  }
  // General case for trunc.
  //
  //   if 0.0 < input then
  //     if 2^52 <= input then
  //       input
  //     else
  //       let temp1 = (2^52 + input) - 2^52 in
  //       if input < temp1 then
  //         temp1 - 1
  //       else
  //         temp1
  //   else
  //     if input == 0 then
  //       input
  //     else
  //       if input <= -2^52 then
  //         input
  //       else
  //         let temp1 = -0 - input in
  //         let temp2 = (2^52 + temp1) - 2^52 in
  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
  //         -0 - temp3
  //
  // Note: We do not use the Diamond helper class here, because it really hurts
  // readability with nested diamonds.

6445 6446 6447 6448 6449 6450
  auto if_not_positive = __ MakeDeferredLabel();
  auto if_greater_than_two_52 = __ MakeDeferredLabel();
  auto if_less_than_minus_two_52 = __ MakeDeferredLabel();
  auto if_zero = __ MakeDeferredLabel();
  auto done_temp3 = __ MakeLabel(MachineRepresentation::kFloat64);
  auto done = __ MakeLabel(MachineRepresentation::kFloat64);
6451

6452 6453 6454
  Node* const zero = __ Float64Constant(0.0);
  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
  Node* const one = __ Float64Constant(1.0);
6455

6456
  Node* check0 = __ Float64LessThan(zero, input);
6457
  __ GotoIfNot(check0, &if_not_positive);
6458 6459 6460
  {
    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
    __ GotoIf(check1, &if_greater_than_two_52);
6461
    {
6462
      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
6463
      __ GotoIfNot(__ Float64LessThan(input, temp1), &done, temp1);
6464
      __ Goto(&done, __ Float64Sub(temp1, one));
6465 6466
    }

6467 6468
    __ Bind(&if_greater_than_two_52);
    __ Goto(&done, input);
6469 6470
  }

6471
  __ Bind(&if_not_positive);
6472
  {
6473 6474
    Node* check1 = __ Float64Equal(input, zero);
    __ GotoIf(check1, &if_zero);
6475

6476 6477 6478
    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
    __ GotoIf(check2, &if_less_than_minus_two_52);
6479 6480

    {
6481 6482 6483 6484
      Node* const minus_zero = __ Float64Constant(-0.0);
      Node* temp1 = __ Float64Sub(minus_zero, input);
      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
      Node* check3 = __ Float64LessThan(temp1, temp2);
6485
      __ GotoIfNot(check3, &done_temp3, temp2);
6486 6487 6488 6489 6490
      __ Goto(&done_temp3, __ Float64Sub(temp2, one));

      __ Bind(&done_temp3);
      Node* temp3 = done_temp3.PhiAt(0);
      __ Goto(&done, __ Float64Sub(minus_zero, temp3));
6491
    }
6492 6493
    __ Bind(&if_less_than_minus_two_52);
    __ Goto(&done, input);
6494

6495 6496
    __ Bind(&if_zero);
    __ Goto(&done, input);
6497
  }
6498
  __ Bind(&done);
6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509
  return done.PhiAt(0);
}

// Lowers Float64RoundTruncate (trunc). Returns Nothing when the machine has
// a fast instruction; otherwise delegates to BuildFloat64RoundTruncate.
// Removed stray line-number residue that had been interleaved in the source.
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
  // Nothing to be done if a fast hardware instruction is available.
  if (machine()->Float64RoundTruncate().IsSupported()) {
    return Nothing<Node*>();
  }

  Node* const input = node->InputAt(0);
  return Just(BuildFloat64RoundTruncate(input));
}

6512 6513 6514 6515 6516 6517
Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
  Node* table = NodeProperties::GetValueInput(node, 0);
  Node* key = NodeProperties::GetValueInput(node, 1);

  {
    Callable const callable =
6518
        Builtins::CallableFor(isolate(), Builtin::kFindOrderedHashMapEntry);
6519 6520
    Operator::Properties const properties = node->op()->properties();
    CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
6521
    auto call_descriptor = Linkage::GetStubCallDescriptor(
6522 6523
        graph()->zone(), callable.descriptor(),
        callable.descriptor().GetStackParameterCount(), flags, properties);
6524 6525
    return __ Call(call_descriptor, __ HeapConstant(callable.code()), table,
                   key, __ NoContextConstant());
6526 6527 6528
  }
}

6529 6530
Node* EffectControlLinearizer::ComputeUnseededHash(Node* value) {
  // See v8::internal::ComputeUnseededHash()
6531
  value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xFFFFFFFF)),
6532 6533 6534 6535 6536 6537
                      __ Word32Shl(value, __ Int32Constant(15)));
  value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(12)));
  value = __ Int32Add(value, __ Word32Shl(value, __ Int32Constant(2)));
  value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(4)));
  value = __ Int32Mul(value, __ Int32Constant(2057));
  value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(16)));
6538
  value = __ Word32And(value, __ Int32Constant(0x3FFFFFFF));
6539 6540 6541 6542 6543 6544 6545 6546 6547
  return value;
}

// Lowers FindOrderedHashMapEntryForInt32Key with an inline probe: hashes the
// int32 key, picks the bucket, then walks the bucket's chain comparing
// candidate keys (as Smis, or as HeapNumbers via float comparison) until a
// match or OrderedHashMap::kNotFound. Removed stray line-number residue that
// had been interleaved in the source.
Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
    Node* node) {
  Node* table = NodeProperties::GetValueInput(node, 0);
  Node* key = NodeProperties::GetValueInput(node, 1);

  // Compute the integer hash code.
  Node* hash = ChangeUint32ToUintPtr(ComputeUnseededHash(key));

  Node* number_of_buckets = ChangeSmiToIntPtr(__ LoadField(
      AccessBuilder::ForOrderedHashMapOrSetNumberOfBuckets(), table));
  // number_of_buckets is a power of two, so this masks hash into range.
  hash = __ WordAnd(hash, __ IntSub(number_of_buckets, __ IntPtrConstant(1)));
  Node* first_entry = ChangeSmiToIntPtr(__ Load(
      MachineType::TaggedSigned(), table,
      __ IntAdd(__ WordShl(hash, __ IntPtrConstant(kTaggedSizeLog2)),
                __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() -
                                  kHeapObjectTag))));

  auto loop = __ MakeLoopLabel(MachineType::PointerRepresentation());
  auto done = __ MakeLabel(MachineType::PointerRepresentation());
  __ Goto(&loop, first_entry);
  __ Bind(&loop);
  {
    Node* entry = loop.PhiAt(0);
    Node* check =
        __ IntPtrEqual(entry, __ IntPtrConstant(OrderedHashMap::kNotFound));
    __ GotoIf(check, &done, entry);
    entry = __ IntAdd(
        __ IntMul(entry, __ IntPtrConstant(OrderedHashMap::kEntrySize)),
        number_of_buckets);

    Node* candidate_key = __ Load(
        MachineType::AnyTagged(), table,
        __ IntAdd(__ WordShl(entry, __ IntPtrConstant(kTaggedSizeLog2)),
                  __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() -
                                    kHeapObjectTag)));

    auto if_match = __ MakeLabel();
    auto if_notmatch = __ MakeLabel();
    auto if_notsmi = __ MakeDeferredLabel();
    __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
    __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
              &if_notmatch);

    __ Bind(&if_notsmi);
    // Non-Smi candidates only match if they are HeapNumbers equal to key.
    __ GotoIfNot(
        __ TaggedEqual(__ LoadField(AccessBuilder::ForMap(), candidate_key),
                       __ HeapNumberMapConstant()),
        &if_notmatch);
    __ Branch(__ Float64Equal(__ LoadField(AccessBuilder::ForHeapNumberValue(),
                                           candidate_key),
                              __ ChangeInt32ToFloat64(key)),
              &if_match, &if_notmatch);

    __ Bind(&if_match);
    __ Goto(&done, entry);

    __ Bind(&if_notmatch);
    {
      // Follow the chain link to the next entry in this bucket.
      Node* next_entry = ChangeSmiToIntPtr(__ Load(
          MachineType::TaggedSigned(), table,
          __ IntAdd(
              __ WordShl(entry, __ IntPtrConstant(kTaggedSizeLog2)),
              __ IntPtrConstant(OrderedHashMap::HashTableStartOffset() +
                                OrderedHashMap::kChainOffset * kTaggedSize -
                                kHeapObjectTag))));
      __ Goto(&loop, next_entry);
    }
  }

  __ Bind(&done);
  return done.PhiAt(0);
}

6615 6616 6617 6618 6619 6620 6621 6622 6623 6624
Node* EffectControlLinearizer::LowerDateNow(Node* node) {
  Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
  Runtime::FunctionId id = Runtime::kDateCurrentTime;
  auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
      graph()->zone(), id, 0, properties, CallDescriptor::kNoFlags);
  return __ Call(call_descriptor, __ CEntryStubConstant(1),
                 __ ExternalConstant(ExternalReference::Create(id)),
                 __ Int32Constant(0), __ NoContextConstant());
}

6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639
Node* EffectControlLinearizer::TruncateWordToInt32(Node* value) {
  if (machine()->Is64()) {
    return __ TruncateInt64ToInt32(value);
  }
  return value;
}

// Tests whether {value} is a strong heap reference by checking that its
// low tag bits equal kHeapObjectTag.
Node* EffectControlLinearizer::BuildIsStrongReference(Node* value) {
  Node* word =
      TruncateWordToInt32(__ BitcastTaggedToWordForTagAndSmiBits(value));
  Node* tag_bits = __ Word32And(word, __ Int32Constant(kHeapObjectTagMask));
  return __ Word32Equal(tag_bits, __ Int32Constant(kHeapObjectTag));
}

6640 6641 6642 6643 6644 6645 6646
Node* EffectControlLinearizer::MakeWeakForComparison(Node* heap_object) {
  // TODO(gsathya): Specialize this for pointer compression.
  return __ BitcastWordToTagged(
      __ WordOr(__ BitcastTaggedToWord(heap_object),
                __ IntPtrConstant(kWeakHeapObjectTag)));
}

6647 6648 6649 6650 6651 6652 6653
Node* EffectControlLinearizer::BuildStrongReferenceFromWeakReference(
    Node* maybe_object) {
  return __ BitcastWordToTagged(
      __ WordAnd(__ BitcastMaybeObjectToWord(maybe_object),
                 __ IntPtrConstant(~kWeakHeapObjectMask)));
}

6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668
Node* EffectControlLinearizer::BuildIsWeakReferenceTo(Node* maybe_object,
                                                      Node* value) {
  if (COMPRESS_POINTERS_BOOL) {
    return __ Word32Equal(
        __ Word32And(
            TruncateWordToInt32(__ BitcastMaybeObjectToWord(maybe_object)),
            __ Uint32Constant(~static_cast<uint32_t>(kWeakHeapObjectMask))),
        TruncateWordToInt32(__ BitcastTaggedToWord(value)));
  } else {
    return __ WordEqual(__ WordAnd(__ BitcastMaybeObjectToWord(maybe_object),
                                   __ IntPtrConstant(~kWeakHeapObjectMask)),
                        __ BitcastTaggedToWord(value));
  }
}

6669 6670 6671 6672 6673 6674
Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) {
  return __ Word32Equal(
      TruncateWordToInt32(__ BitcastMaybeObjectToWord(maybe_object)),
      __ Int32Constant(kClearedWeakHeapObjectLower32));
}

6675 6676 6677 6678 6679 6680
// Pass {bitfield} = {digit} = nullptr to construct the canoncial 0n BigInt.
Node* EffectControlLinearizer::BuildAllocateBigInt(Node* bitfield,
                                                   Node* digit) {
  DCHECK(machine()->Is64());
  DCHECK_EQ(bitfield == nullptr, digit == nullptr);
  static constexpr auto zero_bitfield =
6681
      BigInt::SignBits::update(BigInt::LengthBits::encode(0), false);
6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702

  Node* map = __ HeapConstant(factory()->bigint_map());

  Node* result = __ Allocate(AllocationType::kYoung,
                             __ IntPtrConstant(BigInt::SizeFor(digit ? 1 : 0)));
  __ StoreField(AccessBuilder::ForMap(), result, map);
  __ StoreField(AccessBuilder::ForBigIntBitfield(), result,
                bitfield ? bitfield : __ Int32Constant(zero_bitfield));

  // BigInts have no padding on 64 bit architectures with pointer compression.
  if (BigInt::HasOptionalPadding()) {
    __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result,
                  __ IntPtrConstant(0));
  }
  if (digit) {
    __ StoreField(AccessBuilder::ForBigIntLeastSignificantDigit64(), result,
                  digit);
  }
  return result;
}

#undef __

6705 6706 6707
void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
                            SourcePositionTable* source_positions,
                            NodeOriginTable* node_origins,
6708
                            JSHeapBroker* broker) {
6709
  JSGraphAssembler graph_assembler_(graph, temp_zone);
6710 6711 6712 6713 6714 6715
  EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
                                     temp_zone, source_positions, node_origins,
                                     MaintainSchedule::kDiscard, broker);
  linearizer.Run();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8