// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/machine-type.h"
#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/objects-inl.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;
using compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;

InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      VARIABLE_CONSTRUCTOR(interpreted_frame_pointer_,
                           MachineType::PointerRepresentation()),
      VARIABLE_CONSTRUCTOR(
          bytecode_array_, MachineRepresentation::kTagged,
          Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
      VARIABLE_CONSTRUCTOR(
          bytecode_offset_, MachineType::PointerRepresentation(),
          Parameter(InterpreterDispatchDescriptor::kBytecodeOffset)),
      VARIABLE_CONSTRUCTOR(
          dispatch_table_, MachineType::PointerRepresentation(),
          Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
      VARIABLE_CONSTRUCTOR(
          accumulator_, MachineRepresentation::kTagged,
          Parameter(InterpreterDispatchDescriptor::kAccumulator)),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true),
      disable_stack_check_across_call_(false),
      stack_pointer_before_call_(nullptr) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if the bytecode will make a call
  // along the critical path, or it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

Node* InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}

Node* InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_.Bind(ReloadBytecodeOffset());
  }
  return bytecode_offset_.value();
}

Node* InterpreterAssembler::ReloadBytecodeOffset() {
  Node* offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}

void InterpreterAssembler::SaveBytecodeOffset() {
  Node* offset = BytecodeOffset();
  if (operand_scale() != OperandScale::kSingle) {
    // Subtract one from the offset such that it points to the Wide / ExtraWide
    // prefix bytecode.
    offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
  }
  StoreAndTagRegister(offset, Register::bytecode_offset());
}

Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the debugger
  // has been activated.
  if (!bytecode_array_valid_) {
    bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}

Node* InterpreterAssembler::DispatchTableRawPointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_.Bind(ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate())));
  }
  return dispatch_table_.value();
}

Node* InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

Node* InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
  return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}

void InterpreterAssembler::SetAccumulator(Node* value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
  accumulator_.Bind(value);
}

Node* InterpreterAssembler::GetContext() {
  return LoadRegister(Register::current_context());
}

void InterpreterAssembler::SetContext(Node* value) {
  StoreRegister(value, Register::current_context());
}

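// Walks |depth| links up the context chain (following Context::PREVIOUS_INDEX)
// and returns the context found there; a depth of zero returns |context|.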
Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Label context_found(this);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}

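// Checks |depth| contexts starting at |context| and jumps to |target| if any
// of them has an extension slot that is not the hole.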
void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
                                                              Node* depth,
                                                              Label* target) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Loop until the depth is 0.
  Goto(&context_search);
  BIND(&context_search);
  {
    // TODO(leszeks): We only need to do this check if the context had a sloppy
    // eval; we could pass in a context chain bitmask to figure out which
    // contexts actually need to be checked.

    Node* extension_slot =
        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);

    // Jump to the target if the extension slot is not a hole.
    GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);

    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
           &context_search);
  }
}

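// A register location is the interpreted frame pointer plus the scaled
// register operand; the resulting address is poisoned on speculation before
// it is used.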
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
  return WordPoisonOnSpeculation(
      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
}

Node* InterpreterAssembler::RegisterLocation(Register reg) {
  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}

Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
  return TimesSystemPointerSize(index);
}

Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        RegisterFrameOffset(reg_index),
                        LoadSensitivity::kCritical);
}

Node* InterpreterAssembler::LoadRegister(Register reg) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
}

Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  return LoadAndUntagSmi(GetInterpretedFramePointer(),
                         reg.ToOperand() * kSystemPointerSize);
}

Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
  return LoadRegister(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kRegPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  Node* second_reg_index = NextRegister(first_reg_index);
  return std::make_pair(LoadRegister(first_reg_index),
                        LoadRegister(second_reg_index));
}

InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  Node* base_reg = RegisterLocation(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
  Node* reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}

Node* InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  Node* location = RegisterLocationInRegisterList(reg_list, index);
  // Location is already poisoned on speculation, so no need to poison here.
  return LoadFullTagged(location);
}

Node* InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_ASSERT(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  Node* offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return IntPtrSub(reg_list.base_reg_location(), offset);
}

void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
  StoreFullTaggedNoWriteBarrier(
      GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}

void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
                                RegisterFrameOffset(reg_index), value);
}

void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
  int offset = reg.ToOperand() * kSystemPointerSize;
  StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
}

void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
                                                       int operand_index) {
  StoreRegister(value,
                BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
                                                           Node* value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  Node* second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}

void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    Node* value1, Node* value2, Node* value3, int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  Node* second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  Node* third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}

Node* InterpreterAssembler::NextRegister(Node* reg_index) {
  // Register indexes are negative, so the next register's index is one less.
  return IntPtrAdd(reg_index, IntPtrConstant(-1));
}

Node* InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

Node* InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
}

Node* InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
}

Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type,
    LoadSensitivity needs_poisoning) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant byte into bytes[0] and then in order
  // down to least significant in bytes[count - 1].
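  // For example, a 16-bit operand at offset k on a little-endian target is
  // read as bytes[0] = the byte at k + 1 (MSB, using msb_type) and
  // bytes[1] = the byte at k (LSB), and packed as (bytes[0] << 8) | bytes[1].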
  DCHECK_LE(count, kMaxCount);
  Node* bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset,
                    needs_poisoning);
  }

  // Pack LSB to MSB.
  Node* result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    Node* shift = Int32Constant(i * kBitsPerByte);
    Node* value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}

Node* InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16(),
                                        needs_poisoning);
  }
}

Node* InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16(),
                                        needs_poisoning);
  }
}

Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32(),
                                        needs_poisoning);
  }
}

Node* InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32(),
                                        needs_poisoning);
  }
}

Node* InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

Node* InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandUImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(BytecodeOperandIdx(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}

Node* InterpreterAssembler::BytecodeOperandReg(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}

Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
  TNode<FixedArray> constant_pool = CAST(LoadObjectField(
      BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
  return UnsafeLoadFixedArrayElement(
      constant_pool, UncheckedCast<IntPtrT>(index), LoadSensitivity::kCritical);
}

Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
  return SmiUntag(LoadConstantPoolEntry(index));
}

Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  Node* index =
      BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
  return LoadConstantPoolEntry(index);
}

Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
    int operand_index) {
  return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
}

TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  return CodeStubAssembler::LoadFeedbackVector(function);
}

void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been called
    // in a path that dominates _all_ of those calls (which we don't track).
    SaveBytecodeOffset();
  }

  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    DCHECK_NULL(stack_pointer_before_call_);
    stack_pointer_before_call_ = LoadStackPointer();
  }
  bytecode_array_valid_ = false;
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {
  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    Node* stack_pointer_after_call = LoadStackPointer();
    Node* stack_pointer_before_call = stack_pointer_before_call_;
    stack_pointer_before_call_ = nullptr;
    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
                        AbortReason::kUnexpectedStackPointer);
  }
}

void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
                                              Node* slot_id) {
  Comment("increment call count");
  TNode<Smi> call_count =
      CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize));
  // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
  // count are used as flags. To increment the call count by 1 we hence
  // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
  Node* new_count = SmiAdd(
      call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
  // Count is Smi, so we don't need a write barrier.
  StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
                          SKIP_WRITE_BARRIER, kTaggedSize);
}

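// Collects callable feedback for {target} in the given feedback vector slot,
// transitioning the slot from uninitialized to monomorphic (a weak reference
// to {target}) and on to megamorphic once the feedback no longer matches a
// single callable in the current native context.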
void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
                                                   Node* feedback_vector,
                                                   Node* slot_id) {
  Label extra_checks(this, Label::kDeferred), done(this);

  // Check if we have monomorphic {target} feedback already.
  TNode<MaybeObject> feedback =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  Comment("check if monomorphic");
  TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target));
  GotoIf(is_monomorphic, &done);

  // Check if it is a megamorphic {target}.
  Comment("check if megamorphic");
  Node* is_megamorphic = WordEqual(
      feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
  Branch(is_megamorphic, &done, &extra_checks);

  BIND(&extra_checks);
  {
    Label initialize(this), mark_megamorphic(this);

    Comment("check if weak reference");
    Node* is_uninitialized = WordEqual(
        feedback,
        HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
    GotoIf(is_uninitialized, &initialize);
    CSA_ASSERT(this, IsWeakOrCleared(feedback));

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&initialize);
    {
      // Check if {target} is a JSFunction in the current native context.
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(target), &mark_megamorphic);
      // Check if the {target} is a JSFunction or JSBoundFunction
      // in the current native context.
      VARIABLE(var_current, MachineRepresentation::kTagged, target);
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        Node* current = var_current.value();
        CSA_ASSERT(this, TaggedIsNotSmi(current));
        Node* current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
               &if_function, &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          Node* current_context =
              LoadObjectField(current, JSFunction::kContextOffset);
          Node* current_native_context = LoadNativeContext(current_context);
          Branch(WordEqual(LoadNativeContext(context), current_native_context),
                 &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current.Bind(LoadObjectField(
              current, JSBoundFunction::kBoundTargetFunctionOffset));
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                         CAST(target));
      ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
      Goto(&done);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "Call:TransitionMegamorphic");
      Goto(&done);
    }
  }

  BIND(&done);
}

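// Increments the call count and collects callable feedback for {target};
// does nothing if {maybe_feedback_vector} is undefined.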
void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
                                               Node* maybe_feedback_vector,
                                               Node* slot_id) {
  Label feedback_done(this);
  // If feedback_vector is not valid, then nothing to do.
  GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);

  CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));

  // Increment the call count.
  IncrementCallCount(maybe_feedback_vector, slot_id);

  // Collect the callable {target} feedback.
  CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id);
  Goto(&feedback_done);

  BIND(&feedback_done);
}

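// Pushes |args| and performs the call through the InterpreterPushArgsThenCall
// builtin, then dispatches to the next bytecode; the builtin's result ends up
// in the accumulator, so this handler does not regain control.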
void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, const RegListNodePair& args,
    ConvertReceiverMode receiver_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);

  Node* args_count;
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The receiver is implied, so it is not in the argument list.
    args_count = args.reg_count();
  } else {
    // Subtract the receiver from the argument count.
    Node* receiver_count = Int32Constant(1);
    args_count = Int32Sub(args.reg_count(), receiver_count);
  }

  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
  Node* code_target = HeapConstant(callable.code());

  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
                                             Node* arg_count,
                                             ConvertReceiverMode receiver_mode,
                                             TArgs... args) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
  Callable callable = CodeFactory::Call(isolate());
  Node* code_target = HeapConstant(callable.code());

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument (the receiver) is implied to be undefined.
    TailCallStubThenBytecodeDispatch(
        callable.descriptor(), code_target, context, function, arg_count,
        static_cast<Node*>(UndefinedConstant()), args...);
  } else {
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...);
  }
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, Node* arg_count,
    ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, Node* arg_count,
    ConvertReceiverMode receiver_mode, Node*);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, Node* arg_count,
    ConvertReceiverMode receiver_mode, Node*, Node*);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, Node* arg_count,
    ConvertReceiverMode receiver_mode, Node*, Node*, Node*);

void InterpreterAssembler::CallJSWithSpreadAndDispatch(
    Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
    Node* maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
  CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
  Comment("call using CallWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), ConvertReceiverMode::kAny,
      InterpreterPushArgsMode::kWithFinalSpread);
  Node* code_target = HeapConstant(callable.code());

  Node* receiver_count = Int32Constant(1);
  Node* args_count = Int32Sub(args.reg_count(), receiver_count);
  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

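// Implements the Construct bytecode's feedback collection: the slot moves
// between uninitialized, monomorphic (a weak reference to {new_target}), an
// AllocationSite (when {target} and {new_target} are the Array constructor)
// and megamorphic, and the construction itself goes through the
// InterpreterPushArgsThenConstruct builtin.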
Node* InterpreterAssembler::Construct(Node* target, Node* context,
                                      Node* new_target,
                                      const RegListNodePair& args,
                                      Node* slot_id, Node* feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  VARIABLE(var_result, MachineRepresentation::kTagged);
  VARIABLE(var_site, MachineRepresentation::kTagged);
  Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
      construct(this), construct_array(this, &var_site);
  GotoIf(IsUndefined(feedback_vector), &construct);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<MaybeObject> feedback =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
         &extra_checks);

  BIND(&extra_checks);
  {
    Label check_allocation_site(this), check_initialized(this),
        initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    Node* is_megamorphic = WordEqual(
        feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak reference");
    GotoIfNot(IsWeakOrCleared(feedback), &check_allocation_site);

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&check_allocation_site);
    {
      // Check if it is an AllocationSite.
      Comment("check if allocation site");
      TNode<HeapObject> strong_feedback = CAST(feedback);
      GotoIfNot(IsAllocationSite(strong_feedback), &check_initialized);

      // Make sure that {target} and {new_target} are the Array constructor.
      Node* array_function = LoadContextElement(LoadNativeContext(context),
                                                Context::ARRAY_FUNCTION_INDEX);
      GotoIfNot(WordEqual(target, array_function), &mark_megamorphic);
      GotoIfNot(WordEqual(new_target, array_function), &mark_megamorphic);
      var_site.Bind(strong_feedback);
      Goto(&construct_array);
    }

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      Node* is_uninitialized =
          WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        Node* current = var_current.value();
        CSA_ASSERT(this, TaggedIsNotSmi(current));
        Node* current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
               &if_function, &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          Node* current_context =
              LoadObjectField(current, JSFunction::kContextOffset);
          Node* current_native_context = LoadNativeContext(current_context);
          Branch(WordEqual(LoadNativeContext(context), current_native_context),
                 &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current.Bind(LoadObjectField(
              current, JSBoundFunction::kBoundTargetFunctionOffset));
          Goto(&loop);
        }
      }
      BIND(&done_loop);

      // Create an AllocationSite if {target} and {new_target} refer
      // to the current native context's Array constructor.
      Label create_allocation_site(this), store_weak_reference(this);
      GotoIfNot(WordEqual(target, new_target), &store_weak_reference);
      Node* array_function = LoadContextElement(LoadNativeContext(context),
                                                Context::ARRAY_FUNCTION_INDEX);
      Branch(WordEqual(target, array_function), &create_allocation_site,
             &store_weak_reference);

      BIND(&create_allocation_site);
      {
        var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector,
                                                           SmiTag(slot_id)));
        ReportFeedbackUpdate(feedback_vector, slot_id,
                             "Construct:CreateAllocationSite");
        Goto(&construct_array);
      }

      BIND(&store_weak_reference);
      {
        StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                           CAST(new_target));
        ReportFeedbackUpdate(feedback_vector, slot_id,
                             "Construct:StoreWeakReference");
        Goto(&construct);
      }
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "Construct:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct_array);
  {
    // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
    // constructor feedback collection inside of Ignition.
    Comment("call using ConstructArray builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kArrayFunction);
    Node* code_target = HeapConstant(callable.code());
    var_result.Bind(CallStub(callable.descriptor(), code_target, context,
                             args.reg_count(), args.base_reg_location(), target,
                             new_target, var_site.value()));
    Goto(&return_result);
  }

  BIND(&construct);
  {
    // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
    Comment("call using Construct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kOther);
    Node* code_target = HeapConstant(callable.code());
    var_result.Bind(CallStub(callable.descriptor(), code_target, context,
                             args.reg_count(), args.base_reg_location(), target,
                             new_target, UndefinedConstant()));
    Goto(&return_result);
  }

  BIND(&return_result);
  return var_result.value();
}

Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
                                                Node* new_target,
                                                const RegListNodePair& args,
                                                Node* slot_id,
                                                Node* feedback_vector) {
  // TODO(bmeurer): Unify this with the Construct bytecode feedback
  // above once we have a way to pass the AllocationSite to the Array
  // constructor _and_ spread the last argument at the same time.
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Label extra_checks(this, Label::kDeferred), construct(this);
  GotoIf(IsUndefined(feedback_vector), &construct);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<MaybeObject> feedback =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
         &extra_checks);

  BIND(&extra_checks);
  {
    Label check_initialized(this), initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    Node* is_megamorphic = WordEqual(
        feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak reference");
    GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      Node* is_uninitialized =
          WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        Node* current = var_current.value();
        CSA_ASSERT(this, TaggedIsNotSmi(current));
        Node* current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
               &if_function, &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          Node* current_context =
              LoadObjectField(current, JSFunction::kContextOffset);
          Node* current_native_context = LoadNativeContext(current_context);
          Branch(WordEqual(LoadNativeContext(context), current_native_context),
                 &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current.Bind(LoadObjectField(
              current, JSBoundFunction::kBoundTargetFunctionOffset));
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                         CAST(new_target));
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:Initialize");
      Goto(&construct);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct);
  Comment("call using ConstructWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
      isolate(), InterpreterPushArgsMode::kWithFinalSpread);
  Node* code_target = HeapConstant(callable.code());
  return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
                  args.base_reg_location(), target, new_target,
                  UndefinedConstant());
}

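// Calls the runtime function identified by |function_id|: the entry point is
// looked up in the external runtime function table and invoked through the
// InterpreterCEntry stub with |args| as the arguments.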
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
                                         const RegListNodePair& args,
                                         int result_size) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
  Node* code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
  Node* function_table = ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate()));
  Node* function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  Node* function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  Node* function_entry =
      Load(MachineType::Pointer(), function,
           IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
                   result_size, code_target, context, args.reg_count(),
                   args.base_reg_location(), function_entry);
}

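// Adjusts the interrupt budget stored in the function's FeedbackCell by
// |weight| (subtracting for backward updates, adding otherwise) and calls
// Runtime::kBytecodeBudgetInterrupt if a backward update drops the budget
// below zero.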
void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
  Comment("[ UpdateInterruptBudget");

  // Assert that the weight is positive (negative weights should be implemented
  // as backward updates).
  CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));

  Label load_budget_from_bytecode(this), load_budget_done(this);
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<FeedbackCell> feedback_cell =
      CAST(LoadObjectField(function, JSFunction::kFeedbackCellOffset));
  TNode<Int32T> old_budget = LoadObjectField<Int32T>(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset);

  // Make sure we include the current bytecode in the budget calculation.
  TNode<Int32T> budget_after_bytecode =
      Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));

  Label done(this);
  TVARIABLE(Int32T, new_budget);
  if (backward) {
    // Update budget by |weight| and check if it reaches zero.
    new_budget = Signed(Int32Sub(budget_after_bytecode, weight));
    Node* condition =
        Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
    Label ok(this), interrupt_check(this, Label::kDeferred);
1278 1279 1280
    Branch(condition, &ok, &interrupt_check);

    BIND(&interrupt_check);
1281
    CallRuntime(Runtime::kBytecodeBudgetInterrupt, GetContext(), function);
1282
    Goto(&done);
1283 1284

    BIND(&ok);
1285
  } else {
1286 1287
    // For a forward jump, we know we only increase the interrupt budget, so
    // no need to check if it's below zero.
1288
    new_budget = Signed(Int32Add(budget_after_bytecode, weight));
1289
  }
1290 1291

  // Update budget.
1292 1293 1294
  StoreObjectFieldNoWriteBarrier(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset, new_budget.value(),
      MachineRepresentation::kWord32);
1295 1296
  Goto(&done);
  BIND(&done);
1297
  Comment("] UpdateInterruptBudget");
1298 1299
}

Node* InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); }

Node* InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
#endif
  Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
                               : IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_.Bind(next_offset);
  return next_offset;
}

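// Jump updates the interrupt budget by |delta| (see UpdateInterruptBudget;
// backward jumps consume budget), advances the bytecode offset by the same
// amount, and tail-calls the handler of the jump target.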
Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));

  UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
  Node* new_bytecode_offset = Advance(delta, backward);
  Node* target_bytecode = LoadBytecode(new_bytecode_offset);
  return DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }

Node* InterpreterAssembler::JumpBackward(Node* delta) {
  return Jump(delta, true);
}

void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  Jump(delta);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
  JumpConditional(WordEqual(lhs, rhs), delta);
}

void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
                                              Node* delta) {
  JumpConditional(WordNotEqual(lhs, rhs), delta);
}

Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
  Node* bytecode =
      Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
  return ChangeUint32ToWord(bytecode);
}

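// StarDispatchLookahead peeks at the bytecode that is about to be dispatched.
// If it is Star, its store-accumulator-to-register effect is inlined via
// InlineStar() and dispatch continues with the bytecode after the Star, so
// the Star itself never goes through the dispatch table.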
Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
  Label do_inline_star(this), done(this);

  Variable var_bytecode(this, MachineType::PointerRepresentation());
  var_bytecode.Bind(target_bytecode);

  Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
  Node* is_star = WordEqual(target_bytecode, star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  BIND(&do_inline_star);
  {
    InlineStar();
    var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
    Goto(&done);
  }
  BIND(&done);
  return var_bytecode.value();
}

void InterpreterAssembler::InlineStar() {
  Bytecode previous_bytecode = bytecode_;
  AccumulatorUse previous_acc_use = accumulator_use_;

  bytecode_ = Bytecode::kStar;
  accumulator_use_ = AccumulatorUse::kNone;

#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  StoreRegister(GetAccumulator(),
                BytecodeOperandReg(0, LoadSensitivity::kSafe));

  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  accumulator_use_ = previous_acc_use;
}

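// Dispatch advances past the current bytecode, loads the next bytecode,
// applies the Star lookahead above where enabled, and tail-calls the handler
// found in the dispatch table for that bytecode.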
Node* InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  Node* target_offset = Advance();
  Node* target_bytecode = LoadBytecode(target_offset);

  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    target_bytecode = StarDispatchLookahead(target_bytecode);
  }
  return DispatchToBytecode(target_bytecode, BytecodeOffset());
}

Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
                                               Node* new_bytecode_offset) {
  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(target_bytecode);
  }

  Node* target_code_entry =
      Load(MachineType::Pointer(), DispatchTableRawPointer(),
           TimesSystemPointerSize(target_bytecode));

  return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
                                        target_bytecode);
}

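// DispatchToBytecodeHandler receives a tagged Code object; the callable entry
// point is computed below by untagging the pointer and skipping the Code
// header (Code::kHeaderSize - kHeapObjectTag).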
Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
                                                      Node* bytecode_offset,
                                                      Node* target_bytecode) {
  // TODO(ishell): Add CSA::CodeEntryPoint(code).
  Node* handler_entry =
      IntPtrAdd(BitcastTaggedToWord(handler),
                IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
  return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
                                        target_bytecode);
}

Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
  // Propagate speculation poisoning.
  Node* poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
  return TailCallBytecodeDispatch(
      InterpreterDispatchDescriptor{}, poisoned_handler_entry,
      GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(),
      DispatchTableRawPointer());
}

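// Illustrative example for the wide dispatch below (assuming the usual
// mapping of the Wide/ExtraWide prefixes to operand scales): after a Wide
// prefix, operand_scale is kDouble and the handler used is
// dispatch_table[256 + next_bytecode]; after ExtraWide (kQuadruple) it is
// dispatch_table[512 + next_bytecode].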
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base pointer into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  Node* next_bytecode_offset = Advance(1);
  Node* next_bytecode = LoadBytecode(next_bytecode_offset);

  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(next_bytecode);
  }

  Node* base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
  }
  Node* target_index = IntPtrAdd(base_index, next_bytecode);
  Node* target_code_entry =
      Load(MachineType::Pointer(), DispatchTableRawPointer(),
           TimesSystemPointerSize(target_index));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
                                 next_bytecode);
}

void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by the number of bytes between the end of the
  // current bytecode and the start of the first one, to simulate backedge to
  // start of function.
  //
  // With headers and current offset, the bytecode array layout looks like:
  //
  //           <---------- simulated backedge ----------
  // | header | first bytecode | .... | return bytecode |
  //  |<------ current offset ------->
  //  ^ tagged bytecode array pointer
  //
  // UpdateInterruptBudget already handles adding the bytecode size to the
  // length of the back-edge, so we just have to correct for the non-zero offset
  // of the first bytecode.

  const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
  Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
                                    Int32Constant(kFirstBytecodeOffset));
  UpdateInterruptBudget(profiling_weight, true);
}

Node* InterpreterAssembler::LoadOsrNestingLevel() {
  return LoadObjectField(BytecodeArrayTaggedPointer(),
                         BytecodeArray::kOsrNestingLevelOffset,
                         MachineType::Int8());
}

void InterpreterAssembler::Abort(AbortReason abort_reason) {
  disable_stack_check_across_call_ = true;
  Node* abort_id = SmiConstant(abort_reason);
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
  disable_stack_check_across_call_ = false;
}

void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                               AbortReason abort_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  BIND(&abort);
  Abort(abort_reason);
  Goto(&ok);

  BIND(&ok);
}

void InterpreterAssembler::MaybeDropFrames(Node* context) {
  Node* restart_fp_address =
      ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));

  Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
  Node* null = IntPtrConstant(0);

  Label ok(this), drop_frames(this);
  Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);

  BIND(&drop_frames);
  // We don't expect this call to return since the frame dropper tears down
  // the stack and jumps into the function on the target frame to restart it.
  CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
  Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
  Goto(&ok);

  BIND(&ok);
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

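// TraceBytecodeDispatch bumps a counter in a flattened
// (source bytecode, target bytecode) matrix:
//
//   counter_index = source * (Bytecode::kLast + 1) + target
//
// The counter saturates at the maximum uintptr_t value rather than wrapping.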
void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
  Node* counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  Node* source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  Node* counter_offset = TimesSystemPointerSize(
      IntPtrAdd(source_bytecode_table_index, target_bytecode));
  Node* old_counter =
      Load(MachineType::IntPtr(), counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  Node* counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
  return true;
#else
#error "Unknown Architecture"
#endif
}

void InterpreterAssembler::AbortIfRegisterCountInvalid(
    Node* parameters_and_registers, Node* formal_parameter_count,
    Node* register_count) {
  Node* array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers);

  Label ok(this), abort(this, Label::kDeferred);
  Branch(UintPtrLessThanOrEqual(
             IntPtrAdd(formal_parameter_count, register_count), array_size),
         &ok, &abort);

  BIND(&abort);
  Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
  Goto(&ok);

  BIND(&ok);
}

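// ExportParametersAndRegisterFile writes the formal parameters (without the
// receiver) followed by the interpreter registers into the generator's
// parameters_and_registers array: array[i] holds parameter i for
// i < formal_parameter_count and register (i - formal_parameter_count)
// otherwise.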
Node* InterpreterAssembler::ExportParametersAndRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  // Store the formal parameters (without receiver) followed by the
  // registers into the generator's internal parameters_and_registers field.
  TNode<IntPtrT> formal_parameter_count_intptr =
      ChangeInt32ToIntPtr(formal_parameter_count);
  Node* register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  {
    Variable var_index(this, MachineType::PointerRepresentation());
    var_index.Bind(IntPtrConstant(0));

    // Iterate over parameters and write them into the array.
    Label loop(this, &var_index), done_loop(this);

    Node* reg_base = IntPtrAdd(
        IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
        formal_parameter_count_intptr);

    Goto(&loop);
    BIND(&loop);
    {
      Node* index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
                &done_loop);

      Node* reg_index = IntPtrSub(reg_base, index);
      Node* value = LoadRegister(reg_index);

      StoreFixedArrayElement(array, index, value);

      var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  {
    // Iterate over register file and write values into array.
    // The mapping of register to array index must match that used in
    // BytecodeGraphBuilder::VisitResumeGenerator.
    Variable var_index(this, MachineType::PointerRepresentation());
    var_index.Bind(IntPtrConstant(0));

    Label loop(this, &var_index), done_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      Node* index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

      Node* reg_index =
          IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
      Node* value = LoadRegister(reg_index);

      Node* array_index = IntPtrAdd(formal_parameter_count_intptr, index);
      StoreFixedArrayElement(array, array_index, value);

      var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  return array;
}

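// ImportRegisterFile is the inverse of the export above: it copies the saved
// values from the generator's array back into the register file and replaces
// each copied array slot with the StaleRegister sentinel so the array does
// not keep those values alive.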
Node* InterpreterAssembler::ImportRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  TNode<IntPtrT> formal_parameter_count_intptr =
      ChangeInt32ToIntPtr(formal_parameter_count);
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));

  // Iterate over array and write values into register file.  Also erase the
  // array contents to not keep them alive artificially.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    TNode<IntPtrT> index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    TNode<IntPtrT> array_index =
        IntPtrAdd(formal_parameter_count_intptr, index);
    TNode<Object> value = LoadFixedArrayElement(array, array_index);

    TNode<IntPtrT> reg_index =
        IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

    StoreFixedArrayElement(array, array_index,
                           LoadRoot(RootIndex::kStaleRegister));

    var_index = IntPtrAdd(index, IntPtrConstant(1));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}

int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}

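// ToNumberOrNumeric converts the accumulator to a Number (or Numeric) and
// records BinaryOperationFeedback in the slot named by operand 0:
// kSignedSmall for Smis, kNumber for HeapNumbers, kBigInt for BigInts when
// converting to Numeric, and kAny when the conversion needs a builtin call.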
void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
  Node* object = GetAccumulator();
  Node* context = GetContext();

  Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
  Variable var_result(this, MachineRepresentation::kTagged);
  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
      if_objectisother(this, Label::kDeferred);

  GotoIf(TaggedIsSmi(object), &if_objectissmi);
  Branch(IsHeapNumber(object), &if_objectisheapnumber, &if_objectisother);

  BIND(&if_objectissmi);
  {
    var_result.Bind(object);
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
    Goto(&if_done);
  }

  BIND(&if_objectisheapnumber);
  {
    var_result.Bind(object);
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
    Goto(&if_done);
  }

  BIND(&if_objectisother);
  {
    auto builtin = Builtins::kNonNumberToNumber;
    if (mode == Object::Conversion::kToNumeric) {
      builtin = Builtins::kNonNumberToNumeric;
      // Special case for collecting BigInt feedback.
      Label not_bigint(this);
      GotoIfNot(IsBigInt(object), &not_bigint);
      {
        var_result.Bind(object);
        var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
        Goto(&if_done);
      }
      BIND(&not_bigint);
    }

    // Convert {object} by calling out to the appropriate builtin.
    var_result.Bind(CallBuiltin(builtin, context, object));
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
    Goto(&if_done);
  }

  BIND(&if_done);

  // Record the type feedback collected for {object}.
  Node* slot_index = BytecodeOperandIdx(0);
  Node* maybe_feedback_vector = LoadFeedbackVector();

  UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);

  SetAccumulator(var_result.value());
  Dispatch();
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8