// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/machine-type.h"
#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/objects-inl.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;
using compiler::Node;

InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
      TVARIABLE_CONSTRUCTOR(
          bytecode_array_,
          CAST(Parameter(InterpreterDispatchDescriptor::kBytecodeArray))),
      TVARIABLE_CONSTRUCTOR(
          bytecode_offset_,
          UncheckedCast<IntPtrT>(
              Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))),
      TVARIABLE_CONSTRUCTOR(
          dispatch_table_, UncheckedCast<ExternalReference>(Parameter(
                               InterpreterDispatchDescriptor::kDispatchTable))),
      TVARIABLE_CONSTRUCTOR(
          accumulator_,
          CAST(Parameter(InterpreterDispatchDescriptor::kAccumulator))),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if the bytecode will make a call
  // along the critical path, or if it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails, the handler does not use the accumulator
  // in the way described in the bytecode definitions in bytecodes.h.
  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

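// Returns the interpreted frame pointer, binding it on first use. If the
// current bytecode makes a call along the critical path, the pointer is
// reloaded from the machine frame once after that call.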
TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_ = ReloadBytecodeOffset();
  }
  return bytecode_offset_.value();
}

TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
  TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}

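// Spills the current bytecode offset into its interpreter frame slot as a
// Smi. With 32-bit Smis on a 64-bit target, the slot is written as two
// 32-bit stores (the payload half plus a zeroed half that provides the Smi
// tag bits) instead of a full SmiTag-and-store.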
void InterpreterAssembler::SaveBytecodeOffset() {
  TNode<IntPtrT> bytecode_offset = BytecodeOffset();
  if (operand_scale() != OperandScale::kSingle) {
    // Subtract one from the bytecode_offset such that it points to the Wide /
    // ExtraWide prefix bytecode.
    bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
  }
  int store_offset =
      Register::bytecode_offset().ToOperand() * kSystemPointerSize;
  TNode<RawPtrT> base = GetInterpretedFramePointer();

  if (SmiValuesAre32Bits()) {
    int zero_offset = store_offset + 4;
    int payload_offset = store_offset;
#if V8_TARGET_LITTLE_ENDIAN
    std::swap(zero_offset, payload_offset);
#endif
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(zero_offset), Int32Constant(0));
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(payload_offset),
                        TruncateIntPtrToInt32(bytecode_offset));
  } else {
    StoreFullTaggedNoWriteBarrier(base, IntPtrConstant(store_offset),
                                  SmiTag(bytecode_offset));
  }
}

TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the debugger
  // has been activated.
  if (!bytecode_array_valid_) {
    bytecode_array_ = CAST(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}

TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_ = ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate()));
  }
  return dispatch_table_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
  return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}

void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
  accumulator_ = value;
}

TNode<Context> InterpreterAssembler::GetContext() {
  return CAST(LoadRegister(Register::current_context()));
}

void InterpreterAssembler::SetContext(TNode<Context> value) {
  StoreRegister(value, Register::current_context());
}

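// Walks up the context chain through the Context::PREVIOUS_INDEX slots and
// returns the context |depth| levels above |context|.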
TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
                                                       TNode<Uint32T> depth) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  Label context_found(this);

  Label context_search(this, {&cur_depth, &cur_context});

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context =
        CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}

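// Walks up the context chain, jumping to |target| as soon as a context whose
// scope info has an extension field holds a non-undefined extension slot.
// Falls through once |depth| contexts have been checked without a match.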
void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(
    TNode<Context> context, TNode<Uint32T> depth, Label* target) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  Label context_search(this, {&cur_depth, &cur_context});
  Label no_extension(this);

  // Loop until the depth is 0.
  Goto(&context_search);
  BIND(&context_search);
  {
    // Check if context has an extension slot.
    TNode<BoolT> has_extension =
        LoadScopeInfoHasExtensionField(LoadScopeInfo(cur_context.value()));
    GotoIfNot(has_extension, &no_extension);

    // Jump to the target if the extension slot is not an undefined value.
    TNode<Object> extension_slot =
        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
    Branch(TaggedNotEqual(extension_slot, UndefinedConstant()), target,
           &no_extension);

    BIND(&no_extension);
    {
      cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
      cur_context = CAST(
          LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

      GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
             &context_search);
    }
  }
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
    TNode<IntPtrT> reg_index) {
  return Signed(WordPoisonOnSpeculation(
      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}

TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
  return TimesSystemPointerSize(index);
}

TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        RegisterFrameOffset(reg_index),
                        LoadSensitivity::kCritical);
}

TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
}

TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  TNode<RawPtrT> base = GetInterpretedFramePointer();
  int index = reg.ToOperand() * kSystemPointerSize;
  if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
    index += 4;
#endif
    return ChangeInt32ToIntPtr(Load<Int32T>(base, IntPtrConstant(index)));
  } else {
    return SmiToIntPtr(CAST(LoadFullTagged(base, IntPtrConstant(index))));
  }
}

TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
    int operand_index) {
  return LoadRegister(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
  DCHECK_EQ(OperandType::kRegPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  return std::make_pair(LoadRegister(first_reg_index),
                        LoadRegister(second_reg_index));
}

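// A register list is encoded as two consecutive operands: the first register
// in the list and the register count. Both are returned together as a
// RegListNodePair.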
InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  TNode<IntPtrT> base_reg = RegisterLocation(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
  TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}

TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
  // Location is already poisoned on speculation, so no need to poison here.
  return LoadFullTagged(location);
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_ASSERT(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
}

void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) {
  StoreFullTaggedNoWriteBarrier(
      GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}

void InterpreterAssembler::StoreRegister(TNode<Object> value,
                                         TNode<IntPtrT> reg_index) {
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
                                RegisterFrameOffset(reg_index), value);
}

void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
                                                       int operand_index) {
  StoreRegister(value,
                BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
                                                           TNode<Object> value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}

void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    TNode<Object> value1, TNode<Object> value2, TNode<Object> value3,
    int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  TNode<IntPtrT> third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}

TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) {
  // Register indexes are negative, so the next register's index is the
  // current index minus one.
  return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
}

TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Uint8T>(BytecodeArrayTaggedPointer(),
                      IntPtrAdd(BytecodeOffset(), operand_offset),
                      needs_poisoning);
}

TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Int8T>(BytecodeArrayTaggedPointer(),
                     IntPtrAdd(BytecodeOffset(), operand_offset),
                     needs_poisoning);
}

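// Reads a 16- or 32-bit operand one byte at a time on targets that do not
// support unaligned loads. For example, a little-endian 16-bit read at
// offset k loads the MSB from k + 1 into bytes[0] and the LSB from k into
// bytes[1], then combines them as (bytes[0] << 8) | bytes[1].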
TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type,
    LoadSensitivity needs_poisoning) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant byte into bytes[0], then the remaining bytes
  // in order down to the least significant in bytes[count - 1].
  DCHECK_LE(count, kMaxCount);
  TNode<Word32T> bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    TNode<IntPtrT> offset =
        IntPtrConstant(relative_offset + msb_offset + i * kStep);
    TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] =
        UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
                                    array_offset, needs_poisoning));
  }

  // Pack LSB to MSB.
  TNode<Word32T> result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    TNode<Int32T> shift = Int32Constant(i * kBitsPerByte);
    TNode<Word32T> value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}

TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Uint16T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Uint16(), needs_poisoning));
  }
}

TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Int16T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Int16(), needs_poisoning));
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Uint32T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Uint32(), needs_poisoning));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Int32T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Int32(), needs_poisoning));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord(
    int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromUint32(BytecodeOperandUImm(operand_index));
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr(
    int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}

TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
    int operand_index) {
  TNode<IntPtrT> index =
      ChangeInt32ToIntPtr(Signed(BytecodeOperandIdxInt32(operand_index)));
  return IntPtrToTaggedIndex(index);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
    int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
    int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

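// Loads an entry from the bytecode array's constant pool. The element load
// uses LoadSensitivity::kCritical so that it participates in speculation
// poisoning.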
TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
  TNode<FixedArray> constant_pool = CAST(LoadObjectField(
      BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
  return UnsafeLoadFixedArrayElement(constant_pool,
                                     UncheckedCast<IntPtrT>(index), 0,
                                     LoadSensitivity::kCritical);
}

TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
    TNode<WordT> index) {
  return SmiUntag(CAST(LoadConstantPoolEntry(index)));
}

TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  TNode<UintPtrT> index =
      BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
  return LoadConstantPoolEntry(index);
}

TNode<IntPtrT>
InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
    int operand_index) {
  return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
}

TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  return CodeStubAssembler::LoadFeedbackVector(function);
}

void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been called
    // in a path that dominates _all_ of those calls (which we don't track).
    SaveBytecodeOffset();
  }

  bytecode_array_valid_ = false;
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {}

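// Pushes |args|, tail-calls the InterpreterPushArgsThenCall builtin to
// perform the JS call, and dispatches to the next bytecode afterwards. The
// call result is left in the accumulator.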
void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    ConvertReceiverMode receiver_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);

  TNode<Word32T> args_count;
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The receiver is implied, so it is not in the argument list.
    args_count = args.reg_count();
  } else {
    // Subtract the receiver from the argument count.
    TNode<Int32T> receiver_count = Int32Constant(1);
    args_count = Int32Sub(args.reg_count(), receiver_count);
  }

  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
  TNode<Code> code_target = HeapConstant(callable.code());

  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
                                             TNode<Context> context,
                                             TNode<Word32T> arg_count,
                                             ConvertReceiverMode receiver_mode,
                                             TArgs... args) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
  Callable callable = CodeFactory::Call(isolate());
  TNode<Code> code_target = HeapConstant(callable.code());

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument (the receiver) is implied to be undefined.
#ifdef V8_REVERSE_JSARGS
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...,
                                     UndefinedConstant());
#else
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count,
                                     UndefinedConstant(), args...);
#endif
  } else {
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...);
  }
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>,
    TNode<Object>);

void InterpreterAssembler::CallJSWithSpreadAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
  CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
  Comment("call using CallWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), ConvertReceiverMode::kAny,
      InterpreterPushArgsMode::kWithFinalSpread);
  TNode<Code> code_target = HeapConstant(callable.code());

  TNode<Int32T> receiver_count = Int32Constant(1);
  TNode<Word32T> args_count = Int32Sub(args.reg_count(), receiver_count);
  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

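// Constructs |target| with |new_target| and |args|, collecting construct
// feedback first. Array constructor calls with a known AllocationSite take
// the kArrayFunction push-args variant; everything else goes through the
// generic Construct builtin.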
TNode<Object> InterpreterAssembler::Construct(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  TVARIABLE(Object, var_result);
  TVARIABLE(AllocationSite, var_site);
  Label return_result(this), construct_generic(this),
      construct_array(this, &var_site);

  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
                           slot_id, &construct_generic, &construct_array,
                           &var_site);

  BIND(&construct_generic);
  {
    // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
    Comment("call using Construct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kOther);
    TNode<Code> code_target = HeapConstant(callable.code());
    var_result = CallStub(callable.descriptor(), code_target, context,
                          args.reg_count(), args.base_reg_location(), target,
                          new_target, UndefinedConstant());
    Goto(&return_result);
  }

  BIND(&construct_array);
  {
    // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
    // constructor feedback collection inside of Ignition.
    Comment("call using ConstructArray builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kArrayFunction);
    TNode<Code> code_target = HeapConstant(callable.code());
    var_result = CallStub(callable.descriptor(), code_target, context,
                          args.reg_count(), args.base_reg_location(), target,
                          new_target, var_site.value());
    Goto(&return_result);
  }

  BIND(&return_result);
  return var_result.value();
}

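// Like Construct, but spreads the last argument. Feedback collection is
// inlined here: the slot moves from uninitialized to a weak reference to
// |new_target| (monomorphic), and degrades to megamorphic on a mismatch.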
TNode<Object> InterpreterAssembler::ConstructWithSpread(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  // TODO(bmeurer): Unify this with the Construct bytecode feedback
  // above once we have a way to pass the AllocationSite to the Array
  // constructor _and_ spread the last argument at the same time.
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Label extra_checks(this, Label::kDeferred), construct(this);
  GotoIf(IsUndefined(maybe_feedback_vector), &construct);

  TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<MaybeObject> feedback =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
         &extra_checks);

  BIND(&extra_checks);
  {
    Label check_initialized(this), initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    TNode<BoolT> is_megamorphic = TaggedEqual(
        feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak reference");
    GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      TNode<BoolT> is_uninitialized =
          TaggedEqual(feedback, UninitializedSymbolConstant());
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      TVARIABLE(HeapObject, var_current, CAST(new_target));
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        TNode<HeapObject> current = var_current.value();
        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
               &if_function, &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          TNode<Context> current_context =
              CAST(LoadObjectField(current, JSFunction::kContextOffset));
          TNode<NativeContext> current_native_context =
              LoadNativeContext(current_context);
          Branch(
              TaggedEqual(LoadNativeContext(context), current_native_context),
              &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current = LoadObjectField<HeapObject>(
              current, JSBoundFunction::kBoundTargetFunctionOffset);
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                         CAST(new_target));
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:Initialize");
      Goto(&construct);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct);
  Comment("call using ConstructWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
      isolate(), InterpreterPushArgsMode::kWithFinalSpread);
  TNode<Code> code_target = HeapConstant(callable.code());
  return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
                  args.base_reg_location(), target, new_target,
                  UndefinedConstant());
}

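// Calls the runtime function |function_id| through the InterpreterCEntry
// stub, resolving the entry point from the isolate's runtime function table
// at function_table + function_id * sizeof(Runtime::Function).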
Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                         TNode<Context> context,
                                         const RegListNodePair& args,
                                         int result_size) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
  TNode<Code> code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
  TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate())));
  TNode<Word32T> function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  TNode<WordT> function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  TNode<RawPtrT> function_entry = Load<RawPtrT>(
      function, IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
                   result_size, code_target, context, args.reg_count(),
                   args.base_reg_location(), function_entry);
}

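// Adjusts the interrupt budget stored in the function's FeedbackCell by
// |weight|, also charging the size of the current bytecode. For backward
// jumps the budget is decremented, and Runtime::kBytecodeBudgetInterrupt is
// called if it drops below zero.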
void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
                                                 bool backward) {
  Comment("[ UpdateInterruptBudget");

  // Assert that the weight is positive (negative weights should be implemented
  // as backward updates).
  CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));

  Label load_budget_from_bytecode(this), load_budget_done(this);
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<FeedbackCell> feedback_cell =
      LoadObjectField<FeedbackCell>(function, JSFunction::kFeedbackCellOffset);
  TNode<Int32T> old_budget = LoadObjectField<Int32T>(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset);

  // Make sure we include the current bytecode in the budget calculation.
  TNode<Int32T> budget_after_bytecode =
      Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));

  Label done(this);
  TVARIABLE(Int32T, new_budget);
  if (backward) {
    // Update budget by |weight| and check if it reaches zero.
    new_budget = Int32Sub(budget_after_bytecode, weight);
    TNode<BoolT> condition =
        Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
    Label ok(this), interrupt_check(this, Label::kDeferred);
    Branch(condition, &ok, &interrupt_check);

    BIND(&interrupt_check);
    CallRuntime(Runtime::kBytecodeBudgetInterrupt, GetContext(), function);
    Goto(&done);

    BIND(&ok);
  } else {
    // For a forward jump, we know we only increase the interrupt budget, so
    // no need to check if it's below zero.
    new_budget = Int32Add(budget_after_bytecode, weight);
  }

  // Update budget.
  StoreObjectFieldNoWriteBarrier(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset, new_budget.value());
  Goto(&done);
  BIND(&done);
  Comment("] UpdateInterruptBudget");
}

TNode<IntPtrT> InterpreterAssembler::Advance() {
  return Advance(CurrentBytecodeSize());
}

TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta,
                                             bool backward) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
#endif
  TNode<IntPtrT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
                                        : IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_ = next_offset;
  return next_offset;
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));

  UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward);
  TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward);
  TNode<RawPtrT> target_bytecode =
      UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset));
  DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) {
  Jump(jump_offset, false);
}

void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) {
  Jump(jump_offset, true);
}

void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
                                           TNode<IntPtrT> jump_offset) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
                                             TNode<Object> rhs,
                                             TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
}

void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
                                                TNode<Object> rhs,
                                                TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
}

TNode<WordT> InterpreterAssembler::LoadBytecode(
    TNode<IntPtrT> bytecode_offset) {
  TNode<Uint8T> bytecode =
      Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset);
  return ChangeUint32ToWord(bytecode);
}

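// Peeks at the bytecode about to be dispatched to. If it is Star, executes
// the store inline (see InlineStar) and returns the bytecode that follows,
// saving a full dispatch for the common <expression> Star sequence.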
TNode<WordT> InterpreterAssembler::StarDispatchLookahead(
    TNode<WordT> target_bytecode) {
  Label do_inline_star(this), done(this);

  TVARIABLE(WordT, var_bytecode, target_bytecode);

  TNode<Int32T> star_bytecode =
      Int32Constant(static_cast<int>(Bytecode::kStar));
  TNode<BoolT> is_star =
      Word32Equal(TruncateWordToInt32(target_bytecode), star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  BIND(&do_inline_star);
  {
    InlineStar();
    var_bytecode = LoadBytecode(BytecodeOffset());
    Goto(&done);
  }
  BIND(&done);
  return var_bytecode.value();
}

void InterpreterAssembler::InlineStar() {
  Bytecode previous_bytecode = bytecode_;
  AccumulatorUse previous_acc_use = accumulator_use_;

  bytecode_ = Bytecode::kStar;
  accumulator_use_ = AccumulatorUse::kNone;

#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  StoreRegister(GetAccumulator(),
                BytecodeOperandReg(0, LoadSensitivity::kSafe));

  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  accumulator_use_ = previous_acc_use;
}

void InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> target_offset = Advance();
  TNode<WordT> target_bytecode = LoadBytecode(target_offset);

  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    target_bytecode = StarDispatchLookahead(target_bytecode);
  }
  DispatchToBytecode(target_bytecode, BytecodeOffset());
}

void InterpreterAssembler::DispatchToBytecode(
    TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) {
  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(target_bytecode);
  }

  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_bytecode));

  DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}

void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
  // Propagate speculation poisoning.
  TNode<RawPtrT> poisoned_handler_entry =
      UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry));
  TailCallBytecodeDispatch(InterpreterDispatchDescriptor{},
                           poisoned_handler_entry, GetAccumulatorUnchecked(),
                           bytecode_offset, BytecodeArrayTaggedPointer(),
                           DispatchTablePointer());
}

void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base pointer into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> next_bytecode_offset = Advance(1);
  TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);

  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(next_bytecode);
  }

  TNode<IntPtrT> base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
  }
  TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_index));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}

void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by the number of bytes between the end of the
  // current bytecode and the start of the first one, to simulate backedge to
  // start of function.
  //
  // With headers and current offset, the bytecode array layout looks like:
  //
  //           <---------- simulated backedge ----------
  // | header | first bytecode | .... | return bytecode |
  //  |<------ current offset ------->
  //  ^ tagged bytecode array pointer
  //
  // UpdateInterruptBudget already handles adding the bytecode size to the
  // length of the back-edge, so we just have to correct for the non-zero offset
  // of the first bytecode.

  const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
  TNode<Int32T> profiling_weight =
      Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
               Int32Constant(kFirstBytecodeOffset));
  UpdateInterruptBudget(profiling_weight, true);
}

TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
  return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
                                BytecodeArray::kOsrNestingLevelOffset);
}

void InterpreterAssembler::Abort(AbortReason abort_reason) {
  TNode<Smi> abort_id = SmiConstant(abort_reason);
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
}

void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
                                               TNode<WordT> rhs,
                                               AbortReason abort_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  BIND(&abort);
  Abort(abort_reason);
  Goto(&ok);

  BIND(&ok);
}

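// Supports the debugger's frame-restart feature: if the debug restart frame
// pointer is set, calls the FrameDropperTrampoline, which unwinds to the
// target frame and restarts its function.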
void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {
  TNode<ExternalReference> restart_fp_address =
      ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));

  TNode<IntPtrT> restart_fp = Load<IntPtrT>(restart_fp_address);
  TNode<IntPtrT> null = IntPtrConstant(0);

  Label ok(this), drop_frames(this);
  Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);

1306
  BIND(&drop_frames);
1307 1308 1309
  // We don't expect this call to return since the frame dropper tears down
  // the stack and jumps into the function on the target frame to restart it.
  CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
1310
  Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
1311 1312
  Goto(&ok);

1313
  BIND(&ok);
1314 1315
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
  TNode<ExternalReference> counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  TNode<WordT> counter_offset = TimesSystemPointerSize(
      IntPtrAdd(source_bytecode_table_index, target_bytecode));
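  // The counters table is a flat [source][target] matrix with
  // Bytecode::kLast + 1 columns; e.g. (indices illustrative) source bytecode
  // 3 and target bytecode 7 share the word at 3 * (kLast + 1) + 7.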
  TNode<IntPtrT> old_counter = Load<IntPtrT>(counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);
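  // Counters saturate at the maximum uintptr_t value instead of wrapping:
  // once a cell reaches the max it is simply no longer updated.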

  TNode<BoolT> counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    TNode<IntPtrT> new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC ||   \
    V8_TARGET_ARCH_PPC64
  return true;
#else
#error "Unknown Architecture"
#endif
}

void InterpreterAssembler::AbortIfRegisterCountInvalid(
    TNode<FixedArrayBase> parameters_and_registers,
    TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) {
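  // Invariant checked below: the generator's parameters_and_registers array
  // must have room for all formal parameters followed by all live registers,
  // i.e. formal_parameter_count + register_count <= array length.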
  TNode<IntPtrT> array_size =
      LoadAndUntagFixedArrayBaseLength(parameters_and_registers);

  Label ok(this), abort(this, Label::kDeferred);
  Branch(UintPtrLessThanOrEqual(
             IntPtrAdd(formal_parameter_count, register_count), array_size),
         &ok, &abort);

  BIND(&abort);
  Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
  Goto(&ok);

  BIND(&ok);
}

TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  // Store the formal parameters (without receiver) followed by the
  // registers into the generator's internal parameters_and_registers field.
  TNode<IntPtrT> formal_parameter_count_intptr =
      Signed(ChangeUint32ToWord(formal_parameter_count));
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  {
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    // Iterate over parameters and write them into the array.
    Label loop(this, &var_index), done_loop(this);
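    // Resulting layout (illustrative): with N formal parameters, array[0]
    // through array[N - 1] receive the parameters, and the second loop below
    // stores interpreter register i into array[N + i].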

#ifdef V8_REVERSE_JSARGS
    TNode<IntPtrT> reg_base =
        IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() + 1);
#else
    TNode<IntPtrT> reg_base = IntPtrAdd(
        IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
        formal_parameter_count_intptr);
#endif

    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
                &done_loop);

#ifdef V8_REVERSE_JSARGS
      TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
#else
      TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index);
#endif
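      // Parameter #index therefore lives at reg_base + index when JS
      // arguments are reversed, and at reg_base - index in the legacy
      // layout.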
      TNode<Object> value = LoadRegister(reg_index);

      StoreFixedArrayElement(array, index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  {
    // Iterate over register file and write values into array.
    // The mapping of register to array index must match that used in
    // BytecodeGraphBuilder::VisitResumeGenerator.
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    Label loop(this, &var_index), done_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

      TNode<IntPtrT> reg_index =
          IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
      TNode<Object> value = LoadRegister(reg_index);

      TNode<IntPtrT> array_index =
          IntPtrAdd(formal_parameter_count_intptr, index);
      StoreFixedArrayElement(array, array_index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  return array;
}

TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  TNode<IntPtrT> formal_parameter_count_intptr =
      Signed(ChangeUint32ToWord(formal_parameter_count));
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));

  // Iterate over the array and write its values into the register file. Also
  // erase the array contents so they are not kept alive artificially.
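  // Each slot is overwritten with the stale-register sentinel immediately
  // after its value has been copied back into the register file.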
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    TNode<IntPtrT> index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    TNode<IntPtrT> array_index =
        IntPtrAdd(formal_parameter_count_intptr, index);
    TNode<Object> value = LoadFixedArrayElement(array, array_index);

    TNode<IntPtrT> reg_index =
        IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

    StoreFixedArrayElement(array, array_index, StaleRegisterConstant());

    var_index = IntPtrAdd(index, IntPtrConstant(1));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}

int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}

void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
  TNode<Object> object = GetAccumulator();
  TNode<Context> context = GetContext();

  TVARIABLE(Smi, var_type_feedback);
  TVARIABLE(Numeric, var_result);
  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
      if_objectisother(this, Label::kDeferred);
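  // The feedback recorded below distinguishes kSignedSmall for Smis, kNumber
  // for HeapNumbers, kBigInt for the BigInt special case, and kAny once a
  // conversion builtin had to be called.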

  GotoIf(TaggedIsSmi(object), &if_objectissmi);
  Branch(IsHeapNumber(CAST(object)), &if_objectisheapnumber, &if_objectisother);

  BIND(&if_objectissmi);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
    Goto(&if_done);
  }

  BIND(&if_objectisheapnumber);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    Goto(&if_done);
  }

  BIND(&if_objectisother);
  {
    auto builtin = Builtins::kNonNumberToNumber;
    if (mode == Object::Conversion::kToNumeric) {
      builtin = Builtins::kNonNumberToNumeric;
      // Special case for collecting BigInt feedback.
      Label not_bigint(this);
      GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
      {
        var_result = CAST(object);
        var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
        Goto(&if_done);
      }
      BIND(&not_bigint);
    }

    // Convert {object} by calling out to the appropriate builtin.
    var_result = CAST(CallBuiltin(builtin, context, object));
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
    Goto(&if_done);
  }

  BIND(&if_done);

  // Record the type feedback collected for {object}.
  TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();

  UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);

  SetAccumulator(var_result.value());
  Dispatch();
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8