// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/objects-inl.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;

InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
      TVARIABLE_CONSTRUCTOR(bytecode_array_,
                            Parameter<BytecodeArray>(
                                InterpreterDispatchDescriptor::kBytecodeArray)),
      TVARIABLE_CONSTRUCTOR(
          bytecode_offset_,
          UncheckedParameter<IntPtrT>(
              InterpreterDispatchDescriptor::kBytecodeOffset)),
      TVARIABLE_CONSTRUCTOR(dispatch_table_,
                            UncheckedParameter<ExternalReference>(
                                InterpreterDispatchDescriptor::kDispatchTable)),
      TVARIABLE_CONSTRUCTOR(
          accumulator_,
          Parameter<Object>(InterpreterDispatchDescriptor::kAccumulator)),
      implicit_register_use_(ImplicitRegisterUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true) {
#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if the bytecode will make a call
  // along the critical path, or if it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails, the handler does not use the accumulator
  // in the way described in the bytecode definitions in bytecodes.h.
  DCHECK_EQ(implicit_register_use_,
            Bytecodes::GetImplicitRegisterUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       UncheckedParameter<IntPtrT>(
           InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_ = ReloadBytecodeOffset();
  }
  return bytecode_offset_.value();
}

TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
  TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
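    // Illustration (assuming a Wide-prefixed bytecode): the stream is
    // [Wide, <bytecode>, <wide operands...>], and the saved offset points at
    // the Wide prefix, so the actual bytecode sits at offset + 1.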
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}

void InterpreterAssembler::SaveBytecodeOffset() {
  TNode<IntPtrT> bytecode_offset = BytecodeOffset();
  if (operand_scale() != OperandScale::kSingle) {
    // Subtract one from the bytecode_offset such that it points to the Wide /
    // ExtraWide prefix bytecode.
    bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
  }
  int store_offset =
      Register::bytecode_offset().ToOperand() * kSystemPointerSize;
  TNode<RawPtrT> base = GetInterpretedFramePointer();

  if (SmiValuesAre32Bits()) {
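    // Layout sketch: with 32-bit Smi values on a 64-bit target, a tagged Smi
    // keeps its payload in one 32-bit half of the word and zero in the other,
    // so the two halves can be stored as separate untagged words without a
    // write barrier.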
    int zero_offset = store_offset + 4;
    int payload_offset = store_offset;
#if V8_TARGET_LITTLE_ENDIAN
    std::swap(zero_offset, payload_offset);
#endif
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(zero_offset), Int32Constant(0));
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(payload_offset),
                        TruncateIntPtrToInt32(bytecode_offset));
  } else {
    StoreFullTaggedNoWriteBarrier(base, IntPtrConstant(store_offset),
                                  SmiTag(bytecode_offset));
  }
}

TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the debugger
  // has been activated.
  if (!bytecode_array_valid_) {
    bytecode_array_ = CAST(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}

TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       UncheckedParameter<ExternalReference>(
           InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_ = ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate()));
  }
  return dispatch_table_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator;
  return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}

void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
  accumulator_ = value;
}

TNode<Context> InterpreterAssembler::GetContext() {
  return CAST(LoadRegister(Register::current_context()));
}

void InterpreterAssembler::SetContext(TNode<Context> value) {
  StoreRegister(value, Register::current_context());
}

TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
                                                       TNode<Uint32T> depth) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  Label context_found(this);

  Label context_search(this, {&cur_depth, &cur_context});

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context =
        CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
    TNode<IntPtrT> reg_index) {
  return Signed(WordPoisonOnSpeculation(
      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}

TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
  return TimesSystemPointerSize(index);
}

TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        RegisterFrameOffset(reg_index),
                        LoadSensitivity::kCritical);
}

TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
}

TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  TNode<RawPtrT> base = GetInterpretedFramePointer();
  int index = reg.ToOperand() * kSystemPointerSize;
  if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
    index += 4;
#endif
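    // The Smi payload occupies the upper half-word, which on little-endian
    // targets is the higher-addressed 4 bytes, hence the +4 above.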
    return ChangeInt32ToIntPtr(Load<Int32T>(base, IntPtrConstant(index)));
  } else {
    return SmiToIntPtr(CAST(LoadFullTagged(base, IntPtrConstant(index))));
  }
}

TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
    int operand_index) {
  return LoadRegister(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
  DCHECK_EQ(OperandType::kRegPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  return std::make_pair(LoadRegister(first_reg_index),
                        LoadRegister(second_reg_index));
}

InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  TNode<IntPtrT> base_reg = RegisterLocation(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
  TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}

TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
  // Location is already poisoned on speculation, so no need to poison here.
  return LoadFullTagged(location);
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_ASSERT(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
}

void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) {
  StoreFullTaggedNoWriteBarrier(
      GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}

void InterpreterAssembler::StoreRegister(TNode<Object> value,
                                         TNode<IntPtrT> reg_index) {
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
                                RegisterFrameOffset(reg_index), value);
}

void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
                                                     TNode<WordT> opcode) {
  DCHECK(Bytecodes::IsShortStar(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteShortStar;

  CSA_ASSERT(
      this, UintPtrGreaterThanOrEqual(opcode, UintPtrConstant(static_cast<int>(
                                                  Bytecode::kFirstShortStar))));
  CSA_ASSERT(
      this,
      UintPtrLessThanOrEqual(
          opcode, UintPtrConstant(static_cast<int>(Bytecode::kLastShortStar))));

  // Compute the constant that we can add to a Bytecode value to map the range
  // [Bytecode::kStar15, Bytecode::kStar0] to the range
  // [Register(15).ToOperand(), Register(0).ToOperand()].
  constexpr int short_star_to_operand =
      Register(0).ToOperand() - static_cast<int>(Bytecode::kStar0);
  // Make sure the values count in the right direction.
  STATIC_ASSERT(short_star_to_operand ==
                Register(1).ToOperand() - static_cast<int>(Bytecode::kStar1));
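  // Worked example (illustrative): for opcode == Bytecode::kStar3, the sum
  // opcode + short_star_to_operand equals Register(3).ToOperand(), so the
  // store below targets register r3's frame slot.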

  TNode<IntPtrT> offset =
      IntPtrAdd(RegisterFrameOffset(Signed(opcode)),
                IntPtrConstant(short_star_to_operand * kSystemPointerSize));
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), offset, value);
}

void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
                                                       int operand_index) {
  StoreRegister(value,
                BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
                                                           TNode<Object> value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}

void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    TNode<Object> value1, TNode<Object> value2, TNode<Object> value3,
    int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  TNode<IntPtrT> third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}

TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
}

TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Uint8T>(BytecodeArrayTaggedPointer(),
                      IntPtrAdd(BytecodeOffset(), operand_offset),
                      needs_poisoning);
}

TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Int8T>(BytecodeArrayTaggedPointer(),
                     IntPtrAdd(BytecodeOffset(), operand_offset),
                     needs_poisoning);
}

TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type,
    LoadSensitivity needs_poisoning) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant byte into bytes[0] and then, in order, down
  // to the least significant byte in bytes[count - 1].
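  // Worked example (illustrative): for a little-endian Uint16 operand whose
  // stream bytes are {0xAA, 0xBB}, bytes[0] reads 0xBB (the MSB, at the
  // higher address) and bytes[1] reads 0xAA; the packing loop below then
  // yields 0xBBAA.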
  DCHECK_LE(count, kMaxCount);
  TNode<Word32T> bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    TNode<IntPtrT> offset =
        IntPtrConstant(relative_offset + msb_offset + i * kStep);
    TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] =
        UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
                                    array_offset, needs_poisoning));
  }

  // Pack LSB to MSB.
  TNode<Word32T> result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    TNode<Int32T> shift = Int32Constant(i * kBitsPerByte);
    TNode<Word32T> value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}

TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Uint16T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Uint16(), needs_poisoning));
  }
}

TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Int16T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Int16(), needs_poisoning));
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Uint32T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Uint32(), needs_poisoning));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
512 513 514 515
    return Load<Int32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Int32T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Int32(), needs_poisoning));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord(
    int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromUint32(BytecodeOperandUImm(operand_index));
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr(
    int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}

TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
    int operand_index) {
  TNode<IntPtrT> index =
      ChangeInt32ToIntPtr(Signed(BytecodeOperandIdxInt32(operand_index)));
  return IntPtrToTaggedIndex(index);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
    int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
    int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
  TNode<FixedArray> constant_pool = CAST(LoadObjectField(
      BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
  return UnsafeLoadFixedArrayElement(constant_pool,
                                     UncheckedCast<IntPtrT>(index), 0,
                                     LoadSensitivity::kCritical);
}

TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
    TNode<WordT> index) {
  return SmiUntag(CAST(LoadConstantPoolEntry(index)));
}

TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  TNode<UintPtrT> index =
      BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
  return LoadConstantPoolEntry(index);
}

TNode<IntPtrT>
InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
    int operand_index) {
  return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
}

TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  return CodeStubAssembler::LoadFeedbackVector(function);
}

void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been called
    // in a path that dominates _all_ of those calls (which we don't track).
    SaveBytecodeOffset();
  }

  bytecode_array_valid_ = false;
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {}

void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    ConvertReceiverMode receiver_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);

  TNode<Word32T> args_count;
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The receiver is implied, so it is not in the argument list.
    args_count = args.reg_count();
  } else {
    // Subtract the receiver from the argument count.
    TNode<Int32T> receiver_count = Int32Constant(1);
    args_count = Int32Sub(args.reg_count(), receiver_count);
  }

  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
  TNode<Code> code_target = HeapConstant(callable.code());

  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
                                             TNode<Context> context,
                                             TNode<Word32T> arg_count,
                                             ConvertReceiverMode receiver_mode,
                                             TArgs... args) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
  Callable callable = CodeFactory::Call(isolate());
  TNode<Code> code_target = HeapConstant(callable.code());

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument (the receiver) is implied to be undefined.
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...,
                                     UndefinedConstant());
  } else {
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...);
  }
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>,
    TNode<Object>);

void InterpreterAssembler::CallJSWithSpreadAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
  LazyNode<Object> receiver = [=] { return LoadRegisterAtOperandIndex(1); };
  CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
                      slot_id);
  Comment("call using CallWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), ConvertReceiverMode::kAny,
      InterpreterPushArgsMode::kWithFinalSpread);
  TNode<Code> code_target = HeapConstant(callable.code());

  TNode<Int32T> receiver_count = Int32Constant(1);
  TNode<Word32T> args_count = Int32Sub(args.reg_count(), receiver_count);
  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

TNode<Object> InterpreterAssembler::Construct(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  TVARIABLE(Object, var_result);
  TVARIABLE(AllocationSite, var_site);
  Label return_result(this), construct_generic(this),
      construct_array(this, &var_site);

  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
                           slot_id, UpdateFeedbackMode::kOptionalFeedback,
                           &construct_generic, &construct_array, &var_site);

  BIND(&construct_generic);
  {
    // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
    Comment("call using Construct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kOther);
    var_result =
        CallStub(callable, context, args.reg_count(), args.base_reg_location(),
                 target, new_target, UndefinedConstant());
    Goto(&return_result);
  }

  BIND(&construct_array);
  {
    // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
    // constructor feedback collection inside of Ignition.
    Comment("call using ConstructArray builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kArrayFunction);
    var_result =
        CallStub(callable, context, args.reg_count(), args.base_reg_location(),
                 target, new_target, var_site.value());
    Goto(&return_result);
  }

  BIND(&return_result);
  return var_result.value();
}

TNode<Object> InterpreterAssembler::ConstructWithSpread(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  // TODO(bmeurer): Unify this with the Construct bytecode feedback
  // above once we have a way to pass the AllocationSite to the Array
  // constructor _and_ spread the last argument at the same time.
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Label extra_checks(this, Label::kDeferred), construct(this);
  GotoIf(IsUndefined(maybe_feedback_vector), &construct);

  TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<MaybeObject> feedback =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
         &extra_checks);

  BIND(&extra_checks);
  {
    Label check_initialized(this), initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    TNode<BoolT> is_megamorphic = TaggedEqual(
        feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak reference");
    GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      TNode<BoolT> is_uninitialized =
          TaggedEqual(feedback, UninitializedSymbolConstant());
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      TVARIABLE(HeapObject, var_current, CAST(new_target));
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        TNode<HeapObject> current = var_current.value();
        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(IsJSFunctionInstanceType(current_instance_type), &if_function,
               &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          TNode<Context> current_context =
              CAST(LoadObjectField(current, JSFunction::kContextOffset));
          TNode<NativeContext> current_native_context =
              LoadNativeContext(current_context);
          Branch(
              TaggedEqual(LoadNativeContext(context), current_native_context),
              &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current = LoadObjectField<HeapObject>(
              current, JSBoundFunction::kBoundTargetFunctionOffset);
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                         CAST(new_target));
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:Initialize");
      Goto(&construct);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct);
  Comment("call using ConstructWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
      isolate(), InterpreterPushArgsMode::kWithFinalSpread);
  return CallStub(callable, context, args.reg_count(), args.base_reg_location(),
                  target, new_target, UndefinedConstant());
}

template <class T>
TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                            TNode<Context> context,
                                            const RegListNodePair& args,
                                            int return_count) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), return_count);
  TNode<Code> code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
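  // The table is a C array of Runtime::Function records, so the record for
  // |function_id| starts at table + function_id * sizeof(Runtime::Function).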
  TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate())));
  TNode<Word32T> function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  TNode<WordT> function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  TNode<RawPtrT> function_entry = Load<RawPtrT>(
      function, IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStub<T>(callable.descriptor(), code_target, context,
                     args.reg_count(), args.base_reg_location(),
                     function_entry);
}

template V8_EXPORT_PRIVATE TNode<Object> InterpreterAssembler::CallRuntimeN(
    TNode<Uint32T> function_id, TNode<Context> context,
    const RegListNodePair& args, int return_count);
template V8_EXPORT_PRIVATE TNode<PairT<Object, Object>>
InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                   TNode<Context> context,
                                   const RegListNodePair& args,
                                   int return_count);

void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
                                                 bool backward) {
  Comment("[ UpdateInterruptBudget");

  // Assert that the weight is positive (negative weights should be implemented
  // as backward updates).
  CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));

  Label load_budget_from_bytecode(this), load_budget_done(this);
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<FeedbackCell> feedback_cell =
      LoadObjectField<FeedbackCell>(function, JSFunction::kFeedbackCellOffset);
  TNode<Int32T> old_budget = LoadObjectField<Int32T>(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset);

  // Make sure we include the current bytecode in the budget calculation.
  TNode<Int32T> budget_after_bytecode =
      Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));

  Label done(this);
  TVARIABLE(Int32T, new_budget);
  if (backward) {
    // Update budget by |weight| and check if it reaches zero.
    new_budget = Int32Sub(budget_after_bytecode, weight);
    TNode<BoolT> condition =
        Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
    Label ok(this), interrupt_check(this, Label::kDeferred);
    Branch(condition, &ok, &interrupt_check);

    BIND(&interrupt_check);
    // JumpLoop should do a stack check as part of the interrupt.
    CallRuntime(
        bytecode() == Bytecode::kJumpLoop
            ? Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode
            : Runtime::kBytecodeBudgetInterruptFromBytecode,
        GetContext(), function);
    Goto(&done);

    BIND(&ok);
  } else {
    // For a forward jump, we know we only increase the interrupt budget, so
    // no need to check if it's below zero.
    new_budget = Int32Add(budget_after_bytecode, weight);
  }

  // Update budget.
  StoreObjectFieldNoWriteBarrier(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset, new_budget.value());
  Goto(&done);
  BIND(&done);
  Comment("] UpdateInterruptBudget");
}

TNode<IntPtrT> InterpreterAssembler::Advance() {
  return Advance(CurrentBytecodeSize());
}

TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta,
                                             bool backward) {
#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
#endif
  TNode<IntPtrT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
                                        : IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_ = next_offset;
  return next_offset;
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));

  UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward);
  TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward);
  TNode<RawPtrT> target_bytecode =
      UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset));
  DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) {
  Jump(jump_offset, false);
}

void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) {
  Jump(jump_offset, true);
}

void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
                                           TNode<IntPtrT> jump_offset) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
                                             TNode<Object> rhs,
                                             TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
}

void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
                                                TNode<Object> rhs,
                                                TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
}

TNode<WordT> InterpreterAssembler::LoadBytecode(
    TNode<IntPtrT> bytecode_offset) {
  TNode<Uint8T> bytecode =
      Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset);
  return ChangeUint32ToWord(bytecode);
}

void InterpreterAssembler::StarDispatchLookahead(TNode<WordT> target_bytecode) {
  Label do_inline_star(this), done(this);

  // Check whether the following opcode is one of the short Star codes. All
  // opcodes higher than the short Star variants are invalid, and invalid
  // opcodes are never deliberately written, so we can use a one-sided check.
  // This is no less secure than the normal-length Star handler, which performs
  // no validation on its operand.
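  // Illustration: for a sequence like "Add r1; Star0", the Add handler's
  // dispatch peeks at the next opcode here, sees a short Star, and inlines
  // the register store instead of dispatching to a separate Star handler.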
  STATIC_ASSERT(static_cast<int>(Bytecode::kLastShortStar) + 1 ==
                static_cast<int>(Bytecode::kIllegal));
  STATIC_ASSERT(Bytecode::kIllegal == Bytecode::kLast);
  TNode<Int32T> first_short_star_bytecode =
      Int32Constant(static_cast<int>(Bytecode::kFirstShortStar));
  TNode<BoolT> is_star = Uint32GreaterThanOrEqual(
      TruncateWordToInt32(target_bytecode), first_short_star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  BIND(&do_inline_star);
  {
    InlineShortStar(target_bytecode);

    // Rather than merging control flow to a single indirect jump, we can get
    // better branch prediction by duplicating it. This is because the
    // instruction following a merged X + StarN is a bad predictor of the
    // instruction following a non-merged X, and vice versa.
    DispatchToBytecode(LoadBytecode(BytecodeOffset()), BytecodeOffset());
  }
  BIND(&done);
}

void InterpreterAssembler::InlineShortStar(TNode<WordT> target_bytecode) {
  Bytecode previous_bytecode = bytecode_;
  ImplicitRegisterUse previous_acc_use = implicit_register_use_;

  // At this point we don't know statically what bytecode we're executing, but
  // kStar0 has the right attributes (namely, no operands) for any of the short
  // Star codes.
  bytecode_ = Bytecode::kStar0;
  implicit_register_use_ = ImplicitRegisterUse::kNone;

#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif

  StoreRegisterForShortStar(GetAccumulator(), target_bytecode);

  DCHECK_EQ(implicit_register_use_,
            Bytecodes::GetImplicitRegisterUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  implicit_register_use_ = previous_acc_use;
}

void InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> target_offset = Advance();
  TNode<WordT> target_bytecode = LoadBytecode(target_offset);
  DispatchToBytecodeWithOptionalStarLookahead(target_bytecode);
}

void InterpreterAssembler::DispatchToBytecodeWithOptionalStarLookahead(
    TNode<WordT> target_bytecode) {
  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    StarDispatchLookahead(target_bytecode);
  }
  DispatchToBytecode(target_bytecode, BytecodeOffset());
}

void InterpreterAssembler::DispatchToBytecode(
    TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) {
  if (V8_IGNITION_DISPATCH_COUNTING_BOOL) {
    TraceBytecodeDispatch(target_bytecode);
  }

  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_bytecode));

  DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}

void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
  // Propagate speculation poisoning.
  TNode<RawPtrT> poisoned_handler_entry =
      UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry));
  TailCallBytecodeDispatch(InterpreterDispatchDescriptor{},
                           poisoned_handler_entry, GetAccumulatorUnchecked(),
                           bytecode_offset, BytecodeArrayTaggedPointer(),
                           DispatchTablePointer());
}

void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base index into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
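  //
  // For instance (illustrative), an ExtraWide-prefixed bytecode whose raw
  // byte value is 0x0D dispatches through table index 512 + 0x0D.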
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> next_bytecode_offset = Advance(1);
  TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);

  if (V8_IGNITION_DISPATCH_COUNTING_BOOL) {
    TraceBytecodeDispatch(next_bytecode);
  }

  TNode<IntPtrT> base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
  }
  TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_index));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}

void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by the number of bytes between the end of the
  // current bytecode and the start of the first one, to simulate backedge to
  // start of function.
  //
  // With headers and current offset, the bytecode array layout looks like:
  //
  //           <---------- simulated backedge ----------
  // | header | first bytecode | .... | return bytecode |
  //  |<------ current offset ------->
  //  ^ tagged bytecode array pointer
  //
  // UpdateInterruptBudget already handles adding the bytecode size to the
  // length of the back-edge, so we just have to correct for the non-zero offset
  // of the first bytecode.

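  //
  // For example, a return bytecode at offset 120 (a hypothetical offset, for
  // illustration only) yields a profiling weight of
  // 120 - kFirstBytecodeOffset, roughly the size of the function's bytecode.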
  const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
  TNode<Int32T> profiling_weight =
      Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
               Int32Constant(kFirstBytecodeOffset));
  UpdateInterruptBudget(profiling_weight, true);
}

TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
  return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
                                BytecodeArray::kOsrLoopNestingLevelOffset);
}

void InterpreterAssembler::Abort(AbortReason abort_reason) {
  TNode<Smi> abort_id = SmiConstant(abort_reason);
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
}

void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
                                               TNode<WordT> rhs,
                                               AbortReason abort_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  BIND(&abort);
  Abort(abort_reason);
  Goto(&ok);

  BIND(&ok);
}

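// If baseline code already exists, OSR tail-calls the ToBaseline builtin;
// otherwise it calls the InterpreterOnStackReplacement stub and continues
// with the backward jump in Ignition.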
void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
                                              TNode<IntPtrT> relative_jump) {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<HeapObject> shared_info = LoadJSFunctionSharedFunctionInfo(function);
  TNode<Object> sfi_data =
      LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
  TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));

  Label baseline(this);
  GotoIf(InstanceTypeEqual(data_type, BASELINE_DATA_TYPE), &baseline);
  {
    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
    CallStub(callable, context);
    JumpBackward(relative_jump);
  }

  BIND(&baseline);
  {
    Callable callable =
        CodeFactory::InterpreterOnStackReplacement_ToBaseline(isolate());
    // We already compiled the baseline code, so we don't need to handle failed
    // compilation as in the Ignition -> Turbofan case. Therefore we can just
    // tailcall to the OSR builtin.
    SaveBytecodeOffset();
    TailCallStub(callable, context);
  }
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
  TNode<ExternalReference> counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

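  // The dispatch counters form a flat (kLast + 1) x (kLast + 1) matrix of
  // uintptr_t counters; the slot for a (source, target) pair lives at index
  // source * (kLast + 1) + target, scaled to a byte offset below.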
  TNode<WordT> counter_offset = TimesSystemPointerSize(
      IntPtrAdd(source_bytecode_table_index, target_bytecode));
  TNode<IntPtrT> old_counter = Load<IntPtrT>(counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  TNode<BoolT> counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    TNode<IntPtrT> new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC ||   \
    V8_TARGET_ARCH_PPC64
  return true;
#else
#error "Unknown Architecture"
#endif
}

void InterpreterAssembler::AbortIfRegisterCountInvalid(
    TNode<FixedArrayBase> parameters_and_registers,
    TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) {
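  // The generator's parameters_and_registers array must have room for all
  // formal parameters followed by all live registers.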
  TNode<IntPtrT> array_size =
      LoadAndUntagFixedArrayBaseLength(parameters_and_registers);

  Label ok(this), abort(this, Label::kDeferred);
  Branch(UintPtrLessThanOrEqual(
             IntPtrAdd(formal_parameter_count, register_count), array_size),
         &ok, &abort);

  BIND(&abort);
  Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
  Goto(&ok);

  BIND(&ok);
}

TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  // Store the formal parameters (without receiver) followed by the
  // registers into the generator's internal parameters_and_registers field.
  TNode<IntPtrT> formal_parameter_count_intptr =
      Signed(ChangeUint32ToWord(formal_parameter_count));
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  {
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    // Iterate over parameters and write them into the array.
    Label loop(this, &var_index), done_loop(this);

    TNode<IntPtrT> reg_base =
        IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() + 1);
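    // reg_base + i is then the operand of the register holding formal
    // parameter i; the + 1 skips the receiver, which is not exported.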

    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
                &done_loop);

      TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
      TNode<Object> value = LoadRegister(reg_index);

      StoreFixedArrayElement(array, index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  {
    // Iterate over register file and write values into array.
    // The mapping of register to array index must match that used in
    // BytecodeGraphBuilder::VisitResumeGenerator.
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    Label loop(this, &var_index), done_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

      TNode<IntPtrT> reg_index =
          IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
      TNode<Object> value = LoadRegister(reg_index);

      TNode<IntPtrT> array_index =
          IntPtrAdd(formal_parameter_count_intptr, index);
      StoreFixedArrayElement(array, array_index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  return array;
}

TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  TNode<IntPtrT> formal_parameter_count_intptr =
      Signed(ChangeUint32ToWord(formal_parameter_count));
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));

  // Iterate over array and write values into register file.  Also erase the
  // array contents to not keep them alive artificially.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    TNode<IntPtrT> index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    TNode<IntPtrT> array_index =
        IntPtrAdd(formal_parameter_count_intptr, index);
    TNode<Object> value = LoadFixedArrayElement(array, array_index);

    TNode<IntPtrT> reg_index =
        IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

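    // Overwrite the exported slot with the stale-register sentinel so the
    // array does not keep the value alive after it has moved back into the
    // register file.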
    StoreFixedArrayElement(array, array_index, StaleRegisterConstant());

    var_index = IntPtrAdd(index, IntPtrConstant(1));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}

int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}

void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
  TNode<Object> object = GetAccumulator();
  TNode<Context> context = GetContext();

  TVARIABLE(Smi, var_type_feedback);
  TVARIABLE(Numeric, var_result);
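  // Type feedback recorded below: kSignedSmall for Smis, kNumber for
  // HeapNumbers, kBigInt for BigInts (kToNumeric mode only), and kAny for
  // values that go through the conversion builtin.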
  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
      if_objectisother(this, Label::kDeferred);

  GotoIf(TaggedIsSmi(object), &if_objectissmi);
  Branch(IsHeapNumber(CAST(object)), &if_objectisheapnumber, &if_objectisother);

  BIND(&if_objectissmi);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
    Goto(&if_done);
  }

  BIND(&if_objectisheapnumber);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    Goto(&if_done);
  }

  BIND(&if_objectisother);
  {
    auto builtin = Builtin::kNonNumberToNumber;
    if (mode == Object::Conversion::kToNumeric) {
      builtin = Builtin::kNonNumberToNumeric;
      // Special case for collecting BigInt feedback.
      Label not_bigint(this);
      GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
      {
        var_result = CAST(object);
        var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
        Goto(&if_done);
      }
      BIND(&not_bigint);
    }

    // Convert {object} by calling out to the appropriate builtin.
    var_result = CAST(CallBuiltin(builtin, context, object));
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
    Goto(&if_done);
  }

  BIND(&if_done);

  // Record the type feedback collected for {object}.
  TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();

  MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                      slot_index);

  SetAccumulator(var_result.value());
  Dispatch();
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8