// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/objects-inl.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;

InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
      TVARIABLE_CONSTRUCTOR(bytecode_array_,
                            Parameter<BytecodeArray>(
                                InterpreterDispatchDescriptor::kBytecodeArray)),
      TVARIABLE_CONSTRUCTOR(
          bytecode_offset_,
          UncheckedParameter<IntPtrT>(
              InterpreterDispatchDescriptor::kBytecodeOffset)),
      TVARIABLE_CONSTRUCTOR(dispatch_table_,
                            UncheckedParameter<ExternalReference>(
                                InterpreterDispatchDescriptor::kDispatchTable)),
      TVARIABLE_CONSTRUCTOR(
          accumulator_,
          Parameter<Object>(InterpreterDispatchDescriptor::kAccumulator)),
      implicit_register_use_(ImplicitRegisterUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true) {
#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if the bytecode will make a call
  // along the critical path, or if it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(implicit_register_use_,
            Bytecodes::GetImplicitRegisterUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

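// Returns the interpreted frame pointer, materializing it on first use. If
// this bytecode makes a call along the critical path, the cached value may be
// stale after the call, so it is reloaded (once) from the parent frame
// pointer.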
TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}

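// Returns the current bytecode offset. If this bytecode makes a call along the
// critical path and the cached value is still the raw dispatch parameter, the
// offset is re-read from the stack frame, where SaveBytecodeOffset spilled it
// before the call.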
TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       UncheckedParameter<IntPtrT>(
           InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_ = ReloadBytecodeOffset();
  }
  return bytecode_offset_.value();
}

TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
  TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}

void InterpreterAssembler::SaveBytecodeOffset() {
  TNode<IntPtrT> bytecode_offset = BytecodeOffset();
  if (operand_scale() != OperandScale::kSingle) {
    // Subtract one from the bytecode_offset such that it points to the Wide /
    // ExtraWide prefix bytecode.
    bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
  }
  int store_offset =
      Register::bytecode_offset().ToOperand() * kSystemPointerSize;
  TNode<RawPtrT> base = GetInterpretedFramePointer();

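  // With 32-bit Smi values on a 64-bit target, the Smi payload occupies the
  // upper half-word of the tagged slot and the lower half-word is zero. The
  // tagged value can therefore be written as two 32-bit stores: a zero word
  // and the untagged payload, with their offsets swapped on little-endian.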
  if (SmiValuesAre32Bits()) {
    int zero_offset = store_offset + 4;
    int payload_offset = store_offset;
#if V8_TARGET_LITTLE_ENDIAN
    std::swap(zero_offset, payload_offset);
#endif
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(zero_offset), Int32Constant(0));
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(payload_offset),
                        TruncateIntPtrToInt32(bytecode_offset));
  } else {
    StoreFullTaggedNoWriteBarrier(base, IntPtrConstant(store_offset),
                                  SmiTag(bytecode_offset));
  }
}

TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the debugger
  // has been activated.
  if (!bytecode_array_valid_) {
    bytecode_array_ = CAST(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}

TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       UncheckedParameter<ExternalReference>(
           InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_ = ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate()));
  }
  return dispatch_table_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator;
  return GetAccumulatorUnchecked();
}

void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
  accumulator_ = value;
}

TNode<Context> InterpreterAssembler::GetContext() {
  return CAST(LoadRegister(Register::current_context()));
}

void InterpreterAssembler::SetContext(TNode<Context> value) {
  StoreRegister(value, Register::current_context());
}

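// Walks up the context chain |depth| times by following the
// Context::PREVIOUS_INDEX slot of each context.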
TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
                                                       TNode<Uint32T> depth) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  Label context_found(this);

  Label context_search(this, {&cur_depth, &cur_context});

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context =
        CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
    TNode<IntPtrT> reg_index) {
  return Signed(
      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}

TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
  return TimesSystemPointerSize(index);
}

TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        RegisterFrameOffset(reg_index));
}

TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
}

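// Loads a register slot holding a Smi and returns the untagged value. With
// 32-bit Smi values only the payload half-word needs to be read; the +4 below
// selects that half-word on little-endian targets.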
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  TNode<RawPtrT> base = GetInterpretedFramePointer();
  int index = reg.ToOperand() * kSystemPointerSize;
  if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
    index += 4;
#endif
    return ChangeInt32ToIntPtr(Load<Int32T>(base, IntPtrConstant(index)));
  } else {
    return SmiToIntPtr(CAST(LoadFullTagged(base, IntPtrConstant(index))));
  }
}

TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
    int operand_index) {
  return LoadRegister(BytecodeOperandReg(operand_index));
}

std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
  DCHECK_EQ(OperandType::kRegPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  return std::make_pair(LoadRegister(first_reg_index),
                        LoadRegister(second_reg_index));
}

InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  TNode<IntPtrT> base_reg = RegisterLocation(BytecodeOperandReg(operand_index));
  TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}

TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
  return LoadFullTagged(location);
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_DCHECK(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
}

void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) {
  StoreFullTaggedNoWriteBarrier(
      GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}

void InterpreterAssembler::StoreRegister(TNode<Object> value,
                                         TNode<IntPtrT> reg_index) {
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
                                RegisterFrameOffset(reg_index), value);
}

void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
                                                     TNode<WordT> opcode) {
  DCHECK(Bytecodes::IsShortStar(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteShortStar;

  CSA_DCHECK(
      this, UintPtrGreaterThanOrEqual(opcode, UintPtrConstant(static_cast<int>(
                                                  Bytecode::kFirstShortStar))));
  CSA_DCHECK(
      this,
      UintPtrLessThanOrEqual(
          opcode, UintPtrConstant(static_cast<int>(Bytecode::kLastShortStar))));

  // Compute the constant that we can add to a Bytecode value to map the range
  // [Bytecode::kStar15, Bytecode::kStar0] to the range
  // [Register(15).ToOperand(), Register(0).ToOperand()].
  constexpr int short_star_to_operand =
      Register(0).ToOperand() - static_cast<int>(Bytecode::kStar0);
  // Make sure the values count in the right direction.
  STATIC_ASSERT(short_star_to_operand ==
                Register(1).ToOperand() - static_cast<int>(Bytecode::kStar1));

  TNode<IntPtrT> offset =
      IntPtrAdd(RegisterFrameOffset(Signed(opcode)),
                IntPtrConstant(short_star_to_operand * kSystemPointerSize));
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), offset, value);
}

void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
                                                       int operand_index) {
  StoreRegister(value, BytecodeOperandReg(operand_index));
}

void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
                                                           TNode<Object> value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}

void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    TNode<Object> value1, TNode<Object> value2, TNode<Object> value3,
    int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  TNode<IntPtrT> third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}

TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
}

TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Uint8T>(BytecodeArrayTaggedPointer(),
                      IntPtrAdd(BytecodeOffset(), operand_offset));
}

TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Int8T>(BytecodeArrayTaggedPointer(),
                     IntPtrAdd(BytecodeOffset(), operand_offset));
}

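// Assembles a multi-byte operand one byte at a time on targets without
// unaligned access support. Worked example: a 16-bit little-endian operand
// stored as {0x34, 0x12} is read MSB-first (bytes[0] = 0x12, bytes[1] = 0x34)
// and packed below as 0x34 | (0x12 << 8) == 0x1234.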
TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant byte into bytes[0] and then in order
  // down to the least significant byte in bytes[count - 1].
  DCHECK_LE(count, kMaxCount);
  TNode<Word32T> bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    TNode<IntPtrT> offset =
        IntPtrConstant(relative_offset + msb_offset + i * kStep);
    TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] = UncheckedCast<Word32T>(
        Load(machine_type, BytecodeArrayTaggedPointer(), array_offset));
  }

  // Pack LSB to MSB.
  TNode<Word32T> result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    TNode<Int32T> shift = Int32Constant(i * kBitsPerByte);
    TNode<Word32T> value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}

TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Uint16T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()));
  }
}

TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Int16T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16()));
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Uint32T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Int32T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32()));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord(
    int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromUint32(BytecodeOperandUImm(operand_index));
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr(
    int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}

TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
    int operand_index) {
  TNode<IntPtrT> index =
      ChangeInt32ToIntPtr(Signed(BytecodeOperandIdxInt32(operand_index)));
  return IntPtrToTaggedIndex(index);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(int operand_index) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
    int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
    int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

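// Constant pool accessors. The pool is a FixedArray hanging off the
// BytecodeArray; the unchecked element load is used presumably because operand
// indices come from the trusted bytecode stream.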
TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
  TNode<FixedArray> constant_pool = CAST(LoadObjectField(
      BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
  return UnsafeLoadFixedArrayElement(constant_pool,
                                     UncheckedCast<IntPtrT>(index), 0);
}

TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
    TNode<WordT> index) {
  return SmiUntag(CAST(LoadConstantPoolEntry(index)));
}

TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  TNode<UintPtrT> index = BytecodeOperandConstantPoolIdx(operand_index);
  return LoadConstantPoolEntry(index);
}

TNode<IntPtrT>
InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
    int operand_index) {
  return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
}

TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  return CodeStubAssembler::LoadFeedbackVector(function);
}

void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been called
    // in a path that dominates _all_ of those calls (which we don't track).
    SaveBytecodeOffset();
  }

  bytecode_array_valid_ = false;
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {}

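// Pushes the arguments and makes the call as a tail call that dispatches to
// the next bytecode on return, so control never comes back to this handler;
// the callee's result lands in the accumulator.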
void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    ConvertReceiverMode receiver_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);

  TNode<Word32T> args_count = args.reg_count();
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // Add receiver. It is not included in args as it is implicit.
    args_count = Int32Add(args_count, Int32Constant(kJSArgcReceiverSlots));
  }

  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
  TNode<CodeT> code_target = HeapConstant(callable.code());

  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
                                             TNode<Context> context,
                                             TNode<Word32T> arg_count,
                                             ConvertReceiverMode receiver_mode,
                                             TArgs... args) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
  Callable callable = CodeFactory::Call(isolate());
  TNode<CodeT> code_target = HeapConstant(callable.code());

  arg_count = JSParameterCount(arg_count);
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument parameter (the receiver) is implied to be undefined.
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...,
                                     UndefinedConstant());
  } else {
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...);
  }
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

// Instantiate CallJSAndDispatch() for argument counts used by the interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>,
    TNode<Object>);

void InterpreterAssembler::CallJSWithSpreadAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
  LazyNode<Object> receiver = [=] { return LoadRegisterAtOperandIndex(1); };
  CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
                      slot_id);
  Comment("call using CallWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), ConvertReceiverMode::kAny,
      InterpreterPushArgsMode::kWithFinalSpread);
  TNode<CodeT> code_target = HeapConstant(callable.code());

  TNode<Word32T> args_count = args.reg_count();
  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

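// Collects construct feedback, then pushes the arguments and invokes one of
// the InterpreterPushArgsThenConstruct builtins: the Array-function variant is
// passed the AllocationSite from the feedback slot, the generic variant a
// placeholder undefined.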
TNode<Object> InterpreterAssembler::Construct(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  TVARIABLE(Object, var_result);
  TVARIABLE(AllocationSite, var_site);
  Label return_result(this), construct_generic(this),
      construct_array(this, &var_site);

  TNode<Word32T> args_count = JSParameterCount(args.reg_count());
  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
                           slot_id, UpdateFeedbackMode::kOptionalFeedback,
                           &construct_generic, &construct_array, &var_site);

  BIND(&construct_generic);
  {
    // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
    Comment("call using Construct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kOther);
    var_result =
        CallStub(callable, context, args_count, args.base_reg_location(),
                 target, new_target, UndefinedConstant());
    Goto(&return_result);
  }

  BIND(&construct_array);
  {
    // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
    // constructor feedback collection inside of Ignition.
    Comment("call using ConstructArray builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kArrayFunction);
    var_result =
        CallStub(callable, context, args_count, args.base_reg_location(),
                 target, new_target, var_site.value());
    Goto(&return_result);
  }

  BIND(&return_result);
  return var_result.value();
}

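// Like Construct above, but spreads the final argument. Feedback is collected
// inline here: the slot transitions uninitialized -> monomorphic (a weak
// reference to {new_target}) -> megamorphic, unwrapping bound functions to
// check that {new_target} belongs to the current native context.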
TNode<Object> InterpreterAssembler::ConstructWithSpread(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  // TODO(bmeurer): Unify this with the Construct bytecode feedback
  // above once we have a way to pass the AllocationSite to the Array
  // constructor _and_ spread the last argument at the same time.
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Label extra_checks(this, Label::kDeferred), construct(this);
  GotoIf(IsUndefined(maybe_feedback_vector), &construct);

  TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<MaybeObject> feedback =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
         &extra_checks);

  BIND(&extra_checks);
  {
    Label check_initialized(this), initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    TNode<BoolT> is_megamorphic = TaggedEqual(
        feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak reference");
    GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      TNode<BoolT> is_uninitialized =
          TaggedEqual(feedback, UninitializedSymbolConstant());
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      TVARIABLE(HeapObject, var_current, CAST(new_target));
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        TNode<HeapObject> current = var_current.value();
        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(IsJSFunctionInstanceType(current_instance_type), &if_function,
               &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          TNode<Context> current_context =
              CAST(LoadObjectField(current, JSFunction::kContextOffset));
          TNode<NativeContext> current_native_context =
              LoadNativeContext(current_context);
          Branch(
              TaggedEqual(LoadNativeContext(context), current_native_context),
              &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current = LoadObjectField<HeapObject>(
              current, JSBoundFunction::kBoundTargetFunctionOffset);
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                         CAST(new_target));
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:Initialize");
      Goto(&construct);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct);
  Comment("call using ConstructWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
      isolate(), InterpreterPushArgsMode::kWithFinalSpread);
  TNode<Word32T> args_count = JSParameterCount(args.reg_count());
  return CallStub(callable, context, args_count, args.base_reg_location(),
                  target, new_target, UndefinedConstant());
}

template <class T>
TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                            TNode<Context> context,
                                            const RegListNodePair& args,
                                            int return_count) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), return_count);
  TNode<CodeT> code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
  TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate())));
  TNode<Word32T> function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  TNode<WordT> function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  TNode<RawPtrT> function_entry = Load<RawPtrT>(
      function, IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStub<T>(callable.descriptor(), code_target, context,
                     args.reg_count(), args.base_reg_location(),
                     function_entry);
}

template V8_EXPORT_PRIVATE TNode<Object> InterpreterAssembler::CallRuntimeN(
    TNode<Uint32T> function_id, TNode<Context> context,
    const RegListNodePair& args, int return_count);
template V8_EXPORT_PRIVATE TNode<PairT<Object, Object>>
InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                   TNode<Context> context,
                                   const RegListNodePair& args,
                                   int return_count);

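// Adjusts the interrupt budget stored on the function's FeedbackCell. For
// backward jumps the budget is decremented and, if it drops below zero, the
// runtime is called (with a stack check when the bytecode is JumpLoop);
// forward jumps only ever increase the budget.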
void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
                                                 bool backward) {
  Comment("[ UpdateInterruptBudget");

  // Assert that the weight is positive (negative weights should be implemented
  // as backward updates).
  CSA_DCHECK(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));

  Label load_budget_from_bytecode(this), load_budget_done(this);
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<FeedbackCell> feedback_cell =
      LoadObjectField<FeedbackCell>(function, JSFunction::kFeedbackCellOffset);
  TNode<Int32T> old_budget = LoadObjectField<Int32T>(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset);

  // Make sure we include the current bytecode in the budget calculation.
  TNode<Int32T> budget_after_bytecode =
      Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));

  Label done(this);
  TVARIABLE(Int32T, new_budget);
  if (backward) {
    // Update budget by |weight| and check if it reaches zero.
    new_budget = Int32Sub(budget_after_bytecode, weight);
    TNode<BoolT> condition =
        Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
    Label ok(this), interrupt_check(this, Label::kDeferred);
    Branch(condition, &ok, &interrupt_check);

    BIND(&interrupt_check);
    // JumpLoop should do a stack check as part of the interrupt.
    CallRuntime(bytecode() == Bytecode::kJumpLoop
                    ? Runtime::kBytecodeBudgetInterruptWithStackCheck
                    : Runtime::kBytecodeBudgetInterrupt,
                GetContext(), function);
    Goto(&done);

    BIND(&ok);
  } else {
    // For a forward jump, we know we only increase the interrupt budget, so
    // no need to check if it's below zero.
    new_budget = Int32Add(budget_after_bytecode, weight);
  }

  // Update budget.
  StoreObjectFieldNoWriteBarrier(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset, new_budget.value());
  Goto(&done);
  BIND(&done);
  Comment("] UpdateInterruptBudget");
}

TNode<IntPtrT> InterpreterAssembler::Advance() {
  return Advance(CurrentBytecodeSize());
}

TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta,
                                             bool backward) {
#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
#endif
  TNode<IntPtrT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
                                        : IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_ = next_offset;
  return next_offset;
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));

  UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward);
  TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward);
  TNode<RawPtrT> target_bytecode =
      UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset));
  DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) {
  Jump(jump_offset, false);
}

void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) {
  Jump(jump_offset, true);
}

void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
                                           TNode<IntPtrT> jump_offset) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpConditionalByImmediateOperand(
    TNode<BoolT> condition, int operand_index) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  TNode<IntPtrT> jump_offset = Signed(BytecodeOperandUImmWord(operand_index));
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpConditionalByConstantOperand(
    TNode<BoolT> condition, int operand_index) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  TNode<IntPtrT> jump_offset =
      LoadAndUntagConstantPoolEntryAtOperandIndex(operand_index);
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
                                             TNode<Object> rhs,
                                             TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
}

void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
                                             TNode<Object> rhs,
                                             int operand_index) {
  JumpConditionalByImmediateOperand(TaggedEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedEqualConstant(TNode<Object> lhs,
                                                     TNode<Object> rhs,
                                                     int operand_index) {
  JumpConditionalByConstantOperand(TaggedEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
                                                TNode<Object> rhs,
                                                TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
}

void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
                                                TNode<Object> rhs,
                                                int operand_index) {
  JumpConditionalByImmediateOperand(TaggedNotEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedNotEqualConstant(TNode<Object> lhs,
                                                        TNode<Object> rhs,
                                                        int operand_index) {
  JumpConditionalByConstantOperand(TaggedNotEqual(lhs, rhs), operand_index);
}

TNode<WordT> InterpreterAssembler::LoadBytecode(
    TNode<IntPtrT> bytecode_offset) {
  TNode<Uint8T> bytecode =
      Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset);
  return ChangeUint32ToWord(bytecode);
}

void InterpreterAssembler::StarDispatchLookahead(TNode<WordT> target_bytecode) {
  Label do_inline_star(this), done(this);

  // Check whether the following opcode is one of the short Star codes. All
  // opcodes higher than the short Star variants are invalid, and invalid
  // opcodes are never deliberately written, so we can use a one-sided check.
  // This is no less secure than the normal-length Star handler, which performs
  // no validation on its operand.
  STATIC_ASSERT(static_cast<int>(Bytecode::kLastShortStar) + 1 ==
                static_cast<int>(Bytecode::kIllegal));
  STATIC_ASSERT(Bytecode::kIllegal == Bytecode::kLast);
  TNode<Int32T> first_short_star_bytecode =
      Int32Constant(static_cast<int>(Bytecode::kFirstShortStar));
  TNode<BoolT> is_star = Uint32GreaterThanOrEqual(
      TruncateWordToInt32(target_bytecode), first_short_star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  BIND(&do_inline_star);
  {
    InlineShortStar(target_bytecode);

    // Rather than merging control flow to a single indirect jump, we can get
    // better branch prediction by duplicating it. This is because the
    // instruction following a merged X + StarN is a bad predictor of the
    // instruction following a non-merged X, and vice versa.
    DispatchToBytecode(LoadBytecode(BytecodeOffset()), BytecodeOffset());
  }
  BIND(&done);
}

void InterpreterAssembler::InlineShortStar(TNode<WordT> target_bytecode) {
  Bytecode previous_bytecode = bytecode_;
  ImplicitRegisterUse previous_acc_use = implicit_register_use_;

  // At this point we don't know statically what bytecode we're executing, but
  // kStar0 has the right attributes (namely, no operands) for any of the short
  // Star codes.
  bytecode_ = Bytecode::kStar0;
  implicit_register_use_ = ImplicitRegisterUse::kNone;

#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif

  StoreRegisterForShortStar(GetAccumulator(), target_bytecode);

  DCHECK_EQ(implicit_register_use_,
            Bytecodes::GetImplicitRegisterUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  implicit_register_use_ = previous_acc_use;
}

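// Dispatch: advance past the current bytecode, load the next one, and
// tail-call its handler through the dispatch table, optionally fusing a
// trailing short Star into this handler (see StarDispatchLookahead above).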
void InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> target_offset = Advance();
  TNode<WordT> target_bytecode = LoadBytecode(target_offset);
  DispatchToBytecodeWithOptionalStarLookahead(target_bytecode);
}

void InterpreterAssembler::DispatchToBytecodeWithOptionalStarLookahead(
    TNode<WordT> target_bytecode) {
  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    StarDispatchLookahead(target_bytecode);
  }
  DispatchToBytecode(target_bytecode, BytecodeOffset());
}

void InterpreterAssembler::DispatchToBytecode(
    TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) {
  if (V8_IGNITION_DISPATCH_COUNTING_BOOL) {
    TraceBytecodeDispatch(target_bytecode);
  }

  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_bytecode));

  DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}

void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
  TailCallBytecodeDispatch(
      InterpreterDispatchDescriptor{}, handler_entry, GetAccumulatorUnchecked(),
      bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTablePointer());
}

void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base pointer into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> next_bytecode_offset = Advance(1);
  TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);

  if (V8_IGNITION_DISPATCH_COUNTING_BOOL) {
    TraceBytecodeDispatch(next_bytecode);
  }

  TNode<IntPtrT> base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
  }
  TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_index));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}

void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by the number of bytes between the end of the
  // current bytecode and the start of the first one, to simulate backedge to
  // start of function.
  //
  // With headers and current offset, the bytecode array layout looks like:
  //
  //           <---------- simulated backedge ----------
  // | header | first bytecode | .... | return bytecode |
  //  |<------ current offset ------->
  //  ^ tagged bytecode array pointer
  //
  // UpdateInterruptBudget already handles adding the bytecode size to the
  // length of the back-edge, so we just have to correct for the non-zero offset
  // of the first bytecode.

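  // For example, if the return bytecode sits at raw offset O within the
  // bytecode array, the weight is O - (kHeaderSize - kHeapObjectTag), i.e.
  // the number of bytecode bytes from the first bytecode up to the return.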
  const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
  TNode<Int32T> profiling_weight =
      Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
               Int32Constant(kFirstBytecodeOffset));
  UpdateInterruptBudget(profiling_weight, true);
}

TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
  return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
                                BytecodeArray::kOsrLoopNestingLevelOffset);
}

void InterpreterAssembler::Abort(AbortReason abort_reason) {
  TNode<Smi> abort_id = SmiConstant(abort_reason);
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
}

void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
                                               TNode<WordT> rhs,
                                               AbortReason abort_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  BIND(&abort);
  Abort(abort_reason);
  Goto(&ok);

  BIND(&ok);
}

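// On-stack replacement: if the function's SharedFunctionInfo already holds
// baseline code, tail-call the OSR-to-baseline builtin; otherwise call the
// Ignition OSR stub and resume by jumping back to the loop header.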
void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
                                              TNode<IntPtrT> relative_jump) {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<HeapObject> shared_info = LoadJSFunctionSharedFunctionInfo(function);
  TNode<Object> sfi_data =
      LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
  TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));

  Label baseline(this);
  GotoIf(InstanceTypeEqual(data_type, CODET_TYPE), &baseline);
  {
    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
    CallStub(callable, context);
    JumpBackward(relative_jump);
  }

  BIND(&baseline);
  {
    Callable callable =
        CodeFactory::InterpreterOnStackReplacement_ToBaseline(isolate());
    // We already compiled the baseline code, so we don't need to handle failed
    // compilation as in the Ignition -> Turbofan case. Therefore we can just
    // tailcall to the OSR builtin.
    SaveBytecodeOffset();
    TailCallStub(callable, context);
  }
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

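// Bumps the (source bytecode, target bytecode) dispatch counter in a flat
// (kLast + 1) x (kLast + 1) table of word-sized counters, saturating once a
// counter reaches the maximum uintptr_t value.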
void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
  TNode<ExternalReference> counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  TNode<WordT> counter_offset = TimesSystemPointerSize(
      IntPtrAdd(source_bytecode_table_index, target_bytecode));
  TNode<IntPtrT> old_counter = Load<IntPtrT>(counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  TNode<BoolT> counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    TNode<IntPtrT> new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC ||   \
    V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
  return true;
#else
#error "Unknown Architecture"
#endif
}

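// Debug-only check: aborts if the generator's parameters_and_registers array
// is too small to hold the formal parameters plus the register file.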
void InterpreterAssembler::AbortIfRegisterCountInvalid(
    TNode<FixedArrayBase> parameters_and_registers,
    TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) {
  TNode<IntPtrT> array_size =
      LoadAndUntagFixedArrayBaseLength(parameters_and_registers);

  Label ok(this), abort(this, Label::kDeferred);
  Branch(UintPtrLessThanOrEqual(
             IntPtrAdd(formal_parameter_count, register_count), array_size),
         &ok, &abort);

  BIND(&abort);
  Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
  Goto(&ok);

  BIND(&ok);
}

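// Copies the formal parameters (without the receiver) and then the register
// file into the generator's parameters_and_registers array, so the suspended
// frame's state survives until the generator is resumed.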
TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  // Store the formal parameters (without receiver) followed by the
  // registers into the generator's internal parameters_and_registers field.
  TNode<IntPtrT> formal_parameter_count_intptr =
      Signed(ChangeUint32ToWord(formal_parameter_count));
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  {
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    // Iterate over parameters and write them into the array.
    Label loop(this, &var_index), done_loop(this);

    TNode<IntPtrT> reg_base =
        IntPtrConstant(Register::FromParameterIndex(0).ToOperand() + 1);

    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
                &done_loop);

      TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
      TNode<Object> value = LoadRegister(reg_index);

      StoreFixedArrayElement(array, index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  {
    // Iterate over register file and write values into array.
    // The mapping of register to array index must match that used in
    // BytecodeGraphBuilder::VisitResumeGenerator.
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    Label loop(this, &var_index), done_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

      TNode<IntPtrT> reg_index =
          IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
      TNode<Object> value = LoadRegister(reg_index);

      TNode<IntPtrT> array_index =
          IntPtrAdd(formal_parameter_count_intptr, index);
      StoreFixedArrayElement(array, array_index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  return array;
}

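// Inverse of ExportParametersAndRegisterFile: copies the saved register
// values from the generator's array back into the register file, replacing
// each consumed array slot with the stale-register sentinel.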
TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  TNode<IntPtrT> formal_parameter_count_intptr =
      Signed(ChangeUint32ToWord(formal_parameter_count));
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));

  // Iterate over array and write values into register file.  Also erase the
  // array contents to not keep them alive artificially.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    TNode<IntPtrT> index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    TNode<IntPtrT> array_index =
        IntPtrAdd(formal_parameter_count_intptr, index);
    TNode<Object> value = LoadFixedArrayElement(array, array_index);

    TNode<IntPtrT> reg_index =
        IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

    StoreFixedArrayElement(array, array_index, StaleRegisterConstant());

    var_index = IntPtrAdd(index, IntPtrConstant(1));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}

int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}

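// Shared implementation of the ToNumber and ToNumeric bytecode handlers:
// converts the accumulator to a Number (or Numeric), records
// SignedSmall/Number/BigInt/Any feedback in the current slot, and dispatches.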
void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
  TNode<Object> object = GetAccumulator();
  TNode<Context> context = GetContext();

  TVARIABLE(Smi, var_type_feedback);
  TVARIABLE(Numeric, var_result);
  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
      if_objectisother(this, Label::kDeferred);

  GotoIf(TaggedIsSmi(object), &if_objectissmi);
  Branch(IsHeapNumber(CAST(object)), &if_objectisheapnumber, &if_objectisother);

  BIND(&if_objectissmi);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
    Goto(&if_done);
  }

  BIND(&if_objectisheapnumber);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    Goto(&if_done);
  }

  BIND(&if_objectisother);
  {
    auto builtin = Builtin::kNonNumberToNumber;
    if (mode == Object::Conversion::kToNumeric) {
      builtin = Builtin::kNonNumberToNumeric;
      // Special case for collecting BigInt feedback.
      Label not_bigint(this);
      GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
      {
        var_result = CAST(object);
        var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
        Goto(&if_done);
      }
      BIND(&not_bigint);
    }

    // Convert {object} by calling out to the appropriate builtin.
    var_result = CAST(CallBuiltin(builtin, context, object));
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
    Goto(&if_done);
  }

  BIND(&if_done);

  // Record the type feedback collected for {object}.
  TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();

  MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                      slot_index);

  SetAccumulator(var_result.value());
  Dispatch();
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8