// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/code-factory.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;
using compiler::Node;

InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      bytecode_offset_(this, MachineType::PointerRepresentation()),
      interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
      accumulator_(this, MachineRepresentation::kTagged),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      disable_stack_check_across_call_(false),
      stack_pointer_before_call_(nullptr) {
  accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
  bytecode_offset_.Bind(
      Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
  if (FLAG_trace_ignition) {
    TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
  }
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails, the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

Node* InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
  }
  return interpreted_frame_pointer_.value();
}

Node* InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

Node* InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
  return GetAccumulatorUnchecked();
}

void InterpreterAssembler::SetAccumulator(Node* value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
  accumulator_.Bind(value);
}

Node* InterpreterAssembler::GetContext() {
  return LoadRegister(Register::current_context());
}

void InterpreterAssembler::SetContext(Node* value) {
  StoreRegister(value, Register::current_context());
}

Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Label context_found(this);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  Bind(&context_search);
  {
    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  Bind(&context_found);
  return cur_context.value();
}

void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
                                                              Node* depth,
                                                              Label* target) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Loop until the depth is 0.
  Goto(&context_search);
  Bind(&context_search);
  {
    // TODO(leszeks): We only need to do this check if the context had a sloppy
    // eval, we could pass in a context chain bitmask to figure out which
    // contexts actually need to be checked.

    Node* extension_slot =
        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);

    // Jump to the target if the extension slot is not a hole.
    GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);

    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
           &context_search);
  }
}

Node* InterpreterAssembler::BytecodeOffset() {
  return bytecode_offset_.value();
}

Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
  if (made_call_) {
    // If we have made a call, restore bytecode array from stack frame in case
    // the debugger has swapped us to the patched debugger bytecode array.
    return LoadRegister(Register::bytecode_array());
  } else {
    return Parameter(InterpreterDispatchDescriptor::kBytecodeArray);
  }
}

Node* InterpreterAssembler::DispatchTableRawPointer() {
  return Parameter(InterpreterDispatchDescriptor::kDispatchTable);
}

Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
  return IntPtrAdd(GetInterpretedFramePointer(),
                   RegisterFrameOffset(reg_index));
}

Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
  return WordShl(index, kPointerSizeLog2);
}

Node* InterpreterAssembler::LoadRegister(Register reg) {
  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
              IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
}

Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
              RegisterFrameOffset(reg_index));
}

Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
  return StoreNoWriteBarrier(
      MachineRepresentation::kTagged, GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
}

Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
  return StoreNoWriteBarrier(MachineRepresentation::kTagged,
                             GetInterpretedFramePointer(),
                             RegisterFrameOffset(reg_index), value);
}

Node* InterpreterAssembler::NextRegister(Node* reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return IntPtrAdd(reg_index, IntPtrConstant(-1));
}

Node* InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset));
}

Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset));
}

compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
      break;
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant byte into bytes[0] and then in order
  // down to the least significant in bytes[count - 1].
  DCHECK(count <= kMaxCount);
  compiler::Node* bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset);
  }

  // Pack LSB to MSB.
  Node* result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    Node* shift = Int32Constant(i * kBitsPerByte);
    Node* value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}

Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
  }
}

Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
  }
}

Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
  }
}

Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
  }
}

Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
                                                  OperandSize operand_size) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
                                                    OperandSize operand_size) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromWord32(BytecodeOperandImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  DCHECK(OperandType::kIdx ==
         Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(BytecodeOperandIdx(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size));
}

Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
  DCHECK(OperandType::kRuntimeId ==
         Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
  DCHECK(OperandType::kIntrinsicId ==
         Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
                                        BytecodeArray::kConstantPoolOffset);
  return LoadFixedArrayElement(constant_pool, index);
}

Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
  return SmiUntag(LoadConstantPoolEntry(index));
}

Node* InterpreterAssembler::LoadTypeFeedbackVector() {
  Node* function = LoadRegister(Register::function_closure());
  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
  Node* vector =
      LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
  return vector;
}

void InterpreterAssembler::CallPrologue() {
  StoreRegister(SmiTag(BytecodeOffset()), Register::bytecode_offset());

  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    DCHECK(stack_pointer_before_call_ == nullptr);
    stack_pointer_before_call_ = LoadStackPointer();
  }
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {
  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    Node* stack_pointer_after_call = LoadStackPointer();
    Node* stack_pointer_before_call = stack_pointer_before_call_;
    stack_pointer_before_call_ = nullptr;
    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
                        kUnexpectedStackPointer);
  }
}

Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
                                               Node* slot_id) {
  Comment("increment call count");
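  // The call count is stored in the feedback vector slot immediately
  // following this call's feedback slot.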
  Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
  Node* call_count =
      LoadFixedArrayElement(type_feedback_vector, call_count_slot);
  Node* new_count = SmiAdd(call_count, SmiConstant(1));
  // Count is Smi, so we don't need a write barrier.
  return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
                                new_count, SKIP_WRITE_BARRIER);
}

Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
                                               Node* first_arg, Node* arg_count,
                                               Node* slot_id,
                                               Node* type_feedback_vector,
                                               TailCallMode tail_call_mode) {
  // Static checks to assert it is safe to examine the type feedback element.
  // We don't know that we have a weak cell. We might have a private symbol
  // or an AllocationSite, but the memory is safe to examine.
  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
  // FixedArray.
  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
  // computed, meaning that it can't appear to be a pointer. If the low bit is
  // 0, then hash is computed, but the 0 bit prevents the field from appearing
  // to be a pointer.
  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
                    WeakCell::kValueOffset &&
                WeakCell::kValueOffset == Symbol::kHashFieldSlot);

  Variable return_value(this, MachineRepresentation::kTagged);
  Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
      end(this);

  // The checks. First, does function match the recorded monomorphic target?
  Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
  Node* is_monomorphic = WordEqual(function, feedback_value);
  GotoUnless(is_monomorphic, &extra_checks);

  // The compare above could have been a SMI/SMI comparison. Guard against
  // this convincing us that we have a monomorphic JSFunction.
  Node* is_smi = TaggedIsSmi(function);
  Branch(is_smi, &extra_checks, &call_function);

  Bind(&call_function);
  {
    // Increment the call count.
    IncrementCallCount(type_feedback_vector, slot_id);

    // Call using call function builtin.
    Callable callable = CodeFactory::InterpreterPushArgsAndCall(
        isolate(), tail_call_mode, CallableType::kJSFunction);
    Node* code_target = HeapConstant(callable.code());
    Node* ret_value = CallStub(callable.descriptor(), code_target, context,
                               arg_count, first_arg, function);
    return_value.Bind(ret_value);
    Goto(&end);
  }

  Bind(&extra_checks);
  {
    Label check_initialized(this), mark_megamorphic(this),
        create_allocation_site(this);

    Comment("check if megamorphic");
    // Check if it is a megamorphic target.
    Node* is_megamorphic = WordEqual(
        feedback_element,
        HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &call);

    Comment("check if it is an allocation site");
    GotoUnless(IsAllocationSiteMap(LoadMap(feedback_element)),
               &check_initialized);

    // If it is not the Array() function, mark megamorphic.
    Node* context_slot = LoadContextElement(LoadNativeContext(context),
                                            Context::ARRAY_FUNCTION_INDEX);
    Node* is_array_function = WordEqual(context_slot, function);
    GotoUnless(is_array_function, &mark_megamorphic);

    // It is a monomorphic Array function. Increment the call count.
    IncrementCallCount(type_feedback_vector, slot_id);

    // Call ArrayConstructorStub.
    Callable callable_call =
        CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
    Node* code_target_call = HeapConstant(callable_call.code());
    Node* ret_value =
        CallStub(callable_call.descriptor(), code_target_call, context,
                 arg_count, function, feedback_element, first_arg);
    return_value.Bind(ret_value);
    Goto(&end);

    Bind(&check_initialized);
    {
      Comment("check if uninitialized");
      // Check if it is an uninitialized target first.
      Node* is_uninitialized = WordEqual(
          feedback_element,
          HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
      GotoUnless(is_uninitialized, &mark_megamorphic);

      Comment("handle_unitinitialized");
      // If it is not a JSFunction mark it as megamorphic.
      Node* is_smi = TaggedIsSmi(function);
      GotoIf(is_smi, &mark_megamorphic);

      // Check if function is an object of JSFunction type.
      Node* instance_type = LoadInstanceType(function);
      Node* is_js_function =
          Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
      GotoUnless(is_js_function, &mark_megamorphic);

      // Check if it is the Array() function.
      Node* context_slot = LoadContextElement(LoadNativeContext(context),
                                              Context::ARRAY_FUNCTION_INDEX);
      Node* is_array_function = WordEqual(context_slot, function);
      GotoIf(is_array_function, &create_allocation_site);

      // Check if the function belongs to the same native context.
      Node* native_context = LoadNativeContext(
          LoadObjectField(function, JSFunction::kContextOffset));
      Node* is_same_native_context =
          WordEqual(native_context, LoadNativeContext(context));
      GotoUnless(is_same_native_context, &mark_megamorphic);

      CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
                                     function);

      // Call using call function builtin.
      Goto(&call_function);
    }

    Bind(&create_allocation_site);
    {
      CreateAllocationSiteInFeedbackVector(type_feedback_vector,
                                           SmiTag(slot_id));

      // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state.
      // They start collecting feedback only when a call is executed the second
      // time. So, do not pass any feedback here.
      Goto(&call_function);
    }

    Bind(&mark_megamorphic);
    {
      // Mark it as megamorphic.
      // MegamorphicSentinel is created as a part of Heap::InitialObjects
      // and will not move during a GC. So it is safe to skip write barrier.
      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
      StoreFixedArrayElement(
          type_feedback_vector, slot_id,
          HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      Goto(&call);
    }
  }

  Bind(&call);
  {
    Comment("Increment call count and call using Call builtin");
    // Increment the call count.
    IncrementCallCount(type_feedback_vector, slot_id);

    // Call using call builtin.
    Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
        isolate(), tail_call_mode, CallableType::kAny);
    Node* code_target_call = HeapConstant(callable_call.code());
    Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
                               context, arg_count, first_arg, function);
    return_value.Bind(ret_value);
    Goto(&end);
  }

  Bind(&end);
  return return_value.value();
}

Node* InterpreterAssembler::CallJS(Node* function, Node* context,
                                   Node* first_arg, Node* arg_count,
                                   TailCallMode tail_call_mode) {
  Callable callable = CodeFactory::InterpreterPushArgsAndCall(
      isolate(), tail_call_mode, CallableType::kAny);
  Node* code_target = HeapConstant(callable.code());

  return CallStub(callable.descriptor(), code_target, context, arg_count,
                  first_arg, function);
}

Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
                                          Node* new_target, Node* first_arg,
                                          Node* arg_count, Node* slot_id,
                                          Node* type_feedback_vector) {
  Variable return_value(this, MachineRepresentation::kTagged);
  Variable allocation_feedback(this, MachineRepresentation::kTagged);
  Label call_construct_function(this, &allocation_feedback),
      extra_checks(this, Label::kDeferred), call_construct(this), end(this);

  // Slot id of 0 is used to indicate no type feedback is available.
  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
  Node* is_feedback_unavailable = WordEqual(slot_id, IntPtrConstant(0));
  GotoIf(is_feedback_unavailable, &call_construct);

  // Check that the constructor is not a smi.
  Node* is_smi = TaggedIsSmi(constructor);
  GotoIf(is_smi, &call_construct);

  // Check that constructor is a JSFunction.
  Node* instance_type = LoadInstanceType(constructor);
  Node* is_js_function =
      Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
  GotoUnless(is_js_function, &call_construct);

  // Check if it is a monomorphic constructor.
  Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
  Node* is_monomorphic = WordEqual(constructor, feedback_value);
  allocation_feedback.Bind(UndefinedConstant());
  Branch(is_monomorphic, &call_construct_function, &extra_checks);

  Bind(&call_construct_function);
  {
    Comment("call using callConstructFunction");
    IncrementCallCount(type_feedback_vector, slot_id);
    Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
        isolate(), CallableType::kJSFunction);
    return_value.Bind(CallStub(callable_function.descriptor(),
                               HeapConstant(callable_function.code()), context,
                               arg_count, new_target, constructor,
                               allocation_feedback.value(), first_arg));
    Goto(&end);
  }

  Bind(&extra_checks);
  {
    Label check_allocation_site(this), check_initialized(this),
        initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic target.
    Comment("check if megamorphic");
    Node* is_megamorphic = WordEqual(
        feedback_element,
        HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &call_construct_function);

    Comment("check if weak cell");
    Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
                                   LoadRoot(Heap::kWeakCellMapRootIndex));
    GotoUnless(is_weak_cell, &check_allocation_site);

    // If the weak cell is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak cell is cleared");
    Node* is_smi = TaggedIsSmi(feedback_value);
    Branch(is_smi, &initialize, &mark_megamorphic);

    Bind(&check_allocation_site);
    {
      Comment("check if it is an allocation site");
      Node* is_allocation_site =
          WordEqual(LoadObjectField(feedback_element, 0),
                    LoadRoot(Heap::kAllocationSiteMapRootIndex));
      GotoUnless(is_allocation_site, &check_initialized);

      // Make sure the function is the Array() function.
      Node* context_slot = LoadContextElement(LoadNativeContext(context),
                                              Context::ARRAY_FUNCTION_INDEX);
      Node* is_array_function = WordEqual(context_slot, constructor);
      GotoUnless(is_array_function, &mark_megamorphic);

      allocation_feedback.Bind(feedback_element);
      Goto(&call_construct_function);
    }

    Bind(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      Node* is_uninitialized = WordEqual(
          feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    Bind(&initialize);
    {
      Label create_allocation_site(this), create_weak_cell(this);
      Comment("initialize the feedback element");
      // Create an allocation site if the function is an array function,
      // otherwise create a weak cell.
      Node* context_slot = LoadContextElement(LoadNativeContext(context),
                                              Context::ARRAY_FUNCTION_INDEX);
      Node* is_array_function = WordEqual(context_slot, constructor);
      Branch(is_array_function, &create_allocation_site, &create_weak_cell);

      Bind(&create_allocation_site);
      {
        Node* site = CreateAllocationSiteInFeedbackVector(type_feedback_vector,
                                                          SmiTag(slot_id));
        allocation_feedback.Bind(site);
        Goto(&call_construct_function);
      }

      Bind(&create_weak_cell);
      {
        CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
                                       constructor);
        Goto(&call_construct_function);
      }
    }

    Bind(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
      StoreFixedArrayElement(
          type_feedback_vector, slot_id,
          HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      Goto(&call_construct_function);
    }
  }

  Bind(&call_construct);
  {
    Comment("call using callConstruct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
        isolate(), CallableType::kAny);
    Node* code_target = HeapConstant(callable.code());
    return_value.Bind(CallStub(callable.descriptor(), code_target, context,
                               arg_count, new_target, constructor,
                               UndefinedConstant(), first_arg));
    Goto(&end);
  }

  Bind(&end);
  return return_value.value();
}

Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
                                         Node* first_arg, Node* arg_count,
                                         int result_size) {
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
  Node* code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
  Node* function_table = ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate()));
  Node* function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  Node* function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  Node* function_entry =
      Load(MachineType::Pointer(), function,
           IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStubR(callable.descriptor(), result_size, code_target, context,
                   arg_count, first_arg, function_entry);
}

void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
  // TODO(rmcilroy): It might be worthwhile to only update the budget for
  // backwards branches. Those are distinguishable by the {JumpLoop} bytecode.

  Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
  Node* budget_offset =
      IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);

  // Update budget by |weight| and check if it reaches zero.
  Variable new_budget(this, MachineRepresentation::kWord32);
  Node* old_budget =
      Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
  new_budget.Bind(Int32Add(old_budget, weight));
  Node* condition =
      Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
  Branch(condition, &ok, &interrupt_check);

  // Perform interrupt and reset budget.
  Bind(&interrupt_check);
  {
    CallRuntime(Runtime::kInterrupt, GetContext());
    new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
    Goto(&ok);
  }

  // Update budget.
  Bind(&ok);
  StoreNoWriteBarrier(MachineRepresentation::kWord32,
                      BytecodeArrayTaggedPointer(), budget_offset,
                      new_budget.value());
}

Node* InterpreterAssembler::Advance() {
  return Advance(Bytecodes::Size(bytecode_, operand_scale_));
}

Node* InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

Node* InterpreterAssembler::Advance(Node* delta) {
  if (FLAG_trace_ignition) {
    TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
  }
  Node* next_offset = IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_.Bind(next_offset);
  return next_offset;
}

Node* InterpreterAssembler::Jump(Node* delta) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));

  UpdateInterruptBudget(TruncateWordToWord32(delta));
  Node* new_bytecode_offset = Advance(delta);
  Node* target_bytecode = LoadBytecode(new_bytecode_offset);
  return DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  Bind(&match);
  Jump(delta);
  Bind(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
  JumpConditional(WordEqual(lhs, rhs), delta);
}

void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
                                              Node* delta) {
  JumpConditional(WordNotEqual(lhs, rhs), delta);
}

Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
  Node* bytecode =
      Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
  return ChangeUint32ToWord(bytecode);
}

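// If the bytecode that follows is Star, execute it inline here and return the
// bytecode after it. This avoids a separate dispatch for the register store
// that commonly follows accumulator-writing bytecodes.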
Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
  Label do_inline_star(this), done(this);

  Variable var_bytecode(this, MachineType::PointerRepresentation());
  var_bytecode.Bind(target_bytecode);

  Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
  Node* is_star = WordEqual(target_bytecode, star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  Bind(&do_inline_star);
  {
    InlineStar();
    var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
    Goto(&done);
  }
  Bind(&done);
  return var_bytecode.value();
}

void InterpreterAssembler::InlineStar() {
  Bytecode previous_bytecode = bytecode_;
  AccumulatorUse previous_acc_use = accumulator_use_;

  bytecode_ = Bytecode::kStar;
  accumulator_use_ = AccumulatorUse::kNone;

  if (FLAG_trace_ignition) {
    TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
  }
  StoreRegister(GetAccumulator(), BytecodeOperandReg(0));

  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  accumulator_use_ = previous_acc_use;
}

Node* InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  Node* target_offset = Advance();
  Node* target_bytecode = LoadBytecode(target_offset);

  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    target_bytecode = StarDispatchLookahead(target_bytecode);
  }
  return DispatchToBytecode(target_bytecode, BytecodeOffset());
}

Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
                                               Node* new_bytecode_offset) {
  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(target_bytecode);
  }

  Node* target_code_entry =
      Load(MachineType::Pointer(), DispatchTableRawPointer(),
           WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));

  return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}

Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
                                                      Node* bytecode_offset) {
  // TODO(ishell): Add CSA::CodeEntryPoint(code).
  Node* handler_entry =
      IntPtrAdd(BitcastTaggedToWord(handler),
                IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
  return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
}

Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    Node* handler_entry, Node* bytecode_offset) {
  InterpreterDispatchDescriptor descriptor(isolate());
  return TailCallBytecodeDispatch(
      descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset,
      BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
}

void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base pointer into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
  Node* next_bytecode_offset = Advance(1);
  Node* next_bytecode = LoadBytecode(next_bytecode_offset);

  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(next_bytecode);
  }

  Node* base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
      base_index = nullptr;
  }
  Node* target_index = IntPtrAdd(base_index, next_bytecode);
  Node* target_code_entry =
      Load(MachineType::Pointer(), DispatchTableRawPointer(),
           WordShl(target_index, kPointerSizeLog2));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}

Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
    Node* context, Node* value, Variable* var_type_feedback) {
  // We might need to loop once due to ToNumber conversion.
  Variable var_value(this, MachineRepresentation::kTagged),
      var_result(this, MachineRepresentation::kWord32);
  Variable* loop_vars[] = {&var_value, var_type_feedback};
  Label loop(this, 2, loop_vars), done_loop(this, &var_result);
  var_value.Bind(value);
  var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
  Goto(&loop);
  Bind(&loop);
  {
    // Load the current {value}.
    value = var_value.value();

    // Check if the {value} is a Smi or a HeapObject.
    Label if_valueissmi(this), if_valueisnotsmi(this);
    Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);

    Bind(&if_valueissmi);
    {
      // Convert the Smi {value}.
      var_result.Bind(SmiToWord32(value));
      var_type_feedback->Bind(
          SmiOr(var_type_feedback->value(),
                SmiConstant(BinaryOperationFeedback::kSignedSmall)));
      Goto(&done_loop);
    }

    Bind(&if_valueisnotsmi);
    {
      // Check if {value} is a HeapNumber.
      Label if_valueisheapnumber(this),
          if_valueisnotheapnumber(this, Label::kDeferred);
      Node* value_map = LoadMap(value);
      Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber,
             &if_valueisnotheapnumber);

      Bind(&if_valueisheapnumber);
      {
        // Truncate the floating point value.
        var_result.Bind(TruncateHeapNumberValueToWord32(value));
        var_type_feedback->Bind(
            SmiOr(var_type_feedback->value(),
                  SmiConstant(BinaryOperationFeedback::kNumber)));
        Goto(&done_loop);
      }

      Bind(&if_valueisnotheapnumber);
      {
        // We do not require an Or with earlier feedback here because once we
        // convert the value to a number, we cannot reach this path. We can
        // only reach this path on the first pass when the feedback is kNone.
        CSA_ASSERT(this, SmiEqual(var_type_feedback->value(),
                                  SmiConstant(BinaryOperationFeedback::kNone)));

        Label if_valueisoddball(this),
            if_valueisnotoddball(this, Label::kDeferred);
        Node* is_oddball = Word32Equal(LoadMapInstanceType(value_map),
                                       Int32Constant(ODDBALL_TYPE));
        Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);

        Bind(&if_valueisoddball);
        {
          // Convert Oddball to a Number and perform checks again.
          var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
          var_type_feedback->Bind(
              SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
          Goto(&loop);
        }

        Bind(&if_valueisnotoddball);
        {
          // Convert the {value} to a Number first.
          Callable callable = CodeFactory::NonNumberToNumber(isolate());
          var_value.Bind(CallStub(callable, context, value));
          var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
          Goto(&loop);
        }
      }
    }
  }
  Bind(&done_loop);
  return var_result.value();
}

void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by -BytecodeOffset to simulate backedge to start of
  // function.
  Node* profiling_weight =
      Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
               TruncateWordToWord32(BytecodeOffset()));
  UpdateInterruptBudget(profiling_weight);
}

Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
  Node* sp = LoadStackPointer();
  Node* stack_limit = Load(
      MachineType::Pointer(),
      ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
  return UintPtrLessThan(sp, stack_limit);
}

Node* InterpreterAssembler::LoadOSRNestingLevel() {
  return LoadObjectField(BytecodeArrayTaggedPointer(),
                         BytecodeArray::kOSRNestingLevelOffset,
                         MachineType::Int8());
}

void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
  disable_stack_check_across_call_ = true;
  Node* abort_id = SmiTag(Int32Constant(bailout_reason));
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
  disable_stack_check_across_call_ = false;
}

void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                               BailoutReason bailout_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  Bind(&abort);
  Abort(bailout_reason);
  Goto(&ok);

  Bind(&ok);
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
  Node* counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
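  // The dispatch counters form a flat (kLast + 1) x (kLast + 1) matrix,
  // indexed by [source bytecode][target bytecode].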
  Node* source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  Node* counter_offset =
      WordShl(IntPtrAdd(source_bytecode_table_index, target_bytecode),
              IntPtrConstant(kPointerSizeLog2));
  Node* old_counter =
      Load(MachineType::IntPtr(), counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  Node* counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  Bind(&counter_ok);
  {
    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  Bind(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
    V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
    V8_TARGET_ARCH_PPC
  return true;
#else
#error "Unknown Architecture"
#endif
}

Node* InterpreterAssembler::RegisterCount() {
  Node* bytecode_array = LoadRegister(Register::bytecode_array());
  Node* frame_size = LoadObjectField(
      bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Uint32());
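  // The frame size is recorded in bytes; scale it down to a register count.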
  return WordShr(ChangeUint32ToWord(frame_size),
                 IntPtrConstant(kPointerSizeLog2));
}

Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
  Node* register_count = RegisterCount();
  if (FLAG_debug_code) {
    Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
    AbortIfWordNotEqual(array_size, register_count,
                        kInvalidRegisterFileInGenerator);
  }

  Variable var_index(this, MachineType::PointerRepresentation());
  var_index.Bind(IntPtrConstant(0));

  // Iterate over register file and write values into array.
  // The mapping of register to array index must match that used in
  // BytecodeGraphBuilder::VisitResumeGenerator.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  Bind(&loop);
  {
    Node* index = var_index.value();
    GotoUnless(UintPtrLessThan(index, register_count), &done_loop);

    Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    Node* value = LoadRegister(reg_index);

    StoreFixedArrayElement(array, index, value);

    var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
    Goto(&loop);
  }
  Bind(&done_loop);

  return array;
}

Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
  Node* register_count = RegisterCount();
  if (FLAG_debug_code) {
    Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
    AbortIfWordNotEqual(array_size, register_count,
                        kInvalidRegisterFileInGenerator);
  }

  Variable var_index(this, MachineType::PointerRepresentation());
  var_index.Bind(IntPtrConstant(0));

  // Iterate over array and write values into register file.  Also erase the
  // array contents to not keep them alive artificially.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  Bind(&loop);
  {
    Node* index = var_index.value();
    GotoUnless(UintPtrLessThan(index, register_count), &done_loop);

    Node* value = LoadFixedArrayElement(array, index);

    Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

    StoreFixedArrayElement(array, index, StaleRegisterConstant());

    var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
    Goto(&loop);
  }
  Bind(&done_loop);

  return array;
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8