// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_

#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {
namespace interpreter {

class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 public:
  InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
                       OperandScale operand_scale);
  ~InterpreterAssembler();

  // Returns the 32-bit unsigned count immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandCount(int operand_index);
  // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandFlag(int operand_index);
  // Returns the 32-bit zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIdxInt32(int operand_index);
  // Returns the word zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIdx(int operand_index);
  // Returns the smi index immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandIdxSmi(int operand_index);
  // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandUImm(int operand_index);
  // Returns the word-size unsigned immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandUImmWord(int operand_index);
  // Returns the unsigned smi immediate for bytecode operand |operand_index| in
  // the current bytecode.
  compiler::Node* BytecodeOperandUImmSmi(int operand_index);
  // Returns the 32-bit signed immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandImm(int operand_index);
  // Returns the word-size signed immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
  // Returns the smi immediate for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandImmSmi(int operand_index);
  // Returns the 32-bit unsigned runtime id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandRuntimeId(int operand_index);
  // Returns the 32-bit unsigned native context index immediate for bytecode
  // operand |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandNativeContextIndex(int operand_index);
  // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIntrinsicId(int operand_index);

  // Accumulator.
  compiler::Node* GetAccumulator();
  void SetAccumulator(compiler::Node* value);

  // Context.
  compiler::Node* GetContext();
  void SetContext(compiler::Node* value);

  // Context at |depth| in the context chain starting at |context|.
  compiler::Node* GetContextAtDepth(compiler::Node* context,
                                    compiler::Node* depth);

  // Goto the given |target| if the context chain starting at |context| has any
  // extensions up to the given |depth|.
  void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
                                          compiler::Node* depth, Label* target);

  // A RegListNodePair provides an abstraction over lists of registers.
  class RegListNodePair {
   public:
    RegListNodePair(Node* base_reg_location, Node* reg_count)
        : base_reg_location_(base_reg_location), reg_count_(reg_count) {}

    compiler::Node* reg_count() const { return reg_count_; }
    compiler::Node* base_reg_location() const { return base_reg_location_; }

   private:
    compiler::Node* base_reg_location_;
    compiler::Node* reg_count_;
  };

  // Backup/restore register file to/from a fixed array of the correct length.
  // There is an asymmetry between suspend/export and resume/import.
  // - Suspend copies arguments and registers to the generator.
  // - Resume copies only the registers from the generator, the arguments
  //   are copied by the ResumeGenerator trampoline.
  compiler::Node* ExportParametersAndRegisterFile(
      TNode<FixedArray> array, const RegListNodePair& registers,
      TNode<Int32T> formal_parameter_count);
  compiler::Node* ImportRegisterFile(TNode<FixedArray> array,
                                     const RegListNodePair& registers,
                                     TNode<Int32T> formal_parameter_count);

  // Loads from and stores to the interpreter register file.
  compiler::Node* LoadRegister(Register reg);
  compiler::Node* LoadAndUntagRegister(Register reg);
  compiler::Node* LoadRegisterAtOperandIndex(int operand_index);
  std::pair<compiler::Node*, compiler::Node*> LoadRegisterPairAtOperandIndex(
      int operand_index);
  void StoreRegister(compiler::Node* value, Register reg);
  void StoreAndTagRegister(compiler::Node* value, Register reg);
  void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
  void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
                                       compiler::Node* value2,
                                       int operand_index);
  void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
                                         compiler::Node* value2,
                                         compiler::Node* value3,
                                         int operand_index);

  RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
  Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
                                     int index);
  Node* RegisterLocationInRegisterList(const RegListNodePair& reg_list,
                                       int index);

  // Load constant at the index specified in operand |operand_index| from the
  // constant pool.
  compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
  // Load and untag constant at the index specified in operand |operand_index|
  // from the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntryAtOperandIndex(
      int operand_index);
  // Load constant at |index| in the constant pool.
  compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
  // Load and untag constant at |index| in the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);

  // Load the FeedbackVector for the current function.
  compiler::TNode<FeedbackVector> LoadFeedbackVector();

  // Load the FeedbackVector for the current function. The returned node
  // could be undefined.
  compiler::Node* LoadFeedbackVectorUnchecked();

  // Increment the call count for a CALL_IC or construct call.
  // The call count is located at feedback_vector[slot_id + 1].
  void IncrementCallCount(compiler::Node* feedback_vector,
                          compiler::Node* slot_id);

  // Collect the callable |target| feedback for either a CALL_IC or
  // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
  void CollectCallableFeedback(compiler::Node* target, compiler::Node* context,
                               compiler::Node* feedback_vector,
                               compiler::Node* slot_id);

  // Collect CALL_IC feedback for |target| function in the
  // |feedback_vector| at |slot_id|, and the call counts in
  // the |feedback_vector| at |slot_id+1|.
  void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
                           compiler::Node* maybe_feedback_vector,
                           compiler::Node* slot_id);

  // Call JSFunction or Callable |function| with |args| arguments, possibly
  // including the receiver depending on |receiver_mode|. After the call returns
  // directly dispatches to the next bytecode.
  void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
                         const RegListNodePair& args,
                         ConvertReceiverMode receiver_mode);

  // Call JSFunction or Callable |function| with |arg_count| arguments (not
  // including receiver) passed as |args|, possibly including the receiver
  // depending on |receiver_mode|. After the call returns directly dispatches to
  // the next bytecode.
  template <class... TArgs>
  void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
                         ConvertReceiverMode receiver_mode, TArgs... args);

  // Call JSFunction or Callable |function| with |args|
  // arguments (not including receiver), and the final argument being spread.
  // After the call returns directly dispatches to the next bytecode.
  void CallJSWithSpreadAndDispatch(compiler::Node* function,
                                   compiler::Node* context,
                                   const RegListNodePair& args,
                                   compiler::Node* slot_id,
                                   compiler::Node* feedback_vector);

  // Call constructor |target| with |args| arguments (not including receiver).
  // The |new_target| is the same as the |target| for the new keyword, but
  // differs for the super keyword.
  compiler::Node* Construct(compiler::Node* target, compiler::Node* context,
                            compiler::Node* new_target,
                            const RegListNodePair& args,
                            compiler::Node* slot_id,
                            compiler::Node* feedback_vector);

  // Call constructor |target| with |args| arguments (not including
  // receiver). The last argument is always a spread. The |new_target| is the
  // same as the |target| for the new keyword, but differs for the super
  // keyword.
  compiler::Node* ConstructWithSpread(compiler::Node* target,
                                      compiler::Node* context,
                                      compiler::Node* new_target,
                                      const RegListNodePair& args,
                                      compiler::Node* slot_id,
                                      compiler::Node* feedback_vector);

  // Call runtime function with |args| arguments which will return |return_size|
  // number of values.
  compiler::Node* CallRuntimeN(compiler::Node* function_id,
                               compiler::Node* context,
                               const RegListNodePair& args,
                               int return_size = 1);

  // Jump forward relative to the current bytecode by the |jump_offset|.
  compiler::Node* Jump(compiler::Node* jump_offset);

  // Jump backward relative to the current bytecode by the |jump_offset|.
  compiler::Node* JumpBackward(compiler::Node* jump_offset);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are equal.
  void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
                       compiler::Node* jump_offset);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are not equal.
  void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                          compiler::Node* jump_offset);

  // Updates the profiler interrupt budget for a return.
  void UpdateInterruptBudgetOnReturn();

  // Returns the OSR nesting level from the bytecode header.
  compiler::Node* LoadOSRNestingLevel();

  // Dispatch to the bytecode.
  compiler::Node* Dispatch();

  // Dispatch bytecode as wide operand variant.
  void DispatchWide(OperandScale operand_scale);

  // Dispatch to |target_bytecode| at |new_bytecode_offset|.
  // |target_bytecode| should be equivalent to loading from the offset.
  compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
                                     compiler::Node* new_bytecode_offset);

  // Abort with the given abort reason.
  void Abort(AbortReason abort_reason);
  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           AbortReason abort_reason);
  // Abort if |register_count| is invalid for given register file array.
  void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers,
                                   compiler::Node* formal_parameter_count,
                                   compiler::Node* register_count);

  // Dispatch to frame dropper trampoline if necessary.
  void MaybeDropFrames(compiler::Node* context);

  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
  compiler::Node* BytecodeOffset();

 protected:
  Bytecode bytecode() const { return bytecode_; }
  static bool TargetSupportsUnalignedAccess();

  void ToNumberOrNumeric(Object::Conversion mode);

 private:
  // Returns a tagged pointer to the current function's BytecodeArray object.
  compiler::Node* BytecodeArrayTaggedPointer();

  // Returns a raw pointer to first entry in the interpreter dispatch table.
  compiler::Node* DispatchTableRawPointer();

  // Returns the accumulator value without checking whether bytecode
  // uses it. This is intended to be used only in dispatch and in
  // tracing as these need to bypass accumulator use validity checks.
  compiler::Node* GetAccumulatorUnchecked();

  // Returns the frame pointer for the interpreted frame of the function being
  // interpreted.
  compiler::Node* GetInterpretedFramePointer();

  // Operations on registers.
  compiler::Node* RegisterLocation(Register reg);
  compiler::Node* RegisterLocation(compiler::Node* reg_index);
  compiler::Node* NextRegister(compiler::Node* reg_index);
  compiler::Node* LoadRegister(Node* reg_index);
  void StoreRegister(compiler::Node* value, compiler::Node* reg_index);

  // Saves and restores interpreter bytecode offset to the interpreter stack
  // frame when performing a call.
  void CallPrologue();
  void CallEpilogue();

  // Increment the dispatch counter for the (current, next) bytecode pair.
  void TraceBytecodeDispatch(compiler::Node* target_index);

  // Traces the current bytecode by calling |function_id|.
  void TraceBytecode(Runtime::FunctionId function_id);

  // Updates the bytecode array's interrupt budget by a 32-bit unsigned |weight|
  // and calls Runtime::kInterrupt if counter reaches zero. If |backward|, then
  // the interrupt budget is decremented, otherwise it is incremented.
  void UpdateInterruptBudget(compiler::Node* weight, bool backward);

  // Returns the offset of register |index| relative to RegisterFilePointer().
  compiler::Node* RegisterFrameOffset(compiler::Node* index);

  // Returns the offset of an operand relative to the current bytecode offset.
  compiler::Node* OperandOffset(int operand_index);

  // Returns a value built from a sequence of bytes in the bytecode
  // array starting at |relative_offset| from the current bytecode.
  // The |result_type| determines the size and signedness of the
  // value read. This method should only be used on architectures that
  // do not support unaligned memory accesses.
  compiler::Node* BytecodeOperandReadUnaligned(
      int relative_offset, MachineType result_type,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns zero- or sign-extended to word32 value of the operand.
  compiler::Node* BytecodeOperandUnsignedByte(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedByte(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandUnsignedShort(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedShort(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandUnsignedQuad(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedQuad(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns zero- or sign-extended to word32 value of the operand of
  // given size.
  compiler::Node* BytecodeSignedOperand(
      int operand_index, OperandSize operand_size,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeUnsignedOperand(
      int operand_index, OperandSize operand_size,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the word-size sign-extended register index for bytecode operand
  // |operand_index| in the current bytecode. Value is not poisoned on
  // speculation since the value loaded from the register is poisoned instead.
  compiler::Node* BytecodeOperandReg(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the word zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode, for use when loading a
  // constant pool element.
  compiler::Node* BytecodeOperandConstantPoolIdx(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Jump relative to the current bytecode by the |jump_offset|. If |backward|,
  // then jump backward (subtract the offset), otherwise jump forward (add the
  // offset). Helper function for Jump and JumpBackward.
  compiler::Node* Jump(compiler::Node* jump_offset, bool backward);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // |condition| is true. Helper function for JumpIfWordEqual and
  // JumpIfWordNotEqual.
  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);

  // Save the bytecode offset to the interpreter frame.
  void SaveBytecodeOffset();
  // Reload the bytecode offset from the interpreter frame.
  Node* ReloadBytecodeOffset();

  // Updates and returns BytecodeOffset() advanced by the current bytecode's
  // size. Traces the exit of the current bytecode.
  compiler::Node* Advance();

  // Updates and returns BytecodeOffset() advanced by delta bytecodes.
  // Traces the exit of the current bytecode.
  compiler::Node* Advance(int delta);
  compiler::Node* Advance(compiler::Node* delta, bool backward = false);

  // Load the bytecode at |bytecode_offset|.
  compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);

  // Look ahead for Star and inline it in a branch. Returns a new target
  // bytecode node for dispatch.
  compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);

  // Build code for Star at the current BytecodeOffset() and Advance() to the
  // next dispatch offset.
  void InlineStar();

  // Dispatch to the bytecode handler with code offset |handler|.
  compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
                                            compiler::Node* bytecode_offset,
                                            compiler::Node* target_bytecode);

  // Dispatch to the bytecode handler with code entry point |handler_entry|.
  compiler::Node* DispatchToBytecodeHandlerEntry(
      compiler::Node* handler_entry, compiler::Node* bytecode_offset,
      compiler::Node* target_bytecode);

  int CurrentBytecodeSize() const;

  OperandScale operand_scale() const { return operand_scale_; }

  Bytecode bytecode_;
  OperandScale operand_scale_;
  CodeStubAssembler::Variable interpreted_frame_pointer_;
  CodeStubAssembler::Variable bytecode_array_;
  CodeStubAssembler::Variable bytecode_offset_;
  CodeStubAssembler::Variable dispatch_table_;
  CodeStubAssembler::Variable accumulator_;
  AccumulatorUse accumulator_use_;
  bool made_call_;
  bool reloaded_frame_ptr_;
  bool bytecode_array_valid_;
  bool disable_stack_check_across_call_;
  compiler::Node* stack_pointer_before_call_;

  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};

}  // namespace interpreter
}  // namespace internal
}  // namespace v8

#endif  // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_