// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_

#include "src/allocation.h"
9
#include "src/builtins/builtins.h"
10
#include "src/code-stub-assembler.h"
11
#include "src/globals.h"
12
#include "src/interpreter/bytecode-register.h"
13 14 15 16 17 18 19
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {
namespace interpreter {

class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 public:
  InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
                       OperandScale operand_scale);
  ~InterpreterAssembler();

  // Returns the 32-bit unsigned count immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandCount(int operand_index);
  // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandFlag(int operand_index);
  // Returns the 32-bit zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIdxInt32(int operand_index);
  // Returns the word zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIdx(int operand_index);
  // Returns the smi index immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandIdxSmi(int operand_index);
  // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandUImm(int operand_index);
  // Returns the word-size unsigned immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandUImmWord(int operand_index);
  // Returns the unsigned smi immediate for bytecode operand |operand_index| in
  // the current bytecode.
  compiler::Node* BytecodeOperandUImmSmi(int operand_index);
  // Returns the 32-bit signed immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandImm(int operand_index);
  // Returns the word-size signed immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
  // Returns the smi immediate for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandImmSmi(int operand_index);
  // Returns the 32-bit unsigned runtime id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandRuntimeId(int operand_index);
  // Returns the 32-bit unsigned native context index immediate for bytecode
  // operand |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandNativeContextIndex(int operand_index);
  // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
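
  // Usage sketch (illustrative, not part of this interface): a bytecode
  // handler built on this assembler reads its operands by position in the
  // current bytecode's operand list, e.g. for a hypothetical bytecode with
  // an immediate operand followed by an index operand:
  //
  //   compiler::Node* imm = BytecodeOperandImm(0);  // operand 0: immediate
  //   compiler::Node* idx = BytecodeOperandIdx(1);  // operand 1: index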

  // Accumulator.
  compiler::Node* GetAccumulator();
  void SetAccumulator(compiler::Node* value);

  // Context.
  compiler::Node* GetContext();
  void SetContext(compiler::Node* value);

  // Context at |depth| in the context chain starting at |context|.
  compiler::Node* GetContextAtDepth(compiler::Node* context,
                                    compiler::Node* depth);

  // Goto the given |target| if the context chain starting at |context| has any
  // extensions up to the given |depth|.
  void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
                                          compiler::Node* depth, Label* target);
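
  // Usage sketch (illustrative): handlers that read a slot from an outer
  // context typically branch to a slow path if any intervening context has
  // an extension object:
  //
  //   Label slowpath(this);
  //   GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
  //   compiler::Node* outer = GetContextAtDepth(context, depth);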

  // A RegListNodePair provides an abstraction over lists of registers.
  class RegListNodePair {
   public:
    RegListNodePair(Node* base_reg_location, Node* reg_count)
        : base_reg_location_(base_reg_location), reg_count_(reg_count) {}

    compiler::Node* reg_count() const { return reg_count_; }
    compiler::Node* base_reg_location() const { return base_reg_location_; }

   private:
    compiler::Node* base_reg_location_;
    compiler::Node* reg_count_;
  };
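
  // Usage sketch (illustrative): a register list operand is materialized as
  // a RegListNodePair and handed to the call helpers declared below, e.g.:
  //
  //   RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //   compiler::Node* first_arg = LoadRegisterFromRegisterList(args, 0);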

  // Backup/restore register file to/from a fixed array of the correct length.
  // There is an asymmetry between suspend/export and resume/import.
  // - Suspend copies arguments and registers to the generator.
  // - Resume copies only the registers from the generator, the arguments
  //   are copied by the ResumeGenerator trampoline.
  compiler::Node* ExportParametersAndRegisterFile(
      TNode<FixedArray> array, const RegListNodePair& registers,
      TNode<Int32T> formal_parameter_count);
  compiler::Node* ImportRegisterFile(TNode<FixedArray> array,
                                     const RegListNodePair& registers,
                                     TNode<Int32T> formal_parameter_count);

  // Loads from and stores to the interpreter register file.
  compiler::Node* LoadRegister(Register reg);
  compiler::Node* LoadAndUntagRegister(Register reg);
  compiler::Node* LoadRegisterAtOperandIndex(int operand_index);
  std::pair<compiler::Node*, compiler::Node*> LoadRegisterPairAtOperandIndex(
      int operand_index);
  void StoreRegister(compiler::Node* value, Register reg);
  void StoreAndTagRegister(compiler::Node* value, Register reg);
  void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
  void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
                                       compiler::Node* value2,
                                       int operand_index);
  void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
                                         compiler::Node* value2,
                                         compiler::Node* value3,
                                         int operand_index);

  RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
  Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
                                     int index);
  Node* RegisterLocationInRegisterList(const RegListNodePair& reg_list,
                                       int index);

  // Load constant at the index specified in operand |operand_index| from the
  // constant pool.
  compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
  // Load and untag constant at the index specified in operand |operand_index|
  // from the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntryAtOperandIndex(
      int operand_index);
  // Load constant at |index| in the constant pool.
  compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
  // Load and untag constant at |index| in the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);

  // Load the FeedbackVector for the current function.
  compiler::TNode<FeedbackVector> LoadFeedbackVector();

  // Increment the call count for a CALL_IC or construct call.
  // The call count is located at feedback_vector[slot_id + 1].
  void IncrementCallCount(compiler::Node* feedback_vector,
                          compiler::Node* slot_id);

  // Collect the callable |target| feedback for either a CALL_IC or
  // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
  void CollectCallableFeedback(compiler::Node* target, compiler::Node* context,
                               compiler::Node* feedback_vector,
                               compiler::Node* slot_id);

  // Collect CALL_IC feedback for |target| function in the
  // |feedback_vector| at |slot_id|, and the call counts in
  // the |feedback_vector| at |slot_id+1|.
  void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
                           compiler::Node* feedback_vector,
                           compiler::Node* slot_id);

  // Call JSFunction or Callable |function| with |args| arguments, possibly
  // including the receiver depending on |receiver_mode|. After the call
  // returns, directly dispatches to the next bytecode.
  void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
                         const RegListNodePair& args,
                         ConvertReceiverMode receiver_mode);

  // Call JSFunction or Callable |function| with |arg_count| arguments (not
  // including receiver) passed as |args|, possibly including the receiver
  // depending on |receiver_mode|. After the call returns, directly dispatches
  // to the next bytecode.
  template <class... TArgs>
  void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
                         ConvertReceiverMode receiver_mode, TArgs... args);
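
  // Usage sketch (illustrative): the variadic form suits handlers whose
  // arguments are individual nodes rather than a register list; here
  // |receiver| and |arg| stand for previously computed nodes:
  //
  //   CallJSAndDispatch(function, context, Int32Constant(1),
  //                     ConvertReceiverMode::kAny, receiver, arg);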

  // Call JSFunction or Callable |function| with |args| arguments (not
  // including receiver), with the final argument being a spread. After the
  // call returns, directly dispatches to the next bytecode.
  void CallJSWithSpreadAndDispatch(compiler::Node* function,
                                   compiler::Node* context,
                                   const RegListNodePair& args,
                                   compiler::Node* slot_id,
                                   compiler::Node* feedback_vector);

  // Call constructor |target| with |args| arguments (not including receiver).
  // The |new_target| is the same as the |target| for the new keyword, but
  // differs for the super keyword.
  compiler::Node* Construct(compiler::Node* target, compiler::Node* context,
                            compiler::Node* new_target,
                            const RegListNodePair& args,
                            compiler::Node* slot_id,
                            compiler::Node* feedback_vector);

  // Call constructor |target| with |args| arguments (not including
  // receiver). The last argument is always a spread. The |new_target| is the
  // same as the |target| for the new keyword, but differs for the super
  // keyword.
  compiler::Node* ConstructWithSpread(compiler::Node* target,
                                      compiler::Node* context,
                                      compiler::Node* new_target,
                                      const RegListNodePair& args,
                                      compiler::Node* slot_id,
                                      compiler::Node* feedback_vector);

  // Call runtime function with |args| arguments, which will return
  // |return_size| values.
  compiler::Node* CallRuntimeN(compiler::Node* function_id,
                               compiler::Node* context,
                               const RegListNodePair& args,
                               int return_size = 1);
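
  // Usage sketch (illustrative): a runtime-call handler can forward its
  // operands directly:
  //
  //   compiler::Node* function_id = BytecodeOperandRuntimeId(0);
  //   RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //   SetAccumulator(CallRuntimeN(function_id, GetContext(), args));
  //   Dispatch();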

  // Jump forward relative to the current bytecode by the |jump_offset|.
  compiler::Node* Jump(compiler::Node* jump_offset);

  // Jump backward relative to the current bytecode by the |jump_offset|.
  compiler::Node* JumpBackward(compiler::Node* jump_offset);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are equal.
  void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
                       compiler::Node* jump_offset);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are not equal.
  void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                          compiler::Node* jump_offset);
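
  // Usage sketch (illustrative): a conditional-jump handler compares the
  // accumulator against a constant and jumps if equal:
  //
  //   compiler::Node* relative_jump = BytecodeOperandUImmWord(0);
  //   JumpIfWordEqual(GetAccumulator(), TrueConstant(), relative_jump);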

  // Updates the profiler interrupt budget for a return.
  void UpdateInterruptBudgetOnReturn();

  // Returns the OSR nesting level from the bytecode header.
  compiler::Node* LoadOSRNestingLevel();

  // Dispatch to the bytecode.
  compiler::Node* Dispatch();

  // Dispatch bytecode as wide operand variant.
  void DispatchWide(OperandScale operand_scale);

  // Dispatch to |target_bytecode| at |new_bytecode_offset|.
  // |target_bytecode| should be equivalent to loading from the offset.
  compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
                                     compiler::Node* new_bytecode_offset);
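
  // Usage sketch (illustrative): most handlers end with a plain Dispatch();
  // DispatchToBytecode is for cases where the next bytecode and offset have
  // already been computed, roughly:
  //
  //   compiler::Node* next_offset = Advance();
  //   compiler::Node* next_bytecode = LoadBytecode(next_offset);
  //   DispatchToBytecode(next_bytecode, next_offset);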

  // Abort with the given abort reason.
  void Abort(AbortReason abort_reason);
  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           AbortReason abort_reason);
  // Abort if |register_count| is invalid for the given register file array.
  void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers,
                                   compiler::Node* formal_parameter_count,
                                   compiler::Node* register_count);

  // Dispatch to frame dropper trampoline if necessary.
  void MaybeDropFrames(compiler::Node* context);

  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
  compiler::Node* BytecodeOffset();

 protected:
  Bytecode bytecode() const { return bytecode_; }
  static bool TargetSupportsUnalignedAccess();

  void ToNumberOrNumeric(Object::Conversion mode);

 private:
  // Returns a tagged pointer to the current function's BytecodeArray object.
  compiler::Node* BytecodeArrayTaggedPointer();

  // Returns a raw pointer to the first entry in the interpreter dispatch
  // table.
  compiler::Node* DispatchTableRawPointer();

  // Returns the accumulator value without checking whether bytecode
  // uses it. This is intended to be used only in dispatch and in
  // tracing as these need to bypass accumulator use validity checks.
  compiler::Node* GetAccumulatorUnchecked();

  // Returns the frame pointer for the interpreted frame of the function being
  // interpreted.
  compiler::Node* GetInterpretedFramePointer();

  // Operations on registers.
  compiler::Node* RegisterLocation(Register reg);
  compiler::Node* RegisterLocation(compiler::Node* reg_index);
  compiler::Node* NextRegister(compiler::Node* reg_index);
  compiler::Node* LoadRegister(Node* reg_index);
  void StoreRegister(compiler::Node* value, compiler::Node* reg_index);

  // Saves and restores interpreter bytecode offset to the interpreter stack
  // frame when performing a call.
  void CallPrologue();
  void CallEpilogue();

  // Increment the dispatch counter for the (current, next) bytecode pair.
  void TraceBytecodeDispatch(compiler::Node* target_index);

  // Traces the current bytecode by calling |function_id|.
  void TraceBytecode(Runtime::FunctionId function_id);

  // Updates the bytecode array's interrupt budget by a 32-bit unsigned
  // |weight| and calls Runtime::kInterrupt if the counter reaches zero. If
  // |backward|, then the interrupt budget is decremented, otherwise it is
  // incremented.
  void UpdateInterruptBudget(compiler::Node* weight, bool backward);

  // Returns the offset of register |index| relative to RegisterFilePointer().
  compiler::Node* RegisterFrameOffset(compiler::Node* index);

  // Returns the offset of an operand relative to the current bytecode offset.
  compiler::Node* OperandOffset(int operand_index);

  // Returns a value built from a sequence of bytes in the bytecode
  // array starting at |relative_offset| from the current bytecode.
  // The |result_type| determines the size and signedness of the
  // value read. This method should only be used on architectures that
  // do not support unaligned memory accesses.
  compiler::Node* BytecodeOperandReadUnaligned(
      int relative_offset, MachineType result_type,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the value of the operand, zero- or sign-extended to word32.
  compiler::Node* BytecodeOperandUnsignedByte(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedByte(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandUnsignedShort(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedShort(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandUnsignedQuad(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedQuad(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the value of the operand of the given size, zero- or
  // sign-extended to word32.
  compiler::Node* BytecodeSignedOperand(
      int operand_index, OperandSize operand_size,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeUnsignedOperand(
      int operand_index, OperandSize operand_size,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the word-size sign-extended register index for bytecode operand
  // |operand_index| in the current bytecode. Value is not poisoned on
  // speculation since the value loaded from the register is poisoned instead.
  compiler::Node* BytecodeOperandReg(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the word zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode for use when loading a constant
  // pool element.
  compiler::Node* BytecodeOperandConstantPoolIdx(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Jump relative to the current bytecode by the |jump_offset|. If |backward|,
  // then jump backward (subtract the offset), otherwise jump forward (add the
  // offset). Helper function for Jump and JumpBackward.
  compiler::Node* Jump(compiler::Node* jump_offset, bool backward);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // |condition| is true. Helper function for JumpIfWordEqual and
  // JumpIfWordNotEqual.
  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);

  // Save the bytecode offset to the interpreter frame.
  void SaveBytecodeOffset();
  // Reload the bytecode offset from the interpreter frame.
  Node* ReloadBytecodeOffset();

  // Updates and returns BytecodeOffset() advanced by the current bytecode's
  // size. Traces the exit of the current bytecode.
  compiler::Node* Advance();

  // Updates and returns BytecodeOffset() advanced by delta bytecodes.
  // Traces the exit of the current bytecode.
  compiler::Node* Advance(int delta);
  compiler::Node* Advance(compiler::Node* delta, bool backward = false);

  // Load the bytecode at |bytecode_offset|.
  compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);

  // Look ahead for Star and inline it in a branch. Returns a new target
  // bytecode node for dispatch.
  compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);

  // Build code for Star at the current BytecodeOffset() and Advance() to the
  // next dispatch offset.
  void InlineStar();
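
  // Illustrative pseudo-code (the real implementation emits CSA branches):
  // the lookahead folds the common "expression bytecode followed by Star"
  // pattern into a single dispatch:
  //
  //   if (target_bytecode == Star) {
  //     InlineStar();  // stores the accumulator and advances the offset
  //     target_bytecode = LoadBytecode(BytecodeOffset());
  //   }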

  // Dispatch to the bytecode handler with code offset |handler|.
  compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
                                            compiler::Node* bytecode_offset,
                                            compiler::Node* target_bytecode);

  // Dispatch to the bytecode handler with code entry point |handler_entry|.
  compiler::Node* DispatchToBytecodeHandlerEntry(
      compiler::Node* handler_entry, compiler::Node* bytecode_offset,
      compiler::Node* target_bytecode);

  int CurrentBytecodeSize() const;

  OperandScale operand_scale() const { return operand_scale_; }

  Bytecode bytecode_;
  OperandScale operand_scale_;
  CodeStubAssembler::Variable interpreted_frame_pointer_;
  CodeStubAssembler::Variable bytecode_array_;
  CodeStubAssembler::Variable bytecode_offset_;
  CodeStubAssembler::Variable dispatch_table_;
  CodeStubAssembler::Variable accumulator_;
  AccumulatorUse accumulator_use_;
  bool made_call_;
  bool reloaded_frame_ptr_;
  bool bytecode_array_valid_;
  bool disable_stack_check_across_call_;
  compiler::Node* stack_pointer_before_call_;

  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};

}  // namespace interpreter
}  // namespace internal
}  // namespace v8

#endif  // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_