// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_

#include "src/bailout-reason.h"
#include "src/base/flags.h"
#include "src/globals.h"
#include "src/x64/assembler-x64.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = rax;
constexpr Register kReturnRegister1 = rdx;
constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r12;
constexpr Register kInterpreterBytecodeArrayRegister = r14;
constexpr Register kInterpreterDispatchTableRegister = r15;
constexpr Register kJavaScriptCallArgCountRegister = rax;
constexpr Register kJavaScriptCallNewTargetRegister = rdx;
constexpr Register kRuntimeCallFunctionRegister = rbx;
constexpr Register kRuntimeCallArgCountRegister = rax;

// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee-saved and is not used by the
// function calling convention.
constexpr Register kScratchRegister = r10;
constexpr XMMRegister kScratchDoubleReg = xmm15;
constexpr Register kRootRegister = r13;  // callee save
// The actual value of the root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
constexpr int kRootRegisterBias = 128;
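// An illustrative (not authoritative) sketch of why the bias helps: with the
// bias applied, roots near the start of the array can be reached with a
// negative one-byte displacement, i.e. roughly
//   Operand(kRootRegister, index * kPointerSize - kRootRegisterBias)
// so offsets on either side of the array start fit in a signed 8-bit field.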

// Convenience for platform-independent signatures.
typedef Operand MemOperand;

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

enum class SmiOperationConstraint {
  kPreserveSourceRegister = 1 << 0,
  kBailoutOnNoOverflow = 1 << 1,
  kBailoutOnOverflow = 1 << 2
};

enum class ReturnAddressState { kOnStack, kNotOnStack };

typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;

DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)
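// A minimal usage sketch (register choices and the bailout label are
// illustrative): constraints combine with operator| and are consumed by the
// smi arithmetic helpers declared further below, e.g.
//   SmiAddConstant(rax, rax, Smi::FromInt(1),
//                  SmiOperationConstraint::kPreserveSourceRegister |
//                      SmiOperationConstraint::kBailoutOnOverflow,
//                  &bailout);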

#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
#endif

// Forward declaration.
class JumpTarget;

struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;
  ScaleFactor scale;
};

class TurboAssembler : public Assembler {
 public:
  TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
                 CodeObjectRequired create_code_object);

  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() const { return has_frame_; }

  Isolate* isolate() const { return isolate_; }

  Handle<HeapObject> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
  void macro_name(XMMRegister dst, src_type src) {    \
    if (CpuFeatures::IsSupported(AVX)) {              \
      CpuFeatureScope scope(this, AVX);               \
      v##name(dst, dst, src);                         \
    } else {                                          \
      name(dst, src);                                 \
    }                                                 \
  }
#define AVX_OP2_X(macro_name, name) \
  AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
#define AVX_OP2_O(macro_name, name) \
  AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
#define AVX_OP2_XO(macro_name, name) \
  AVX_OP2_X(macro_name, name)        \
  AVX_OP2_O(macro_name, name)

  AVX_OP2_XO(Subsd, subsd)
  AVX_OP2_XO(Divss, divss)
  AVX_OP2_XO(Divsd, divsd)
  AVX_OP2_XO(Xorpd, xorpd)
  AVX_OP2_X(Pcmpeqd, pcmpeqd)
  AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
  AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)

#undef AVX_OP2_O
#undef AVX_OP2_X
#undef AVX_OP2_XO
#undef AVX_OP2_WITH_TYPE
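  // For example, AVX_OP2_XO(Subsd, subsd) above defines
  //   void Subsd(XMMRegister dst, XMMRegister src);
  //   void Subsd(XMMRegister dst, const Operand& src);
  // which emit vsubsd when AVX is supported and subsd otherwise.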

  void Xorps(XMMRegister dst, XMMRegister src);
  void Xorps(XMMRegister dst, const Operand& src);

  void Movd(XMMRegister dst, Register src);
  void Movd(XMMRegister dst, const Operand& src);
  void Movd(Register dst, XMMRegister src);
  void Movq(XMMRegister dst, Register src);
  void Movq(Register dst, XMMRegister src);

  void Movsd(XMMRegister dst, XMMRegister src);
  void Movsd(XMMRegister dst, const Operand& src);
  void Movsd(const Operand& dst, XMMRegister src);
  void Movss(XMMRegister dst, XMMRegister src);
  void Movss(XMMRegister dst, const Operand& src);
  void Movss(const Operand& dst, XMMRegister src);

  void PushReturnAddressFrom(Register src) { pushq(src); }
  void PopReturnAddressTo(Register dst) { popq(dst); }

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1.  Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, intptr_t x);

  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
    LoadRoot(kScratchRegister, index);
    movp(destination, kScratchRegister);
  }

  void Movups(XMMRegister dst, XMMRegister src);
  void Movups(XMMRegister dst, const Operand& src);
  void Movups(const Operand& dst, XMMRegister src);
  void Movapd(XMMRegister dst, XMMRegister src);
  void Movaps(XMMRegister dst, XMMRegister src);
  void Movmskpd(Register dst, XMMRegister src);
  void Movmskps(Register dst, XMMRegister src);

  void Push(Register src);
  void Push(const Operand& src);
  void Push(Immediate value);
  void Push(Smi* smi);
  void Push(Handle<HeapObject> source);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // The number of slots reserved for arguments depends on platform. On Windows
  // stack slots are reserved for the arguments passed in registers. On other
  // platforms stack slots are only reserved for the arguments actually passed
  // on the stack.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
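  // A hedged usage sketch (the external reference and argument registers are
  // illustrative; arg_reg_1/arg_reg_2 are assumed aliases for the platform's
  // C calling convention):
  //   PrepareCallCFunction(2);
  //   LoadAddress(arg_reg_1, some_external_reference);
  //   movp(arg_reg_2, rbx);
  //   CallCFunction(ExternalReference::some_c_function(isolate()), 2);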

  // Calculate the number of stack slots to reserve for arguments when calling a
  // C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met,
                     Label::Distance condition_met_distance = Label::kFar);

  void Cvtss2sd(XMMRegister dst, XMMRegister src);
  void Cvtss2sd(XMMRegister dst, const Operand& src);
  void Cvtsd2ss(XMMRegister dst, XMMRegister src);
  void Cvtsd2ss(XMMRegister dst, const Operand& src);
  void Cvttsd2si(Register dst, XMMRegister src);
  void Cvttsd2si(Register dst, const Operand& src);
  void Cvttsd2siq(Register dst, XMMRegister src);
  void Cvttsd2siq(Register dst, const Operand& src);
  void Cvttss2si(Register dst, XMMRegister src);
  void Cvttss2si(Register dst, const Operand& src);
  void Cvttss2siq(Register dst, XMMRegister src);
  void Cvttss2siq(Register dst, const Operand& src);
  void Cvtqsi2ss(XMMRegister dst, Register src);
  void Cvtqsi2ss(XMMRegister dst, const Operand& src);
  void Cvtqsi2sd(XMMRegister dst, Register src);
  void Cvtqsi2sd(XMMRegister dst, const Operand& src);
  void Cvtlsi2ss(XMMRegister dst, Register src);
  void Cvtlsi2ss(XMMRegister dst, const Operand& src);
  void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
  void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);

  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
  // register, which hinders register renaming and makes dependence chains
  // longer. So we use xorpd to clear the dst register before cvtsi2sd.
  void Cvtlsi2sd(XMMRegister dst, Register src);
  void Cvtlsi2sd(XMMRegister dst, const Operand& src);

  void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
  void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);

  void Sqrtsd(XMMRegister dst, XMMRegister src);
  void Sqrtsd(XMMRegister dst, const Operand& src);

  void Ucomiss(XMMRegister src1, XMMRegister src2);
  void Ucomiss(XMMRegister src1, const Operand& src2);
  void Ucomisd(XMMRegister src1, XMMRegister src2);
  void Ucomisd(XMMRegister src1, const Operand& src2);

  void Lzcntq(Register dst, Register src);
  void Lzcntq(Register dst, const Operand& src);
  void Lzcntl(Register dst, Register src);
  void Lzcntl(Register dst, const Operand& src);
  void Tzcntq(Register dst, Register src);
  void Tzcntq(Register dst, const Operand& src);
  void Tzcntl(Register dst, Register src);
  void Tzcntl(Register dst, const Operand& src);
  void Popcntl(Register dst, Register src);
  void Popcntl(Register dst, const Operand& src);
  void Popcntq(Register dst, Register src);
  void Popcntq(Register dst, const Operand& src);

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);
  Condition CheckSmi(const Operand& src);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src, Label* on_smi,
                 Label::Distance near_jump = Label::kFar);

  void Move(Register dst, Smi* source);

  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movp(dst, constant);
  }

  void Move(Register dst, ExternalReference ext) {
    movp(dst, reinterpret_cast<void*>(ext.address()),
         RelocInfo::EXTERNAL_REFERENCE);
  }

  void Move(XMMRegister dst, uint32_t src);
  void Move(XMMRegister dst, uint64_t src);
  void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
  void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  void Move(Register dst, Handle<HeapObject> source,
            RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
  void Move(const Operand& dst, Handle<HeapObject> source,
            RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);

  // Loads a pointer into a register with a relocation mode.
  void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
    // This method must not be used with heap object references. The stored
    // address is not GC safe. Use the handle version instead.
    DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
    movp(dst, ptr, rmode);
  }

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);
  void SmiToInteger32(Register dst, const Operand& src);

  // Loads the address of the external reference into the destination
  // register.
  void LoadAddress(Register destination, ExternalReference source);

  void Call(const Operand& op);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Label* target) { call(target); }

  void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
    call(target, rmode);
  }

  // The size of the code generated for different call instructions.
  int CallSize(ExternalReference ext);
  int CallSize(Address destination) { return kCallSequenceLength; }
  int CallSize(Handle<Code> code_object) {
    // Code calls use 32-bit relative addressing.
    return kShortCallInstructionLength;
  }
  int CallSize(Register target) {
    // Opcode: REX_opt FF /2 m64
    return (target.high_bit() != 0) ? 3 : 2;
  }
  int CallSize(const Operand& target) {
    // Opcode: REX_opt FF /2 m64
    return (target.requires_rex() ? 2 : 1) + target.operand_size();
  }

  // Returns the size of the code generated by LoadAddress.
  // Used by CallSize(ExternalReference) to find the size of a call.
  int LoadAddressSize(ExternalReference source);

  // Non-SSE2 instructions.
  void Pextrd(Register dst, XMMRegister src, int8_t imm8);
  void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
  void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);

  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason);

  // Like Assert(), but without condition.
  // Use --debug_code to enable.
  void AssertUnreachable(BailoutReason reason);

  // Abort execution if a 64 bit register containing a 32 bit payload does not
  // have zeros in the top 32 bits, enabled via --debug-code.
  void AssertZeroExtended(Register reg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on x64.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // |ra_state| defines whether return address is already pushed to stack or
  // not. Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1, ReturnAddressState ra_state);

  inline bool AllowThisStubCall(CodeStub* stub);

  // Call a code stub. This expects {stub} to be zone-allocated, as it does not
  // trigger generation of the stub's code object but instead files a
  // HeapObjectRequest that will be fulfilled after code assembly.
  void CallStubDelayed(CodeStub* stub);

  void SlowTruncateToIDelayed(Zone* zone, Register result_reg,
                              Register input_reg,
                              int offset = HeapNumber::kValueOffset -
                                           kHeapObjectTag);

  // Call a runtime routine.
  void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
                          SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    Move(kRootRegister, roots_array_start);
    addp(kRootRegister, Immediate(kRootRegisterBias));
  }

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);

  void MoveNumber(Register dst, double value);
  void MoveNonSmi(Register dst, double value);

  // Calculate how much stack space (in bytes) is required to store
  // caller-saved registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // PushCallerSaved and PopCallerSaved do not arrange the registers in any
  // particular order so they are not useful for calls that can cause a GC.
  // The caller can exclude up to 3 registers that do not need to be saved and
  // restored.

  // Push caller saved registers on the stack, and return the number of bytes
  // stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);
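  // A sketch of the intended pairing (the excluded register is illustrative);
  // the push and pop calls must use matching arguments:
  //   int bytes = PushCallerSaved(kSaveFPRegs, rax);  // keep rax out of the set
  //   // ... code that may clobber caller-saved registers ...
  //   PopCallerSaved(kSaveFPRegs, rax);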

 protected:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
  int smi_count = 0;
  int heap_object_count = 0;

  bool root_array_available_ = true;

  int64_t RootRegisterDelta(ExternalReference other);

  // Returns a register holding the smi value. The register MUST NOT be
  // modified. It may be the "smi 1 constant" register.
  Register GetSmiConstant(Smi* value);

 private:
  bool has_frame_ = false;
  // This handle will be patched with the code object on installation.
  Handle<HeapObject> code_object_;
  Isolate* const isolate_;
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);

  // Prevent the use of the RootArray during the lifetime of this
  // scope object.
  class NoRootArrayScope BASE_EMBEDDED {
   public:
    explicit NoRootArrayScope(MacroAssembler* assembler)
        : variable_(&assembler->root_array_available_),
          old_value_(assembler->root_array_available_) {
      assembler->root_array_available_ = false;
    }
    ~NoRootArrayScope() {
      *variable_ = old_value_;
    }
   private:
    bool* variable_;
    bool old_value_;
  };
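  // Typical use (a sketch): suppress root-array-relative accesses while the
  // root register is not yet set up, e.g.
  //   { MacroAssembler::NoRootArrayScope uninitialized_root(masm);
  //     // ... code emitted before InitializeRootRegister() ... }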

  // Operand pointing to an external reference.
  // May emit code to set up the scratch register. The operand is
  // only guaranteed to be correct as long as the scratch register
  // isn't changed.
  // If the operand is used more than once, use a scratch register
  // that is guaranteed not to be clobbered.
  Operand ExternalOperand(ExternalReference reference,
                          Register scratch = kScratchRegister);
  // Loads and stores the value of an external reference.
  // Special case code for load and store to take advantage of
  // load_rax/store_rax if possible/necessary.
  // For other operations, just use:
  //   Operand operand = ExternalOperand(extref);
  //   operation(operand, ..);
  void Load(Register destination, ExternalReference source);
  void Store(ExternalReference destination, Register source);

  // Pushes the address of the external reference onto the stack.
  void PushAddress(ExternalReference source);

  // Operations on roots in the root-array.
  // Push the root value at the given index onto the stack.
  void PushRoot(Heap::RootListIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
                  Label::Distance if_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }
  void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
                  Label* if_equal,
                  Label::Distance if_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal,
                     Label::Distance if_not_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
  void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
                     Label* if_not_equal,
                     Label::Distance if_not_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }


  // ---------------------------------------------------------------------------
  // GC Support


  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr, Register scratch,
                           SaveFPRegsMode save_fp);

  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, zero, branch, distance);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_zero, branch, distance);
  }

  // Check if an object has the black incremental marking color.  Also uses rcx!
  void JumpIfBlack(Register object, Register bitmap_scratch,
                   Register mask_scratch, Label* on_black,
                   Label::Distance on_black_distance);

  // Checks the color of an object.  If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Label* value_is_white, Label::Distance distance);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
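  // A hedged sketch of a typical store-plus-write-barrier sequence (the field
  // offset and registers are illustrative only):
  //   movp(FieldOperand(rbx, offset), rax);
  //   RecordWriteField(rbx, offset, rax, rcx, kDontSaveFPRegs);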

  // For page containing |object| mark region covering |address|
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. The address and value registers are clobbered by the
  // operation.  RecordWrite filters out smis so it does not update
  // the write barrier if the value is a smi.
  void RecordWrite(
      Register object, Register address, Register value, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // Frame restart support.
  void MaybeDropFrames();

  // Enter specific kind of exit frame; either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  //
  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
  // memory (not GCed) on the stack accessible via StackSpaceOperand.
  void EnterApiExitFrame(int arg_stack_space);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi (if pop_arguments == true).
  void LeaveExitFrame(bool save_doubles = false, bool pop_arguments = true);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax (untouched).
  void LeaveApiExitFrame(bool restore_context);

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { Pushad(); }
  void PopSafepointRegisters() { Popad(); }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag);

  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  // ---------------------------------------------------------------------------
  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register. Sets the N and Z flags
  // based on the value of the resulting smi.
  void Integer32ToSmi(Register dst, Register src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);
  void SmiToInteger64(Register dst, const Operand& src);

  // Convert smi to double.
  void SmiToDouble(XMMRegister dst, Register src) {
    SmiToInteger32(kScratchRegister, src);
    Cvtlsi2sd(dst, kScratchRegister);
  }

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Simple comparison of smis.  Both sides must be known smis to use these,
  // otherwise use Cmp.
  void SmiCompare(Register smi1, Register smi2);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(Register dst, const Operand& src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Test the smi in the register, setting the flags based on its value
  // (e.g. the zero flag if the smi is zero).
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Is either value a tagged smi.
  Condition CheckEitherSmi(Register first,
                           Register second,
                           Register scratch = kScratchRegister);
  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src,
                    Label* on_not_smi,
                    Label::Distance near_jump = Label::kFar);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Operand src, Label* on_not_smi,
                    Label::Distance near_jump = Label::kFar);

  // Jump if either or both registers are not smi values.
  void JumpIfNotBothSmi(Register src1,
                        Register src2,
                        Label* on_not_both_smi,
                        Label::Distance near_jump = Label::kFar);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(const Operand& dst, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst, Register src, Smi* constant,
                      SmiOperationConstraints constraints, Label* bailout_label,
                      Label::Distance near_jump = Label::kFar);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done. Sets the N and Z flags
  // based on the value of the resulting integer.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst, Register src, Smi* constant,
                      SmiOperationConstraints constraints, Label* bailout_label,
                      Label::Distance near_jump = Label::kFar);

  // Adds smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed if the operation is
  // successful, otherwise kept intact.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);
  void SmiAdd(Register dst,
              Register src1,
              const Operand& src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiAdd(Register dst,
              Register src1,
              Register src2);

  // Subtracts smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed if the operation is
  // successful, otherwise kept intact.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);
  void SmiSub(Register dst,
              Register src1,
              const Operand& src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiSub(Register dst,
              Register src1,
              Register src2);

  void SmiSub(Register dst,
              Register src1,
              const Operand& src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis,
                    Label::Distance near_jump = Label::kFar);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);
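  // Illustrative use (register choices are arbitrary): index into an array of
  // pointer-sized elements with a positive smi index, e.g.
  //   SmiIndex index = SmiToIndex(rbx, rcx, kPointerSizeLog2);
  //   movp(rax, FieldOperand(rdx, index.reg, index.scale,
  //                          FixedArray::kHeaderSize));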

  // ---------------------------------------------------------------------------
  // String macros.

  // Checks if the given register or operand is a unique name.
  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
                                       Label::Distance distance = Label::kFar);
  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
                                       Label::Distance distance = Label::kFar);

  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load/store with specific representation.
  void Load(Register dst, const Operand& src, Representation r);
  void Store(const Operand& dst, Register src, Representation r);

  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Smi* src);
  void Cmp(const Operand& dst, Smi* src);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Emit code that loads |parameter_index|'th parameter from the stack to
  // the register according to the CallInterfaceDescriptor definition.
  // |sp_to_ra_offset_in_words| specifies the number of words pushed
  // below the caller's sp (on x64 it's at least the return address).
  template <class Descriptor>
  void LoadParameterFromStack(
      Register reg, typename Descriptor::ParameterIndices parameter_index,
      int sp_to_ra_offset_in_words = 1) {
    DCHECK(Descriptor::kPassLastArgsOnStack);
    UNIMPLEMENTED();
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);
  // Emit code to discard a positive number of pointer-sized elements
  // from the stack under the return address which remains on the top,
  // clobbering the rsp register.
  void DropUnderReturnAddress(int stack_elements,
                              Register scratch = kScratchRegister);
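  // For example, DropUnderReturnAddress(2) removes two argument slots while
  // keeping the return address on top; a rough sketch of the effect:
  //   PopReturnAddressTo(kScratchRegister);
  //   addp(rsp, Immediate(2 * kPointerSize));
  //   PushReturnAddressFrom(kScratchRegister);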

  void PushQuad(const Operand& src);
  void PushImm32(int32_t imm32);
  void Pop(Register dst);
  void Pop(const Operand& dst);
  void PopQuad(const Operand& dst);

#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
  void macro_name(XMMRegister dst, src_type src) {    \
    if (CpuFeatures::IsSupported(AVX)) {              \
      CpuFeatureScope scope(this, AVX);               \
      v##name(dst, dst, src);                         \
    } else {                                          \
      name(dst, src);                                 \
    }                                                 \
  }
#define AVX_OP2_X(macro_name, name) \
  AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
#define AVX_OP2_O(macro_name, name) \
  AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
#define AVX_OP2_XO(macro_name, name) \
  AVX_OP2_X(macro_name, name)        \
  AVX_OP2_O(macro_name, name)

  AVX_OP2_XO(Addsd, addsd)
  AVX_OP2_XO(Mulsd, mulsd)
  AVX_OP2_XO(Andps, andps)
  AVX_OP2_XO(Andpd, andpd)
  AVX_OP2_XO(Orpd, orpd)
  AVX_OP2_XO(Cmpeqps, cmpeqps)
  AVX_OP2_XO(Cmpltps, cmpltps)
  AVX_OP2_XO(Cmpleps, cmpleps)
  AVX_OP2_XO(Cmpneqps, cmpneqps)
  AVX_OP2_XO(Cmpnltps, cmpnltps)
  AVX_OP2_XO(Cmpnleps, cmpnleps)
  AVX_OP2_XO(Cmpeqpd, cmpeqpd)
  AVX_OP2_XO(Cmpltpd, cmpltpd)
  AVX_OP2_XO(Cmplepd, cmplepd)
  AVX_OP2_XO(Cmpneqpd, cmpneqpd)
  AVX_OP2_XO(Cmpnltpd, cmpnltpd)
  AVX_OP2_XO(Cmpnlepd, cmpnlepd)

#undef AVX_OP2_O
#undef AVX_OP2_X
#undef AVX_OP2_XO
#undef AVX_OP2_WITH_TYPE

  // ---------------------------------------------------------------------------
  // SIMD macros.
  void Absps(XMMRegister dst);
  void Negps(XMMRegister dst);
  void Abspd(XMMRegister dst);
  void Negpd(XMMRegister dst);

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(const Operand& op);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Non-x64 instructions.
  // Push/pop all general purpose registers.
  // Does not push rsp/rbp nor any of the assembler's special purpose registers
  // (kScratchRegister, kRootRegister).
  void Pushad();
  void Popad();

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);
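  // A small usage sketch (the label handling is illustrative):
  //   CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);  // rcx receives rax's map
  //   j(not_equal, &not_a_function);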

  void DoubleToI(Register result_reg, XMMRegister input_reg,
                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
                 Label* lost_precision, Label* is_nan, Label* minus_zero,
                 Label::Distance dst = Label::kFar);

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> Field::kShift;
    if (shift != 0) {
      shrp(reg, Immediate(shift));
    }
    andp(reg, Immediate(mask));
  }
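  // For instance, assuming a bit-field class exposing kShift/kMask (such as
  // Map::ElementsKindBits), DecodeField<Map::ElementsKindBits>(rcx) shifts and
  // masks rcx so it holds just that field's value.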

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);

  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);
  void AssertSmi(const Operand& object);

  // Abort execution if argument is not a FixedArray, enabled via --debug-code.
  void AssertFixedArray(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link it into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done.
  void GetMapConstructor(Register result, Register map, Register temp);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Load the native context slot with the current index.
  void LoadNativeContextSlot(int index, Register dst);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  // The code object is generated immediately, in contrast to
  // TurboAssembler::CallStubDelayed.
  void CallStub(CodeStub* stub);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }
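  // Usage sketch: CallRuntime(Runtime::kStackGuard) looks up the runtime
  // function's declared argument count itself, so most call sites only pass
  // the id.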

  // Convenience function: tail call a runtime routine (jump)
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext,
                               bool builtin_exit_frame = false);

  // ---------------------------------------------------------------------------
  // StatsCounter support
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }

  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

 private:
  // Order general registers are pushed by Pushad.
  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
  static const int kNumSafepointSavedRegisters = 12;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
                      Label::Distance near_jump);

  void EnterExitFramePrologue(bool save_rax, StackFrame::Type frame_type);

  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);

  void LeaveExitFrameEpilogue(bool restore_context);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* branch,
                  Label::Distance distance = Label::kFar);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit.  Uses rcx as scratch and leaves addr_reg
  // unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code) {
    return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
  }

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(Isolate* isolate, byte* address, int size);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}


// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object,
                            Register index,
                            ScaleFactor scale,
                            int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}
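// For example (a sketch), loading an object's map:
//   movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
// FieldOperand subtracts kHeapObjectTag, so the tagged pointer in rax is
// dereferenced at the correct untagged address.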


inline Operand ContextOperand(Register context, int index) {
  return Operand(context, Context::SlotOffset(index));
}


inline Operand ContextOperand(Register context, Register index) {
  return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}


inline Operand NativeContextOperand() {
  return ContextOperand(rsi, Context::NATIVE_CONTEXT_INDEX);
}


// Provides access to exit frame stack space (not GCed).
inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  return Operand(rsp, (index + kShadowSpace) * kPointerSize);
#else
  return Operand(rsp, index * kPointerSize);
#endif
}
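// Illustrative use (a sketch): after EnterApiExitFrame(2), the two reserved
// (non-GCed) argument slots can be addressed as StackSpaceOperand(0) and
// StackSpaceOperand(1); on Windows the shadow-space slots are skipped
// automatically.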


inline Operand StackOperandForReturnAddress(int32_t disp) {
  return Operand(rsp, disp);
}

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_