// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/ia32/assembler-ia32.h"
#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
16

17
// Give alias names to registers for calling conventions.
18 19 20 21 22 23
constexpr Register kReturnRegister0 = eax;
constexpr Register kReturnRegister1 = edx;
constexpr Register kReturnRegister2 = edi;
constexpr Register kJSFunctionRegister = edi;
constexpr Register kContextRegister = esi;
constexpr Register kAllocateSizeRegister = edx;
24
constexpr Register kSpeculationPoisonRegister = ebx;
25
constexpr Register kInterpreterAccumulatorRegister = eax;
26
constexpr Register kInterpreterBytecodeOffsetRegister = edx;
27 28
constexpr Register kInterpreterBytecodeArrayRegister = edi;
constexpr Register kInterpreterDispatchTableRegister = esi;
29

30
constexpr Register kJavaScriptCallArgCountRegister = eax;
31
constexpr Register kJavaScriptCallCodeStartRegister = ecx;
32
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
33
constexpr Register kJavaScriptCallNewTargetRegister = edx;
34 35
constexpr Register kJavaScriptCallExtraArg1Register = ebx;

36
constexpr Register kOffHeapTrampolineRegister = ecx;
37 38
constexpr Register kRuntimeCallFunctionRegister = ebx;
constexpr Register kRuntimeCallArgCountRegister = eax;
39
constexpr Register kWasmInstanceRegister = esi;
40

41 42 43 44
// Convenience for platform-independent signatures.  We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;

45 46 47
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

48
enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
49

50
#ifdef DEBUG
51 52 53
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                Register reg4 = no_reg, Register reg5 = no_reg,
                Register reg6 = no_reg, Register reg7 = no_reg,
54 55
                Register reg8 = no_reg);
#endif
56

57
class TurboAssembler : public TurboAssemblerBase {
58
 public:
59 60 61 62
  TurboAssembler(Isolate* isolate, const Options& options, void* buffer,
                 int buffer_size, CodeObjectRequired create_code_object)
      : TurboAssemblerBase(isolate, options, buffer, buffer_size,
                           create_code_object) {}
63 64 65 66 67 68 69 70 71 72 73 74 75

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met,
                     Label::Distance condition_met_distance = Label::kFar);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on ia32.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

76 77 78 79 80 81 82 83 84 85 86 87 88
// Allocate a stack frame of given size (i.e. decrement {esp} by the value
// stored in the given register).
#ifdef V8_OS_WIN
  // On win32, take special care if the number of bytes is greater than 4096:
  // Ensure that each page within the new stack frame is touched once in
  // decreasing order. See
  // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
  // Use {bytes_scratch} as scratch register for this procedure.
  void AllocateStackFrame(Register bytes_scratch);
#else
  void AllocateStackFrame(Register bytes) { sub(esp, bytes); }
#endif

89
  // Print a message to stdout and abort execution.
90
  void Abort(AbortReason reason);
91 92 93

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
94
  void Assert(Condition cc, AbortReason reason);
95

96 97
  // Like Assert(), but without condition.
  // Use --debug_code to enable.
98
  void AssertUnreachable(AbortReason reason);
99

100
  // Like Assert(), but always enabled.
101
  void Check(Condition cc, AbortReason reason);
102 103 104 105 106 107 108 109 110 111 112 113 114 115 116

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Nop, because ia32 does not have a root register.
  void InitializeRootRegister() {}

  // Move a constant into a destination using the most efficient encoding.
  void Move(Register dst, const Immediate& x);

  void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }

  // Move if the registers are not identical.
  void Move(Register target, Register source);

117
  void Move(Operand dst, const Immediate& x);
118 119 120 121 122 123 124 125 126

  // Move an immediate into an XMM register.
  void Move(XMMRegister dst, uint32_t src);
  void Move(XMMRegister dst, uint64_t src);
  void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
  void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }

  void Move(Register dst, Handle<HeapObject> handle);

127
  void Call(Register reg) { call(reg); }
128 129 130
  void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
  void Call(Label* target) { call(target); }

131 132 133 134 135
  void RetpolineCall(Register reg);
  void RetpolineCall(Address destination, RelocInfo::Mode rmode);

  void RetpolineJump(Register reg);

136 137 138
  void CallForDeoptimization(Address target, int deopt_id,
                             RelocInfo::Mode rmode) {
    USE(deopt_id);
139 140 141
    call(target, rmode);
  }

142 143 144
  inline bool AllowThisStubCall(CodeStub* stub);
  void CallStubDelayed(CodeStub* stub);

145 146 147
  // Call a runtime routine. This expects {centry} to contain a fitting CEntry
  // builtin for the target runtime function and uses an indirect call.
  void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
148

149 150 151 152 153 154 155 156 157 158 159 160 161
  // Jump the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the operand is a smi.
  inline void JumpIfSmi(Operand value, Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }

162 163 164 165 166 167 168 169 170 171
  void JumpIfEqual(Register a, int32_t b, Label* dest) {
    cmp(a, Immediate(b));
    j(equal, dest);
  }

  void JumpIfLessThan(Register a, int32_t b, Label* dest) {
    cmp(a, Immediate(b));
    j(less, dest);
  }

172 173
  void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }

174 175 176 177 178 179 180 181
  // Removes current frame and its arguments from the stack preserving the
  // arguments and a return address pushed to the stack for the next call. Both
  // |callee_args_count| and |caller_args_count_reg| do not include receiver.
  // |callee_args_count| is not modified, |caller_args_count_reg| is trashed.
  // |number_of_temp_values_after_return_address| specifies the number of words
  // pushed to the stack after the return address. This is to allow "allocation"
  // of scratch registers that this function requires by saving their values on
  // the stack.
182 183
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
184
                          Register scratch1,
185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206
                          int number_of_temp_values_after_return_address);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in esp[0], esp[4],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_arguments, Register scratch);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  void ShlPair(Register high, Register low, uint8_t imm8);
  void ShlPair_cl(Register high, Register low);
  void ShrPair(Register high, Register low, uint8_t imm8);
207
  void ShrPair_cl(Register high, Register low);
208 209 210 211 212
  void SarPair(Register high, Register low, uint8_t imm8);
  void SarPair_cl(Register high, Register low);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
213
  void Prologue();
214 215

  void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
216
  void Lzcnt(Register dst, Operand src);
217 218

  void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
219
  void Tzcnt(Register dst, Operand src);
220 221

  void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
222
  void Popcnt(Register dst, Operand src);
223 224 225

  void Ret();

226 227
  void LoadRoot(Register destination, Heap::RootListIndex index) override;

228 229 230 231
  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1.  Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

232 233 234 235
  void Pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
    Pshufhw(dst, Operand(src), shuffle);
  }
  void Pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
236 237 238
  void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
    Pshuflw(dst, Operand(src), shuffle);
  }
239
  void Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
240 241 242
  void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
    Pshufd(dst, Operand(src), shuffle);
  }
243
  void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
244 245 246 247 248 249 250 251 252 253 254 255

// SSE/SSE2 instructions with AVX version.
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
  void macro_name(dst_type dst, src_type src) {                 \
    if (CpuFeatures::IsSupported(AVX)) {                        \
      CpuFeatureScope scope(this, AVX);                         \
      v##name(dst, src);                                        \
    } else {                                                    \
      name(dst, src);                                           \
    }                                                           \
  }

256 257
  AVX_OP2_WITH_TYPE(Rcpps, rcpps, XMMRegister, const Operand&)
  AVX_OP2_WITH_TYPE(Rsqrtps, rsqrtps, XMMRegister, const Operand&)
258 259
  AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, Operand)
  AVX_OP2_WITH_TYPE(Movdqu, movdqu, Operand, XMMRegister)
260
  AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
261
  AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Operand)
262
  AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
263
  AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
264
  AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
265 266 267

#undef AVX_OP2_WITH_TYPE

268 269 270 271 272 273 274 275 276 277 278 279 280
// Only use these macros when non-destructive source of AVX version is not
// needed.
#define AVX_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
  void macro_name(dst_type dst, src_type src) {                 \
    if (CpuFeatures::IsSupported(AVX)) {                        \
      CpuFeatureScope scope(this, AVX);                         \
      v##name(dst, dst, src);                                   \
    } else {                                                    \
      name(dst, src);                                           \
    }                                                           \
  }
#define AVX_OP3_XO(macro_name, name)                            \
  AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
281
  AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
282

283 284
  AVX_OP3_XO(Pcmpeqb, pcmpeqb)
  AVX_OP3_XO(Pcmpeqw, pcmpeqw)
285
  AVX_OP3_XO(Pcmpeqd, pcmpeqd)
286 287
  AVX_OP3_XO(Psubb, psubb)
  AVX_OP3_XO(Psubw, psubw)
288 289
  AVX_OP3_XO(Psubd, psubd)
  AVX_OP3_XO(Pxor, pxor)
290 291
  AVX_OP3_XO(Andps, andps)
  AVX_OP3_XO(Andpd, andpd)
292 293
  AVX_OP3_XO(Xorps, xorps)
  AVX_OP3_XO(Xorpd, xorpd)
294 295
  AVX_OP3_XO(Sqrtss, sqrtss)
  AVX_OP3_XO(Sqrtsd, sqrtsd)
296 297 298 299

#undef AVX_OP3_XO
#undef AVX_OP3_WITH_TYPE

300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327
// Non-SSE2 instructions.
#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
                                sse_scope)                            \
  void macro_name(dst_type dst, src_type src) {                       \
    if (CpuFeatures::IsSupported(AVX)) {                              \
      CpuFeatureScope scope(this, AVX);                               \
      v##name(dst, src);                                              \
      return;                                                         \
    }                                                                 \
    if (CpuFeatures::IsSupported(sse_scope)) {                        \
      CpuFeatureScope scope(this, sse_scope);                         \
      name(dst, src);                                                 \
      return;                                                         \
    }                                                                 \
    UNREACHABLE();                                                    \
  }
#define AVX_OP2_XO_SSE4(macro_name, name)                                     \
  AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
  AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)

  AVX_OP2_XO_SSE4(Ptest, ptest)
  AVX_OP2_XO_SSE4(Pmovsxbw, pmovsxbw)
  AVX_OP2_XO_SSE4(Pmovsxwd, pmovsxwd)
  AVX_OP2_XO_SSE4(Pmovzxbw, pmovzxbw)
  AVX_OP2_XO_SSE4(Pmovzxwd, pmovzxwd)

#undef AVX_OP2_WITH_TYPE_SCOPE
#undef AVX_OP2_XO_SSE4
328

329
  void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
330
  void Pshufb(XMMRegister dst, Operand src);
331 332 333 334
  void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    Pblendw(dst, Operand(src), imm8);
  }
  void Pblendw(XMMRegister dst, Operand src, uint8_t imm8);
335 336

  void Psignb(XMMRegister dst, XMMRegister src) { Psignb(dst, Operand(src)); }
337
  void Psignb(XMMRegister dst, Operand src);
338
  void Psignw(XMMRegister dst, XMMRegister src) { Psignw(dst, Operand(src)); }
339
  void Psignw(XMMRegister dst, Operand src);
340
  void Psignd(XMMRegister dst, XMMRegister src) { Psignd(dst, Operand(src)); }
341
  void Psignd(XMMRegister dst, Operand src);
342

343 344 345 346 347
  void Palignr(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    Palignr(dst, Operand(src), imm8);
  }
  void Palignr(XMMRegister dst, Operand src, uint8_t imm8);

348 349 350 351 352 353 354
  void Pextrb(Register dst, XMMRegister src, int8_t imm8);
  void Pextrw(Register dst, XMMRegister src, int8_t imm8);
  void Pextrd(Register dst, XMMRegister src, int8_t imm8);
  void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
              bool is_64_bits = false) {
    Pinsrd(dst, Operand(src), imm8, is_64_bits);
  }
355
  void Pinsrd(XMMRegister dst, Operand src, int8_t imm8,
356 357 358 359 360 361
              bool is_64_bits = false);

  // Expression support
  // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
  // hinders register renaming and makes dependence chains longer. So we use
  // xorps to clear the dst register before cvtsi2sd to solve this issue.
362 363
  void Cvtsi2ss(XMMRegister dst, Register src) { Cvtsi2ss(dst, Operand(src)); }
  void Cvtsi2ss(XMMRegister dst, Operand src);
364
  void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
365
  void Cvtsi2sd(XMMRegister dst, Operand src);
366

367 368 369 370
  void Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
    Cvtui2ss(dst, Operand(src), tmp);
  }
  void Cvtui2ss(XMMRegister dst, Operand src, Register tmp);
371 372 373 374 375 376 377 378 379 380
  void Cvttss2ui(Register dst, XMMRegister src, XMMRegister tmp) {
    Cvttss2ui(dst, Operand(src), tmp);
  }
  void Cvttss2ui(Register dst, Operand src, XMMRegister tmp);
  void Cvtui2sd(XMMRegister dst, Register src) { Cvtui2sd(dst, Operand(src)); }
  void Cvtui2sd(XMMRegister dst, Operand src);
  void Cvttsd2ui(Register dst, XMMRegister src, XMMRegister tmp) {
    Cvttsd2ui(dst, Operand(src), tmp);
  }
  void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
381 382

  void Push(Register src) { push(src); }
383
  void Push(Operand src) { push(src); }
384 385 386
  void Push(Immediate value) { push(value); }
  void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
  void Push(Smi* smi) { Push(Immediate(smi)); }
387 388 389 390 391 392 393

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);
394

395 396 397 398 399 400 401 402 403 404 405 406 407 408 409
  // Calculate how much stack space (in bytes) are required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // PushCallerSaved and PopCallerSaved do not arrange the registers in any
  // particular order so they are not useful for calls that can cause a GC.
  // The caller can exclude up to 3 registers that do not need to be saved and
  // restored.

  // Push caller saved registers on the stack, and return the number of bytes
  // stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
410 411
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
412 413 414 415 416
  // Restore caller saved registers from the stack, and return the number of
  // bytes stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);
417

418 419 420 421
  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

422
  void ResetSpeculationPoisonRegister();
423 424
};

425
// MacroAssembler implements a collection of frequently used macros.
426
class MacroAssembler : public TurboAssembler {
427
 public:
428
  MacroAssembler(Isolate* isolate, void* buffer, int size,
429 430 431 432 433
                 CodeObjectRequired create_code_object)
      : MacroAssembler(isolate, Assembler::DefaultOptions(isolate), buffer,
                       size, create_code_object) {}
  MacroAssembler(Isolate* isolate, const Options& options, void* buffer,
                 int size, CodeObjectRequired create_code_object);
434

435 436 437 438 439 440 441 442
  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int32_t x) {
    if (x == 0) {
      xor_(dst, dst);
    } else {
      mov(dst, Immediate(x));
    }
  }
443
  void Set(Operand dst, int32_t x) { mov(dst, Immediate(x)); }
444

445 446 447 448 449
  // Operations on roots in the root-array.
  void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
  // These methods can only be used with constant roots (i.e. non-writable
  // and not in new space).
  void CompareRoot(Register with, Heap::RootListIndex index);
450
  void CompareRoot(Operand with, Heap::RootListIndex index);
451 452 453 454
  void PushRoot(Heap::RootListIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
455
                  Label::Distance if_equal_distance = Label::kFar) {
456 457 458
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }
459
  void JumpIfRoot(Operand with, Heap::RootListIndex index, Label* if_equal,
460
                  Label::Distance if_equal_distance = Label::kFar) {
461 462 463
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }
464 465 466 467

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal,
468
                     Label::Distance if_not_equal_distance = Label::kFar) {
469 470 471
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
472
  void JumpIfNotRoot(Operand with, Heap::RootListIndex index,
473
                     Label* if_not_equal,
474
                     Label::Distance if_not_equal_distance = Label::kFar) {
475 476 477
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
478

479 480
  // ---------------------------------------------------------------------------
  // GC Support
481 482 483 484 485 486
  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
  void RecordWriteField(
487
      Register object, int offset, Register value, Register scratch,
488 489
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
490
      SmiCheck smi_check = INLINE_SMI_CHECK);
491

492 493
  // For page containing |object| mark region covering |address|
  // dirty. |object| is the object being stored into, |value| is the
494
  // object being stored. The address and value registers are clobbered by the
495 496
  // operation. RecordWrite filters out smis so it does not update the
  // write barrier if the value is a smi.
497
  void RecordWrite(
498
      Register object, Register address, Register value, SaveFPRegsMode save_fp,
499
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
500
      SmiCheck smi_check = INLINE_SMI_CHECK);
501

502
  // Frame restart support
503
  void MaybeDropFrames();
504

505 506 507 508
  // Enter specific kind of exit frame. Expects the number of
  // arguments in register eax and sets up the number of arguments in
  // register edi and the pointer to the first argument in register
  // esi.
509
  void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);
510

511
  void EnterApiExitFrame(int argc);
512 513 514

  // Leave the current exit frame. Expects the return value in
  // register eax:edx (untouched) and the pointer to the first
515 516
  // argument in register esi (if pop_arguments == true).
  void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
517

518 519
  // Leave the current exit frame. Expects the return value in
  // register eax (untouched).
520
  void LeaveApiExitFrame();
521

522 523 524
  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

525 526 527
  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);

528 529 530 531
  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { pushad(); }
  void PopSafepointRegisters() { popad(); }

532 533 534
  // ---------------------------------------------------------------------------
  // JavaScript invokes

535

536
  // Invoke the JavaScript function code by either calling or jumping.
537

538 539
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
540
                          const ParameterCount& actual, InvokeFlag flag);
541

542
  // On function call, call into the debugger if necessary.
543
  // This may clobber ecx.
544 545 546
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);
547

548 549
  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
550
  void InvokeFunction(Register function, Register new_target,
551
                      const ParameterCount& actual, InvokeFlag flag);
552

553
  void InvokeFunction(Register function, const ParameterCount& expected,
554
                      const ParameterCount& actual, InvokeFlag flag);
555

556 557 558 559 560 561 562
  // Compare object type for heap object.
  // Incoming register is heap_object and outgoing register is map.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  void CmpInstanceType(Register map, InstanceType type);

563
  void DoubleToI(Register result_reg, XMMRegister input_reg,
564
                 XMMRegister scratch, Label* lost_precision, Label* is_nan,
565
                 Label::Distance dst = Label::kFar);
566

567 568
  // Smi tagging support.
  void SmiTag(Register reg) {
569 570
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
571
    add(reg, reg);
572 573
  }

574
  // Modifies the register even if it does not contain a Smi!
575
  void UntagSmi(Register reg, Label* is_smi) {
576
    STATIC_ASSERT(kSmiTagSize == 1);
577
    sar(reg, kSmiTagSize);
578
    STATIC_ASSERT(kSmiTag == 0);
579 580 581
    j(not_carry, is_smi);
  }

582
  // Jump if register contain a non-smi.
583
  inline void JumpIfNotSmi(Register value, Label* not_smi_label,
584
                           Label::Distance distance = Label::kFar) {
585
    test(value, Immediate(kSmiTagMask));
586
    j(not_zero, not_smi_label, distance);
587
  }
588 589 590 591 592 593
  // Jump if the operand is not a smi.
  inline void JumpIfNotSmi(Operand value, Label* smi_label,
                           Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(not_zero, smi_label, distance);
  }
594

595 596
  template<typename Field>
  void DecodeField(Register reg) {
597
    static const int shift = Field::kShift;
598
    static const int mask = Field::kMask >> Field::kShift;
599 600 601
    if (shift != 0) {
      sar(reg, shift);
    }
602
    and_(reg, Immediate(mask));
603
  }
604

605 606
  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);
607

608 609
  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
610

611 612 613
  // Abort execution if argument is not a FixedArray, enabled via --debug-code.
  void AssertFixedArray(Register object);

614 615 616
  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

617 618 619
  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

620 621 622 623
  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

624
  // Abort execution if argument is not a JSGeneratorObject (or subclass),
625
  // enabled via --debug-code.
626
  void AssertGeneratorObject(Register object);
627

628 629 630 631
  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

632 633 634
  // ---------------------------------------------------------------------------
  // Exception handling

635 636
  // Push a new stack handler and link it into stack handler chain.
  void PushStackHandler();
637

638 639
  // Unlink the stack handler on top of the stack from the stack handler chain.
  void PopStackHandler();
640

641 642 643
  // ---------------------------------------------------------------------------
  // Runtime calls

644
  // Call a code stub.  Generate the code if necessary.
645
  void CallStub(CodeStub* stub);
646

647
  // Tail call a code stub (jump).  Generate the code if necessary.
648 649
  void TailCallStub(CodeStub* stub);

650
  // Call a runtime routine.
651
  void CallRuntime(const Runtime::Function* f, int num_arguments,
652
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
653 654

  // Convenience function: Same as above, but takes the fid instead.
655
  void CallRuntime(Runtime::FunctionId fid,
656
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
657 658 659 660 661 662 663 664
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
665
  }
666

serya@chromium.org's avatar
serya@chromium.org committed
667
  // Convenience function: tail call a runtime routine (jump).
668
  void TailCallRuntime(Runtime::FunctionId fid);
669

670
  // Jump to a runtime routine.
671 672
  void JumpToExternalReference(const ExternalReference& ext,
                               bool builtin_exit_frame = false);
673

674
  // Generates a trampoline to jump to the off-heap instruction stream.
675
  void JumpToInstructionStream(Address entry);
676

677 678 679
  // ---------------------------------------------------------------------------
  // Utilities

680 681
  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the esp register.
682 683
  void Drop(int element_count);

684
  void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
685
  void Pop(Register dst) { pop(dst); }
686
  void Pop(Operand dst) { pop(dst); }
687 688
  void PushReturnAddressFrom(Register src) { push(src); }
  void PopReturnAddressTo(Register dst) { pop(dst); }
689

690 691 692 693
  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register in_out, Label* target_if_cleared);

694 695 696 697 698 699
  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);

700 701 702 703
  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }

704 705 706
  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

707 708 709
 private:
  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
710 711
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
712
                      Label::Distance done_distance);
713

714
  void EnterExitFramePrologue(StackFrame::Type frame_type);
715
  void EnterExitFrameEpilogue(int argc, bool save_doubles);
serya@chromium.org's avatar
serya@chromium.org committed
716

717
  void LeaveExitFrameEpilogue();
718

719
  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
720
  void InNewSpace(Register object, Register scratch, Condition cc,
721 722 723
                  Label* condition_met,
                  Label::Distance condition_met_distance = Label::kFar);

724 725 726
  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

727
  // Needs access to SafepointRegisterStackIndex for compiled frame
728
  // traversal.
729
  friend class StandardFrame;
730 731 732 733 734 735
};

// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
736
inline Operand FieldOperand(Register object, int offset) {
737 738 739
  return Operand(object, offset - kHeapObjectTag);
}

740
// Generate an Operand for loading an indexed field from an object.
741
inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
742
                            int offset) {
743 744 745
  return Operand(object, index, scale, offset - kHeapObjectTag);
}

746
inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
747 748 749 750 751
                                        int additional_offset = 0) {
  int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
  return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}

752
inline Operand ContextOperand(Register context, int index) {
753 754 755
  return Operand(context, Context::SlotOffset(index));
}

756 757 758 759
inline Operand ContextOperand(Register context, Register index) {
  return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}

760 761
inline Operand NativeContextOperand() {
  return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
762 763
}

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8
#endif  // V8_IA32_MACRO_ASSEMBLER_IA32_H_