// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/heap/heap-inl.h"  // For MemoryChunk.
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/objects/smi.h"
#include "src/register-configuration.h"
#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/string-constants.h"
#include "src/x64/assembler-x64.h"

// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/x64/macro-assembler-x64.h"
#endif

namespace v8 {
namespace internal {

Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK_GE(index, 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument =
      base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_ == no_reg) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kSystemPointerSize.
    DCHECK_GT(argument_count_immediate_ + receiver, 0);
    return Operand(base_reg_,
                   displacement_to_last_argument +
                       (argument_count_immediate_ + receiver - 1 - index) *
                           kSystemPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_system_pointer_size + (receiver - 1) *
    // kSystemPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_system_pointer_size,
                   displacement_to_last_argument +
                       (receiver - 1 - index) * kSystemPointerSize);
  }
}
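
// Illustrative example, assuming an rbp-based frame, an immediate argument
// count of 2, ARGUMENTS_CONTAIN_RECEIVER, and no extra displacement:
// displacement_to_last_argument is kFPOnStackSize + kPCOnStackSize = 16, so
// GetArgumentOperand(0) yields Operand(rbp, 32) and GetArgumentOperand(1)
// yields Operand(rbp, 24).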

StackArgumentsAccessor::StackArgumentsAccessor(
    Register base_reg, const ParameterCount& parameter_count,
    StackArgumentsAccessorReceiverMode receiver_mode,
    int extra_displacement_to_last_argument)
    : base_reg_(base_reg),
      argument_count_reg_(parameter_count.is_reg() ? parameter_count.reg()
                                                   : no_reg),
      argument_count_immediate_(
          parameter_count.is_immediate() ? parameter_count.immediate() : 0),
      receiver_mode_(receiver_mode),
      extra_displacement_to_last_argument_(
          extra_displacement_to_last_argument) {}

void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
    if (is_int32(delta)) {
      movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination == rax && !options().isolate_independent_code) {
    load_rax(source);
  } else {
    movq(destination, ExternalReferenceAsOperand(source));
  }
}


void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    intptr_t delta =
        RootRegisterOffsetForExternalReference(isolate(), destination);
    if (is_int32(delta)) {
      movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source == rax && !options().isolate_independent_code) {
    store_rax(destination);
  } else {
    movq(ExternalReferenceAsOperand(destination), source);
  }
}

void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
  LoadTaggedPointerField(
      destination,
      FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}

void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  DCHECK(is_int32(offset));
  if (offset == 0) {
    Move(destination, kRootRegister);
  } else {
    leaq(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
  }
}

void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  movq(destination, Operand(kRootRegister, offset));
}

void TurboAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
    if (is_int32(delta)) {
      leaq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(destination, source);
      return;
    }
  }
  Move(destination, source);
}

Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
                                                   Register scratch) {
  if (root_array_available_ && options().enable_root_array_delta_access) {
    int64_t delta =
        RootRegisterOffsetForExternalReference(isolate(), reference);
    if (is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  if (root_array_available_ && options().isolate_independent_code) {
    if (IsAddressableThroughRootRegister(isolate(), reference)) {
      // Some external references can be efficiently loaded as an offset from
      // kRootRegister.
      intptr_t offset =
          RootRegisterOffsetForExternalReference(isolate(), reference);
      CHECK(is_int32(offset));
      return Operand(kRootRegister, static_cast<int32_t>(offset));
    } else {
      // Otherwise, do a memory load from the external reference table.
      movq(scratch, Operand(kRootRegister,
                            RootRegisterOffsetForExternalReferenceTableEntry(
                                isolate(), reference)));
      return Operand(scratch, 0);
    }
  }
  Move(scratch, reference);
  return Operand(scratch, 0);
}

void MacroAssembler::PushAddress(ExternalReference source) {
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);
}

void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
  DCHECK(root_array_available_);
  movq(destination,
       Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}

void MacroAssembler::PushRoot(RootIndex index) {
  DCHECK(root_array_available_);
  Push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}

void TurboAssembler::CompareRoot(Register with, RootIndex index) {
  DCHECK(root_array_available_);
  if (IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
                RootIndex::kLastStrongOrReadOnlyRoot)) {
    cmp_tagged(with,
               Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
  } else {
    // Some smi roots contain system pointer size values like stack limits.
    cmpq(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
  }
}

void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
  DCHECK(root_array_available_);
  DCHECK(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  if (IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
                RootIndex::kLastStrongOrReadOnlyRoot)) {
    cmp_tagged(with, kScratchRegister);
  } else {
    // Some smi roots contain system pointer size values like stack limits.
    cmpq(with, kScratchRegister);
  }
}

void TurboAssembler::LoadTaggedPointerField(Register destination,
                                            Operand field_operand) {
#ifdef V8_COMPRESS_POINTERS
  DecompressTaggedPointer(destination, field_operand);
#else
  mov_tagged(destination, field_operand);
#endif
}

void TurboAssembler::LoadAnyTaggedField(Register destination,
                                        Operand field_operand,
                                        Register scratch) {
#ifdef V8_COMPRESS_POINTERS
  DecompressAnyTagged(destination, field_operand, scratch);
#else
  mov_tagged(destination, field_operand);
#endif
}

void TurboAssembler::PushTaggedPointerField(Operand field_operand,
                                            Register scratch) {
#ifdef V8_COMPRESS_POINTERS
  DCHECK(!field_operand.AddressUsesRegister(scratch));
  DecompressTaggedPointer(scratch, field_operand);
  Push(scratch);
#else
  Push(field_operand);
#endif
}

void TurboAssembler::PushTaggedAnyField(Operand field_operand,
                                        Register scratch1, Register scratch2) {
#ifdef V8_COMPRESS_POINTERS
  DCHECK(!AreAliased(scratch1, scratch2));
  DCHECK(!field_operand.AddressUsesRegister(scratch1));
  DCHECK(!field_operand.AddressUsesRegister(scratch2));
  DecompressAnyTagged(scratch1, field_operand, scratch2);
  Push(scratch1);
#else
  Push(field_operand);
#endif
}

void TurboAssembler::SmiUntagField(Register dst, Operand src) {
  SmiUntag(dst, src);
}

void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
                                      Immediate value) {
#ifdef V8_COMPRESS_POINTERS
  RecordComment("[ StoreTagged");
  movl(dst_field_operand, value);
  RecordComment("]");
#else
  movq(dst_field_operand, value);
#endif
}

void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
                                      Register value) {
#ifdef V8_COMPRESS_POINTERS
  RecordComment("[ StoreTagged");
  movl(dst_field_operand, value);
  RecordComment("]");
#else
  movq(dst_field_operand, value);
#endif
}

void TurboAssembler::DecompressTaggedSigned(Register destination,
                                            Operand field_operand) {
  RecordComment("[ DecompressTaggedSigned");
  movsxlq(destination, field_operand);
  RecordComment("]");
}

void TurboAssembler::DecompressTaggedSigned(Register destination,
                                            Register source) {
  RecordComment("[ DecompressTaggedSigned");
  movsxlq(destination, source);
  RecordComment("]");
}

void TurboAssembler::DecompressTaggedPointer(Register destination,
                                             Operand field_operand) {
  RecordComment("[ DecompressTaggedPointer");
  movsxlq(destination, field_operand);
  addq(destination, kRootRegister);
  RecordComment("]");
}

void TurboAssembler::DecompressTaggedPointer(Register destination,
                                             Register source) {
  RecordComment("[ DecompressTaggedPointer");
  movsxlq(destination, source);
  addq(destination, kRootRegister);
  RecordComment("]");
}

void TurboAssembler::DecompressRegisterAnyTagged(Register destination,
                                                 Register scratch) {
  if (kUseBranchlessPtrDecompression) {
    // Branchlessly compute |masked_root|:
    // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
    STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
    Register masked_root = scratch;
    movl(masked_root, destination);
    andl(masked_root, Immediate(kSmiTagMask));
    negq(masked_root);
    andq(masked_root, kRootRegister);
    // Now this add operation will either leave the value unchanged if it is
    // a smi or add the isolate root if it is a heap object.
    addq(destination, masked_root);
  } else {
    Label done;
    JumpIfSmi(destination, &done);
    addq(destination, kRootRegister);
    bind(&done);
  }
}
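
// Sketch of the branchless path above, assuming kSmiTagMask == 1: a smi has
// its low bit clear and a heap object has it set, so
//   masked_root = -(value & kSmiTagMask) & kRootRegister
// evaluates to 0 for a smi and to kRootRegister for a heap object; the final
// addq therefore leaves smis untouched and rebases compressed heap pointers
// onto the isolate root.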

void TurboAssembler::DecompressAnyTagged(Register destination,
                                         Operand field_operand,
                                         Register scratch) {
  DCHECK(!AreAliased(destination, scratch));
  RecordComment("[ DecompressAnyTagged");
  movsxlq(destination, field_operand);
  DecompressRegisterAnyTagged(destination, scratch);
  RecordComment("]");
}

void TurboAssembler::DecompressAnyTagged(Register destination, Register source,
                                         Register scratch) {
  DCHECK(!AreAliased(destination, scratch));
  RecordComment("[ DecompressAnyTagged");
  movsxlq(destination, source);
  DecompressRegisterAnyTagged(destination, scratch);
  RecordComment("]");
}

void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kTaggedSize.
  DCHECK(IsAligned(offset, kTaggedSize));

  leaq(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate(kTaggedSize - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, RelocInfo::NONE);
    Move(dst, kZapValue, RelocInfo::NONE);
  }
}

void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      pushq(Register::from_code(i));
    }
  }
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
    if ((registers >> i) & 1u) {
      popq(Register::from_code(i));
    }
  }
}

void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
                                             SaveFPRegsMode fp_mode) {
  EphemeronKeyBarrierDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
  Register slot_parameter(descriptor.GetRegisterParameter(
      EphemeronKeyBarrierDescriptor::kSlotAddress));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));

  MovePair(slot_parameter, address, object_parameter, object);
  Smi smi_fm = Smi::FromEnum(fp_mode);
  Move(fp_mode_parameter, smi_fm);
  Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
       RelocInfo::CODE_TARGET);

  RestoreRegisters(registers);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  CallRecordWriteStub(
      object, address, remembered_set_action, fp_mode,
      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
      kNullAddress);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Address wasm_target) {
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
                      Handle<Code>::null(), wasm_target);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Handle<Code> code_target, Address wasm_target) {
  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);

  RecordWriteDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
  Register slot_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register remembered_set_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));

  // Prepare argument registers for calling RecordWrite
  // slot_parameter   <= address
  // object_parameter <= object
  MovePair(slot_parameter, address, object_parameter, object);

  Smi smi_rsa = Smi::FromEnum(remembered_set_action);
  Smi smi_fm = Smi::FromEnum(fp_mode);
  Move(remembered_set_parameter, smi_rsa);
  if (smi_rsa != smi_fm) {
    Move(fp_mode_parameter, smi_fm);
  } else {
    movq(fp_mode_parameter, remembered_set_parameter);
  }
  if (code_target.is_null()) {
    // Use {near_call} for direct Wasm call within a module.
    near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(code_target, RelocInfo::CODE_TARGET);
  }

  RestoreRegisters(registers);
}

void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  DCHECK(object != address);
  DCHECK(value != address);
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmp_tagged(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
                Label::kNear);

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, RelocInfo::NONE);
    Move(value, kZapValue, RelocInfo::NONE);
  }
}

void TurboAssembler::Assert(Condition cc, AbortReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}

void TurboAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}

void TurboAssembler::Check(Condition cc, AbortReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}

void TurboAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kSystemPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    Label alignment_as_expected;
    testq(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}

void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
  const char* msg = GetAbortReason(reason);
  RecordComment("Abort message: ");
  RecordComment(msg);
#endif

  // Avoid emitting call to builtin if requested.
  if (trap_on_abort()) {
    int3();
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    movl(arg_reg_1, Immediate(static_cast<int>(reason)));
    PrepareCallCFunction(1);
    LoadAddress(rax, ExternalReference::abort_with_reason());
    call(rax);
    return;
  }

  Move(rdx, Smi::FromInt(static_cast<int>(reason)));

  if (!has_frame()) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // Control will not return here.
  int3();
}

void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
                                           Register centry) {
  const Runtime::Function* f = Runtime::FunctionForId(fid);
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, f->nargs);
  LoadAddress(rbx, ExternalReference::Create(f));
  DCHECK(!AreAliased(centry, rax, rbx));
  DCHECK(centry == rcx);
  CallCodeObject(centry);
}

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference::Create(f));
  Handle<Code> code =
      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
  Call(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  //
  //  For runtime functions with variable arguments:
  //  -- rax                    : number of arguments
  // -----------------------------------

  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    Set(rax, function->nargs);
  }
  JumpToExternalReference(ExternalReference::Create(fid));
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             bool builtin_exit_frame) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
                                          kArgvOnStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}

static constexpr Register saved_regs[] = {rax, rcx, rdx, rbx, rbp, rsi,
                                          rdi, r8,  r9,  r10, r11};

static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);

int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      bytes += kSystemPointerSize;
    }
  }

  // R12 to r15 are callee save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    bytes += kDoubleSize * XMMRegister::kNumRegisters;
  }

  return bytes;
}

int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  int bytes = 0;
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      pushq(reg);
      bytes += kSystemPointerSize;
    }
  }

  // R12 to r15 are callee save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    int delta = kDoubleSize * XMMRegister::kNumRegisters;
    AllocateStackSpace(delta);
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      Movsd(Operand(rsp, i * kDoubleSize), reg);
    }
    bytes += delta;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      Movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    int delta = kDoubleSize * XMMRegister::kNumRegisters;
    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
    bytes += delta;
  }

  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      popq(reg);
      bytes += kSystemPointerSize;
    }
  }

  return bytes;
}
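
// Typical (hypothetical) usage of the two helpers above around a call that may
// clobber caller-saved registers; the register passed as an exclusion is the
// one holding a live value that must not be saved and restored:
//   int bytes = PushCallerSaved(kSaveFPRegs, return_register);
//   ... emit the call ...
//   bytes += PopCallerSaved(kSaveFPRegs, return_register);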

761
void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
762 763 764 765 766 767 768 769
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtss2sd(dst, src, src);
  } else {
    cvtss2sd(dst, src);
  }
}

770
void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
771 772 773 774 775 776 777 778
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtss2sd(dst, dst, src);
  } else {
    cvtss2sd(dst, src);
  }
}

779
void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
780 781 782 783 784 785 786 787
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtsd2ss(dst, src, src);
  } else {
    cvtsd2ss(dst, src);
  }
}

788
void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
789 790 791 792 793 794 795 796
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvtsd2ss(dst, dst, src);
  } else {
    cvtsd2ss(dst, src);
  }
}

797
void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
798 799 800 801 802
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtlsi2sd(dst, dst, src);
  } else {
803
    xorpd(dst, dst);
804 805
    cvtlsi2sd(dst, src);
  }
806 807
}

808
void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
809 810 811 812 813
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtlsi2sd(dst, dst, src);
  } else {
814
    xorpd(dst, dst);
815 816
    cvtlsi2sd(dst, src);
  }
817 818
}

819
void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
820 821 822 823 824 825 826 827 828 829
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtlsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtlsi2ss(dst, src);
  }
}

830
void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
831 832 833 834 835 836 837 838 839 840
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtlsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtlsi2ss(dst, src);
  }
}

841
void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
842 843 844 845 846 847 848 849 850 851
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtqsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtqsi2ss(dst, src);
  }
}

852
void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
853 854 855 856 857 858 859 860 861 862
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorps(dst, dst, dst);
    vcvtqsi2ss(dst, dst, src);
  } else {
    xorps(dst, dst);
    cvtqsi2ss(dst, src);
  }
}

863
void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
864 865 866 867 868 869 870 871 872 873
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtqsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtqsi2sd(dst, src);
  }
}

874
void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
875 876 877 878 879 880 881 882 883 884
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vxorpd(dst, dst, dst);
    vcvtqsi2sd(dst, dst, src);
  } else {
    xorpd(dst, dst);
    cvtqsi2sd(dst, src);
  }
}

void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
  // Zero-extend the 32 bit value to 64 bit.
  movl(kScratchRegister, src);
  Cvtqsi2ss(dst, kScratchRegister);
}

void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
  // Zero-extend the 32 bit value to 64 bit.
  movl(kScratchRegister, src);
  Cvtqsi2ss(dst, kScratchRegister);
}

void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
  // Zero-extend the 32 bit value to 64 bit.
  movl(kScratchRegister, src);
  Cvtqsi2sd(dst, kScratchRegister);
}

void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
  // Zero-extend the 32 bit value to 64 bit.
  movl(kScratchRegister, src);
  Cvtqsi2sd(dst, kScratchRegister);
}

void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
  Label done;
  Cvtqsi2ss(dst, src);
  testq(src, src);
  j(positive, &done, Label::kNear);

  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
  if (src != kScratchRegister) movq(kScratchRegister, src);
  shrq(kScratchRegister, Immediate(1));
  // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
  Label msb_not_set;
  j(not_carry, &msb_not_set, Label::kNear);
  orq(kScratchRegister, Immediate(1));
  bind(&msb_not_set);
  Cvtqsi2ss(dst, kScratchRegister);
  addss(dst, dst);
  bind(&done);
}
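
// Worked example for the negative path above: for src == 2^63 (MSB set),
// kScratchRegister becomes src / 2 with the dropped LSB or'ed back in, the
// signed conversion then sees a positive value, and addss doubles the result,
// so dst ends up as the correctly rounded float for the original uint64.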

void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
  movq(kScratchRegister, src);
  Cvtqui2ss(dst, kScratchRegister);
}

void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
  Label done;
  Cvtqsi2sd(dst, src);
  testq(src, src);
  j(positive, &done, Label::kNear);

  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
  if (src != kScratchRegister) movq(kScratchRegister, src);
  shrq(kScratchRegister, Immediate(1));
  // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
  Label msb_not_set;
  j(not_carry, &msb_not_set, Label::kNear);
  orq(kScratchRegister, Immediate(1));
  bind(&msb_not_set);
  Cvtqsi2sd(dst, kScratchRegister);
  addsd(dst, dst);
  bind(&done);
}

void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
  movq(kScratchRegister, src);
  Cvtqui2sd(dst, kScratchRegister);
}

957
void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
958 959 960 961 962 963 964 965
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2si(dst, src);
  } else {
    cvttss2si(dst, src);
  }
}

966
void TurboAssembler::Cvttss2si(Register dst, Operand src) {
967 968 969 970 971 972 973 974
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2si(dst, src);
  } else {
    cvttss2si(dst, src);
  }
}

975
void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
976 977 978 979 980 981 982 983
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2si(dst, src);
  } else {
    cvttsd2si(dst, src);
  }
}

984
void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
985 986 987 988 989 990 991 992
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2si(dst, src);
  } else {
    cvttsd2si(dst, src);
  }
}

993
void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
994 995 996 997 998 999 1000 1001
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2siq(dst, src);
  } else {
    cvttss2siq(dst, src);
  }
}

1002
void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
1003 1004 1005 1006 1007 1008 1009 1010
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttss2siq(dst, src);
  } else {
    cvttss2siq(dst, src);
  }
}

1011
void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
1012 1013 1014 1015 1016 1017 1018 1019
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2siq(dst, src);
  } else {
    cvttsd2siq(dst, src);
  }
}

1020
void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
1021 1022 1023 1024 1025 1026 1027 1028
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vcvttsd2siq(dst, src);
  } else {
    cvttsd2siq(dst, src);
  }
}

namespace {
template <typename OperandOrXMMRegister, bool is_double>
void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
                          OperandOrXMMRegister src, Label* fail) {
  Label success;
  // There does not exist a native float-to-uint instruction, so we have to use
  // a float-to-int, and postprocess the result.
  if (is_double) {
    tasm->Cvttsd2siq(dst, src);
  } else {
    tasm->Cvttss2siq(dst, src);
  }
  // If the result of the conversion is positive, we are already done.
  tasm->testq(dst, dst);
  tasm->j(positive, &success);
  // The result of the first conversion was negative, which means that the
  // input value was not within the positive int64 range. We subtract 2^63
  // and convert it again to see if it is within the uint64 range.
  if (is_double) {
    tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
    tasm->addsd(kScratchDoubleReg, src);
    tasm->Cvttsd2siq(dst, kScratchDoubleReg);
  } else {
    tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
    tasm->addss(kScratchDoubleReg, src);
    tasm->Cvttss2siq(dst, kScratchDoubleReg);
  }
  tasm->testq(dst, dst);
  // The only possible negative value here is 0x8000000000000000, which is
  // used on x64 to indicate an integer overflow.
  tasm->j(negative, fail ? fail : &success);
  // The input value is within uint64 range and the second conversion worked
  // successfully, but we still have to undo the subtraction we did
  // earlier.
  tasm->Set(kScratchRegister, 0x8000000000000000);
  tasm->orq(dst, kScratchRegister);
  tasm->bind(&success);
}
}  // namespace

void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* success) {
  ConvertFloatToUint64<Operand, true>(this, dst, src, success);
}

void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* success) {
  ConvertFloatToUint64<XMMRegister, true>(this, dst, src, success);
}

void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* success) {
  ConvertFloatToUint64<Operand, false>(this, dst, src, success);
}

void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* success) {
  ConvertFloatToUint64<XMMRegister, false>(this, dst, src, success);
}
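
// Worked example for ConvertFloatToUint64 above: an input of 2^63 overflows
// the first signed conversion and produces the negative sentinel, so 2^63 is
// subtracted from the input, the second conversion yields 0, and or'ing
// 0x8000000000000000 back in restores the expected 2^63; inputs that are still
// negative after the subtraction are genuine failures and take the fail label
// when one is provided.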

void TurboAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x);
  }
}

void TurboAssembler::Set(Operand dst, intptr_t x) {
  if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    Set(kScratchRegister, x);
    movq(dst, kScratchRegister);
  }
}


// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

Register TurboAssembler::GetSmiConstant(Smi source) {
  STATIC_ASSERT(kSmiTag == 0);
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  Move(kScratchRegister, source);
  return kScratchRegister;
}

void TurboAssembler::Move(Register dst, Smi source) {
  STATIC_ASSERT(kSmiTag == 0);
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
  } else {
    Move(dst, source.ptr(), RelocInfo::NONE);
  }
}

1131
void TurboAssembler::Move(Register dst, ExternalReference ext) {
1132 1133 1134 1135 1136
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, ext);
      return;
    }
1137
  }
1138
  movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
1139 1140
}

1141
void MacroAssembler::SmiTag(Register dst, Register src) {
1142
  STATIC_ASSERT(kSmiTag == 0);
1143
  if (dst != src) {
1144
    movq(dst, src);
1145
  }
1146
  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1147
  shlq(dst, Immediate(kSmiShift));
1148 1149
}

1150
void TurboAssembler::SmiUntag(Register dst, Register src) {
1151
  STATIC_ASSERT(kSmiTag == 0);
1152
  if (dst != src) {
1153
    movq(dst, src);
1154
  }
1155
  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1156
  sarq(dst, Immediate(kSmiShift));
1157 1158
}

1159
void TurboAssembler::SmiUntag(Register dst, Operand src) {
1160 1161
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1162 1163
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
1164
  } else {
1165
    DCHECK(SmiValuesAre31Bits());
1166 1167 1168
#ifdef V8_COMPRESS_POINTERS
    movsxlq(dst, src);
#else
1169
    movq(dst, src);
1170
#endif
1171
    sarq(dst, Immediate(kSmiShift));
1172
  }
1173 1174
}

1175
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1176 1177
  AssertSmi(smi1);
  AssertSmi(smi2);
1178
  cmp_tagged(smi1, smi2);
1179 1180
}

1181
void MacroAssembler::SmiCompare(Register dst, Smi src) {
1182
  AssertSmi(dst);
1183 1184 1185
  Cmp(dst, src);
}

1186
void MacroAssembler::Cmp(Register dst, Smi src) {
1187
  DCHECK_NE(dst, kScratchRegister);
1188
  if (src->value() == 0) {
1189
    test_tagged(dst, dst);
1190
  } else {
1191
    Register constant_reg = GetSmiConstant(src);
1192
    cmp_tagged(dst, constant_reg);
1193 1194 1195
  }
}

1196
void MacroAssembler::SmiCompare(Register dst, Operand src) {
1197 1198
  AssertSmi(dst);
  AssertSmi(src);
1199
  cmp_tagged(dst, src);
1200 1201
}

1202
void MacroAssembler::SmiCompare(Operand dst, Register src) {
1203 1204
  AssertSmi(dst);
  AssertSmi(src);
1205
  cmp_tagged(dst, src);
1206 1207
}

1208
void MacroAssembler::SmiCompare(Operand dst, Smi src) {
1209
  AssertSmi(dst);
1210 1211 1212
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
1213
    DCHECK(SmiValuesAre31Bits());
1214 1215
    cmpl(dst, Immediate(src));
  }
1216 1217
}

1218
void MacroAssembler::Cmp(Operand dst, Smi src) {
1219 1220
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
1221
  DCHECK(!dst.AddressUsesRegister(smi_reg));
1222
  cmp_tagged(dst, smi_reg);
1223 1224 1225
}


1226
Condition TurboAssembler::CheckSmi(Register src) {
1227
  STATIC_ASSERT(kSmiTag == 0);
1228 1229 1230 1231
  testb(src, Immediate(kSmiTagMask));
  return zero;
}

1232
Condition TurboAssembler::CheckSmi(Operand src) {
1233
  STATIC_ASSERT(kSmiTag == 0);
1234 1235 1236 1237
  testb(src, Immediate(kSmiTagMask));
  return zero;
}

1238
void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}

void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}

1251 1252 1253 1254 1255
void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}
1256

1257
void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
1258
  if (constant->value() != 0) {
1259 1260 1261 1262
    if (SmiValuesAre32Bits()) {
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
    } else {
1263
      DCHECK(SmiValuesAre31Bits());
1264
      if (kTaggedSize == kInt64Size) {
1265 1266 1267 1268 1269 1270
        // Sign-extend value after addition
        movl(kScratchRegister, dst);
        addl(kScratchRegister, Immediate(constant));
        movsxlq(kScratchRegister, kScratchRegister);
        movq(dst, kScratchRegister);
      } else {
1271 1272
        DCHECK_EQ(kTaggedSize, kInt32Size);
        addl(dst, Immediate(constant));
1273
      }
1274
    }
1275 1276 1277
  }
}

1278 1279 1280
SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
1281
  if (SmiValuesAre32Bits()) {
1282
    DCHECK(is_uint6(shift));
1283 1284
    // There is a possible optimization if shift is in the range 60-63, but that
    // will (and must) never happen.
1285
    if (dst != src) {
1286
      movq(dst, src);
1287 1288
    }
    if (shift < kSmiShift) {
1289
      sarq(dst, Immediate(kSmiShift - shift));
1290
    } else {
1291
      shlq(dst, Immediate(shift - kSmiShift));
1292 1293
    }
    return SmiIndex(dst, times_1);
1294
  } else {
1295
    DCHECK(SmiValuesAre31Bits());
1296
    if (dst != src) {
1297
      mov_tagged(dst, src);
1298 1299 1300 1301
    }
    // We have to sign extend the index register to 64-bit as the SMI might
    // be negative.
    movsxlq(dst, dst);
1302 1303 1304 1305 1306 1307 1308
    if (shift < kSmiShift) {
      sarq(dst, Immediate(kSmiShift - shift));
    } else if (shift != kSmiShift) {
      if (shift - kSmiShift <= static_cast<int>(times_8)) {
        return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
      }
      shlq(dst, Immediate(shift - kSmiShift));
1309
    }
1310
    return SmiIndex(dst, times_1);
1311 1312 1313
  }
}

void TurboAssembler::Push(Smi source) {
  intptr_t smi = static_cast<intptr_t>(source.ptr());
  if (is_int32(smi)) {
    Push(Immediate(static_cast<int32_t>(smi)));
    return;
  }
  int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
  int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
  if (first_byte_set == last_byte_set) {
    // This sequence has only 7 bytes, compared to the 12 bytes below.
    Push(Immediate(0));
    movb(Operand(rsp, first_byte_set),
         Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
    return;
  }
  Register constant = GetSmiConstant(source);
  Push(constant);
}
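
// Worked example for the single-byte fast path above: a smi payload of
// 0x0000010000000000 has first_byte_set == last_byte_set == 5, so the code
// pushes zero and then writes the byte 0x01 at rsp + 5 instead of
// materializing the full 64-bit constant in a register first.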

// ----------------------------------------------------------------------------

void TurboAssembler::Move(Register dst, Register src) {
  if (dst != src) {
    movq(dst, src);
  }
}

void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
                              Register src1) {
  if (dst0 != src1) {
    // Normal case: Writing to dst0 does not destroy src1.
    Move(dst0, src0);
    Move(dst1, src1);
  } else if (dst1 != src0) {
    // Only dst0 and src1 are the same register,
    // but writing to dst1 does not destroy src0.
    Move(dst1, src1);
    Move(dst0, src0);
  } else {
    // dst0 == src1, and dst1 == src0, a swap is required:
    // dst0 \/ src0
    // dst1 /\ src1
    xchgq(dst0, dst1);
  }
}
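
// Usage sketch (hypothetical registers): MovePair(rax, rcx, rcx, rax) falls
// into the last case, because dst0 aliases src1 and dst1 aliases src0, and is
// emitted as a single xchgq instead of two moves that would clobber one of
// the sources.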

void TurboAssembler::MoveNumber(Register dst, double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) {
    Move(dst, Smi::FromInt(smi));
  } else {
    movq_heap_number(dst, value);
  }
}

void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    Xorps(dst, dst);
  } else {
    unsigned nlz = base::bits::CountLeadingZeros(src);
    unsigned ntz = base::bits::CountTrailingZeros(src);
    unsigned pop = base::bits::CountPopulation(src);
    DCHECK_NE(0u, pop);
    if (pop + ntz + nlz == 32) {
      Pcmpeqd(dst, dst);
      if (ntz) Pslld(dst, static_cast<byte>(ntz + nlz));
      if (nlz) Psrld(dst, static_cast<byte>(nlz));
    } else {
      movl(kScratchRegister, Immediate(src));
      Movd(dst, kScratchRegister);
    }
  }
}
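
// Worked example for the branch above: src == 0x00FFFF00 has nlz == 8,
// ntz == 8 and pop == 16, so pop + ntz + nlz == 32 and the constant is built
// with Pcmpeqd (all ones) followed by Pslld(16) and Psrld(8), avoiding a trip
// through kScratchRegister.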

1388
void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
1389
  if (src == 0) {
1390
    Xorpd(dst, dst);
1391
  } else {
1392 1393
    unsigned nlz = base::bits::CountLeadingZeros(src);
    unsigned ntz = base::bits::CountTrailingZeros(src);
1394
    unsigned pop = base::bits::CountPopulation(src);
1395
    DCHECK_NE(0u, pop);
1396
    if (pop + ntz + nlz == 64) {
1397
      Pcmpeqd(dst, dst);
1398 1399
      if (ntz) Psllq(dst, static_cast<byte>(ntz + nlz));
      if (nlz) Psrlq(dst, static_cast<byte>(nlz));
1400
    } else {
1401 1402 1403 1404 1405 1406
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (upper == 0) {
        Move(dst, lower);
      } else {
        movq(kScratchRegister, src);
1407
        Movq(dst, kScratchRegister);
1408
      }
1409 1410 1411 1412
    }
  }
}

1413 1414 1415
// ----------------------------------------------------------------------------

void MacroAssembler::Absps(XMMRegister dst) {
1416 1417
  Andps(dst, ExternalReferenceAsOperand(
                 ExternalReference::address_of_float_abs_constant()));
1418 1419 1420
}

void MacroAssembler::Negps(XMMRegister dst) {
1421 1422
  Xorps(dst, ExternalReferenceAsOperand(
                 ExternalReference::address_of_float_neg_constant()));
1423 1424 1425
}

void MacroAssembler::Abspd(XMMRegister dst) {
1426 1427
  Andps(dst, ExternalReferenceAsOperand(
                 ExternalReference::address_of_double_abs_constant()));
1428 1429 1430
}

void MacroAssembler::Negpd(XMMRegister dst) {
1431 1432
  Xorps(dst, ExternalReferenceAsOperand(
                 ExternalReference::address_of_double_neg_constant()));
1433
}
1434

1435
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1436
  AllowDeferredHandleDereference smi_check;
1437
  if (source->IsSmi()) {
1438
    Cmp(dst, Smi::cast(*source));
1439
  } else {
1440
    Move(kScratchRegister, Handle<HeapObject>::cast(source));
1441
    cmp_tagged(dst, kScratchRegister);
1442
  }
1443 1444
}

1445
void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
1446
  AllowDeferredHandleDereference smi_check;
1447
  if (source->IsSmi()) {
1448
    Cmp(dst, Smi::cast(*source));
1449
  } else {
1450
    Move(kScratchRegister, Handle<HeapObject>::cast(source));
1451
    cmp_tagged(dst, kScratchRegister);
1452
  }
1453 1454
}

void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
                                     unsigned higher_limit, Label* on_in_range,
                                     Label::Distance near_jump) {
  if (lower_limit != 0) {
    leal(kScratchRegister, Operand(value, 0u - lower_limit));
    cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
  } else {
    cmpl(value, Immediate(higher_limit));
  }
  j(below_equal, on_in_range, near_jump);
}
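
// Worked example: JumpIfIsInRange(value, 'a', 'z', &is_lower) subtracts 'a'
// via leal and then performs a single unsigned compare against 'z' - 'a', so
// one branch covers both bounds; values below 'a' wrap around to large
// unsigned numbers and fail the below_equal check.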

1467
void TurboAssembler::Push(Handle<HeapObject> source) {
1468 1469 1470 1471
  Move(kScratchRegister, source);
  Push(kScratchRegister);
}

1472
void TurboAssembler::Move(Register result, Handle<HeapObject> object,
1473
                          RelocInfo::Mode rmode) {
1474 1475 1476 1477 1478
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadConstant(result, object);
      return;
    }
1479
  }
1480 1481 1482 1483 1484 1485 1486
  if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
    int compressed_embedded_object_index = AddCompressedEmbeddedObject(object);
    movl(result, Immediate(compressed_embedded_object_index, rmode));
  } else {
    DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
    movq(result, Immediate64(object.address(), rmode));
  }
1487
}
1488

1489
void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
1490 1491
                          RelocInfo::Mode rmode) {
  Move(kScratchRegister, object, rmode);
1492
  movq(dst, kScratchRegister);
1493 1494
}

1495 1496 1497
void TurboAssembler::MoveStringConstant(Register result,
                                        const StringConstantBase* string,
                                        RelocInfo::Mode rmode) {
1498
  movq_string(result, string);
1499 1500
}

1501 1502
void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
1503
    addq(rsp, Immediate(stack_elements * kSystemPointerSize));
1504 1505 1506 1507
  }
}


1508 1509
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
                                            Register scratch) {
1510
  DCHECK_GT(stack_elements, 0);
1511
  if (stack_elements == 1) {
1512 1513 1514 1515 1516 1517 1518 1519 1520
    popq(MemOperand(rsp, 0));
    return;
  }

  PopReturnAddressTo(scratch);
  Drop(stack_elements);
  PushReturnAddressFrom(scratch);
}

void TurboAssembler::Push(Register src) { pushq(src); }

void TurboAssembler::Push(Operand src) { pushq(src); }

void MacroAssembler::PushQuad(Operand src) { pushq(src); }

void TurboAssembler::Push(Immediate value) { pushq(value); }

void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); }

void MacroAssembler::Pop(Register dst) { popq(dst); }

void MacroAssembler::Pop(Operand dst) { popq(dst); }

void MacroAssembler::PopQuad(Operand dst) { popq(dst); }

void TurboAssembler::Jump(ExternalReference ext) {
1538
  LoadAddress(kScratchRegister, ext);
1539 1540 1541
  jmp(kScratchRegister);
}

1542
void TurboAssembler::Jump(Operand op) { jmp(op); }
1543

1544
void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1545 1546
  Move(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
1547 1548
}

1549
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
1550
                          Condition cc) {
1551 1552 1553 1554 1555 1556
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code_object));
  if (options().inline_offheap_trampolines) {
    int builtin_index = Builtins::kNoBuiltinId;
    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index)) {
1557 1558 1559 1560 1561
      Label skip;
      if (cc != always) {
        if (cc == never) return;
        j(NegateCondition(cc), &skip, Label::kNear);
      }
1562 1563 1564 1565 1566 1567
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1568
      jmp(kScratchRegister);
1569
      bind(&skip);
1570 1571
      return;
    }
1572
  }
1573
  j(cc, code_object, rmode);
1574 1575
}

1576
void MacroAssembler::JumpToInstructionStream(Address entry) {
1577
  Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1578 1579 1580
  jmp(kOffHeapTrampolineRegister);
}

1581
void TurboAssembler::Call(ExternalReference ext) {
1582
  LoadAddress(kScratchRegister, ext);
1583 1584 1585
  call(kScratchRegister);
}

1586
void TurboAssembler::Call(Operand op) {
1587
  if (!CpuFeatures::IsSupported(ATOM)) {
1588 1589
    call(op);
  } else {
1590
    movq(kScratchRegister, op);
1591 1592 1593 1594
    call(kScratchRegister);
  }
}

1595
void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1596 1597
  Move(kScratchRegister, destination, rmode);
  call(kScratchRegister);
1598 1599
}

1600
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code_object));
  if (options().inline_offheap_trampolines) {
    int builtin_index = Builtins::kNoBuiltinId;
    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index)) {
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1613 1614 1615
      call(kScratchRegister);
      return;
    }
1616
  }
1617
  DCHECK(RelocInfo::IsCodeTarget(rmode));
1618
  call(code_object, rmode);
1619 1620
}

1621
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
1622 1623
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  STATIC_ASSERT(kSmiShiftSize == 0);
1624 1625 1626
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);

1627 1628 1629 1630 1631
  // The builtin_pointer register contains the builtin index as a Smi.
  // Untagging is folded into the indexing operand below (we use times_4 instead
  // of times_8 since smis are already shifted by one).
  Call(Operand(kRootRegister, builtin_pointer, times_4,
               IsolateData::builtin_entry_table_offset()));
1632
#else   // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
1633 1634 1635
  STATIC_ASSERT(kSmiShiftSize == 31);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
1636 1637 1638 1639 1640

  // The builtin_pointer register contains the builtin index as a Smi.
  SmiUntag(builtin_pointer, builtin_pointer);
  Call(Operand(kRootRegister, builtin_pointer, times_8,
               IsolateData::builtin_entry_table_offset()));
1641
#endif  // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
1642 1643
}

1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656
void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.

  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
1657
    Label if_code_is_off_heap, out;
1658

1659 1660 1661 1662 1663 1664
    // Check whether the Code object is an off-heap trampoline. If so, call its
    // (off-heap) entry point directly without going through the (on-heap)
    // trampoline.  Otherwise, just call the Code object as always.
    testl(FieldOperand(code_object, Code::kFlagsOffset),
          Immediate(Code::IsOffHeapTrampoline::kMask));
    j(not_equal, &if_code_is_off_heap);
1665

1666
    // Not an off-heap trampoline, the entry point is at
1667 1668
    // Code::raw_instruction_start().
    Move(destination, code_object);
1669
    addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
1670 1671
    jmp(&out);

1672
    // An off-heap trampoline, the entry point is loaded from the builtin entry
1673
    // table.
1674
    bind(&if_code_is_off_heap);
1675
    movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
1676 1677 1678
    movq(destination,
         Operand(kRootRegister, destination, times_system_pointer_size,
                 IsolateData::builtin_entry_table_offset()));
1679 1680 1681 1682

    bind(&out);
  } else {
    Move(destination, code_object);
1683
    addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696
  }
}

void TurboAssembler::CallCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  call(code_object);
}

void TurboAssembler::JumpCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  jmp(code_object);
}

1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735
void TurboAssembler::RetpolineCall(Register reg) {
  Label setup_return, setup_target, inner_indirect_branch, capture_spec;

  jmp(&setup_return);  // Jump past the entire retpoline below.

  bind(&inner_indirect_branch);
  call(&setup_target);

  bind(&capture_spec);
  pause();
  jmp(&capture_spec);

  bind(&setup_target);
  movq(Operand(rsp, 0), reg);
  ret(0);

  bind(&setup_return);
  call(&inner_indirect_branch);  // Callee will return after this instruction.
}

void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
  Move(kScratchRegister, destination, rmode);
  RetpolineCall(kScratchRegister);
}

void TurboAssembler::RetpolineJump(Register reg) {
  Label setup_target, capture_spec;

  call(&setup_target);

  bind(&capture_spec);
  pause();
  jmp(&capture_spec);

  bind(&setup_target);
  movq(Operand(rsp, 0), reg);
  ret(0);
}

1736
void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
1737
  if (imm8 == 0) {
1738
    Movd(dst, src);
1739 1740 1741 1742 1743 1744 1745
    return;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
1746
  DCHECK_EQ(1, imm8);
1747 1748 1749 1750
  movq(dst, src);
  shrq(dst, Immediate(32));
}

1751
void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
1752 1753 1754 1755 1756
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
1757
  Movd(kScratchDoubleReg, src);
1758
  if (imm8 == 1) {
1759
    punpckldq(dst, kScratchDoubleReg);
1760 1761
  } else {
    DCHECK_EQ(0, imm8);
1762
    Movss(dst, kScratchDoubleReg);
1763 1764 1765
  }
}

1766
void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
1767 1768 1769 1770 1771
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
1772
  Movd(kScratchDoubleReg, src);
1773
  if (imm8 == 1) {
1774
    punpckldq(dst, kScratchDoubleReg);
1775 1776
  } else {
    DCHECK_EQ(0, imm8);
1777
    Movss(dst, kScratchDoubleReg);
1778 1779 1780
  }
}

1781
void TurboAssembler::Lzcntl(Register dst, Register src) {
1782 1783 1784 1785 1786
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
1787 1788 1789 1790 1791 1792 1793 1794
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}

1795
void TurboAssembler::Lzcntl(Register dst, Operand src) {
1796 1797 1798 1799 1800
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
1801 1802 1803 1804 1805 1806 1807 1808
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}

1809
void TurboAssembler::Lzcntq(Register dst, Register src) {
1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}

1823
void TurboAssembler::Lzcntq(Register dst, Operand src) {
1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsrq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 127);  // 127^63 == 64
  bind(&not_zero_src);
  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
}

1837
void TurboAssembler::Tzcntq(Register dst, Register src) {
1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}

1851
void TurboAssembler::Tzcntq(Register dst, Operand src) {
1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntq(dst, src);
    return;
  }
  Label not_zero_src;
  bsfq(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
  Set(dst, 64);
  bind(&not_zero_src);
}

1865
void TurboAssembler::Tzcntl(Register dst, Register src) {
1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}

1878
void TurboAssembler::Tzcntl(Register dst, Operand src) {
1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsfl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}

1891
void TurboAssembler::Popcntl(Register dst, Register src) {
1892 1893 1894 1895 1896 1897 1898 1899
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntl(dst, src);
    return;
  }
  UNREACHABLE();
}

1900
void TurboAssembler::Popcntl(Register dst, Operand src) {
1901 1902 1903 1904 1905 1906 1907 1908
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntl(dst, src);
    return;
  }
  UNREACHABLE();
}

1909
void TurboAssembler::Popcntq(Register dst, Register src) {
1910 1911 1912 1913 1914 1915 1916 1917
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntq(dst, src);
    return;
  }
  UNREACHABLE();
}

1918
void TurboAssembler::Popcntq(Register dst, Operand src) {
1919 1920 1921 1922 1923 1924 1925 1926 1927
  if (CpuFeatures::IsSupported(POPCNT)) {
    CpuFeatureScope scope(this, POPCNT);
    popcntq(dst, src);
    return;
  }
  UNREACHABLE();
}


1928
void MacroAssembler::Pushad() {
1929 1930 1931 1932
  Push(rax);
  Push(rcx);
  Push(rdx);
  Push(rbx);
1933
  // Not pushing rsp or rbp.
1934 1935 1936 1937
  Push(rsi);
  Push(rdi);
  Push(r8);
  Push(r9);
1938
  // r10 is kScratchRegister.
1939
  Push(r11);
bmeurer's avatar
bmeurer committed
1940
  Push(r12);
1941
  // r13 is kRootRegister.
1942 1943
  Push(r14);
  Push(r15);
bmeurer's avatar
bmeurer committed
1944
  STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
1945
  // Use lea for symmetry with Popad.
1946 1947
  int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
                 kSystemPointerSize;
1948
  leaq(rsp, Operand(rsp, -sp_delta));
1949 1950 1951 1952
}


void MacroAssembler::Popad() {
1953
  // Popad must not change the flags, so use lea instead of addq.
1954 1955
  int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
                 kSystemPointerSize;
1956
  leaq(rsp, Operand(rsp, sp_delta));
1957 1958
  Pop(r15);
  Pop(r14);
bmeurer's avatar
bmeurer committed
1959
  Pop(r12);
1960 1961 1962 1963 1964 1965 1966 1967 1968
  Pop(r11);
  Pop(r9);
  Pop(r8);
  Pop(rdi);
  Pop(rsi);
  Pop(rbx);
  Pop(rdx);
  Pop(rcx);
  Pop(rax);
1969 1970 1971
}


1972
// Order general registers are pushed by Pushad:
1973
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
1974 1975
const int
MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987
    0,
    1,
    2,
    3,
    -1,
    -1,
    4,
    5,
    6,
    7,
    -1,
    8,
1988
    9,
bmeurer's avatar
bmeurer committed
1989 1990 1991
    -1,
    10,
    11
1992 1993
};

1994
void MacroAssembler::PushStackHandler() {
1995
  // Adjust this code if not the case.
1996
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
1997 1998
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

1999 2000
  Push(Immediate(0));  // Padding.

2001
  // Link the current handler as the next handler.
2002 2003
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
2004
  Push(ExternalReferenceAsOperand(handler_address));
2005

2006
  // Set this new handler as the current one.
2007
  movq(ExternalReferenceAsOperand(handler_address), rsp);
lrn@chromium.org's avatar
lrn@chromium.org committed
2008 2009 2010
}


2011
void MacroAssembler::PopStackHandler() {
2012
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2013 2014
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
2015
  Pop(ExternalReferenceAsOperand(handler_address));
2016
  addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
2017 2018
}

2019
void TurboAssembler::Ret() { ret(0); }
2020

2021
void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
2022 2023 2024
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
2025
    PopReturnAddressTo(scratch);
2026
    addq(rsp, Immediate(bytes_dropped));
2027
    PushReturnAddressFrom(scratch);
2028 2029 2030 2031
    ret(0);
  }
}

2032 2033 2034
void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
2035 2036
  LoadTaggedPointerField(map,
                         FieldOperand(heap_object, HeapObject::kMapOffset));
2037 2038 2039 2040 2041
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2042
  cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
2043 2044
}

2045
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
2046 2047
                               XMMRegister scratch, Label* lost_precision,
                               Label* is_nan, Label::Distance dst) {
2048
  Cvttsd2si(result_reg, input_reg);
2049 2050
  Cvtlsi2sd(kScratchDoubleReg, result_reg);
  Ucomisd(kScratchDoubleReg, input_reg);
2051 2052
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);  // NaN.
2053 2054 2055
}


2056 2057 2058
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
2059
    Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
2060
  }
2061 2062 2063
}


2064 2065 2066
void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
2067
    Check(is_smi, AbortReason::kOperandIsNotASmi);
2068
  }
2069 2070
}

2071
void MacroAssembler::AssertSmi(Operand object) {
2072 2073
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
2074
    Check(is_smi, AbortReason::kOperandIsNotASmi);
2075
  }
2076 2077
}

2078
void TurboAssembler::AssertZeroExtended(Register int32_register) {
2079
  if (emit_debug_code()) {
2080
    DCHECK_NE(int32_register, kScratchRegister);
2081
    movq(kScratchRegister, int64_t{0x0000000100000000});
2082
    cmpq(kScratchRegister, int32_register);
2083
    Check(above_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
2084
  }
2085 2086
}

2087 2088 2089 2090 2091
void MacroAssembler::AssertConstructor(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
    Push(object);
2092 2093
    LoadTaggedPointerField(object,
                           FieldOperand(object, HeapObject::kMapOffset));
2094 2095 2096 2097 2098 2099
    testb(FieldOperand(object, Map::kBitFieldOffset),
          Immediate(Map::IsConstructorBit::kMask));
    Pop(object);
    Check(not_zero, AbortReason::kOperandIsNotAConstructor);
  }
}
2100

2101 2102 2103
void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
2104
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
2105 2106 2107
    Push(object);
    CmpObjectType(object, JS_FUNCTION_TYPE, object);
    Pop(object);
2108
    Check(equal, AbortReason::kOperandIsNotAFunction);
2109 2110 2111 2112
  }
}


2113 2114 2115
void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
2116
    Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
2117 2118 2119
    Push(object);
    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
    Pop(object);
2120
    Check(equal, AbortReason::kOperandIsNotABoundFunction);
2121 2122 2123
  }
}

2124
void MacroAssembler::AssertGeneratorObject(Register object) {
2125 2126
  if (!emit_debug_code()) return;
  testb(object, Immediate(kSmiTagMask));
2127
  Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
2128 2129 2130 2131

  // Load map
  Register map = object;
  Push(object);
2132
  LoadTaggedPointerField(map, FieldOperand(object, HeapObject::kMapOffset));
2133

2134
  Label do_check;
2135 2136
  // Check if JSGeneratorObject
  CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
2137 2138 2139 2140
  j(equal, &do_check);

  // Check if JSAsyncFunctionObject
  CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
2141
  j(equal, &do_check);
2142 2143 2144 2145 2146 2147 2148

  // Check if JSAsyncGeneratorObject
  CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);

  bind(&do_check);
  // Restore generator object to register and perform assertion
  Pop(object);
2149
  Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
2150 2151
}

2152 2153 2154 2155 2156 2157 2158
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    Cmp(object, isolate()->factory()->undefined_value());
    j(equal, &done_checking);
    Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
2159
    Assert(equal, AbortReason::kExpectedUndefinedOrCell);
2160 2161 2162 2163
    bind(&done_checking);
  }
}

2164
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
2165
  cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
2166 2167
  j(equal, target_if_cleared);

2168
  andq(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
2169 2170
}

2171
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2172
  DCHECK_GT(value, 0);
2173
  if (FLAG_native_code_counters && counter->Enabled()) {
2174
    Operand counter_operand =
2175
        ExternalReferenceAsOperand(ExternalReference::Create(counter));
2176 2177 2178
    // This operation has to be exactly 32-bit wide in case the external
    // reference table redirects the counter to a uint32_t dummy_stats_counter_
    // field.
2179
    if (value == 1) {
2180
      incl(counter_operand);
2181
    } else {
2182
      addl(counter_operand, Immediate(value));
2183 2184 2185 2186 2187 2188
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2189
  DCHECK_GT(value, 0);
2190
  if (FLAG_native_code_counters && counter->Enabled()) {
2191
    Operand counter_operand =
2192
        ExternalReferenceAsOperand(ExternalReference::Create(counter));
2193 2194 2195
    // This operation has to be exactly 32-bit wide in case the external
    // reference table redirects the counter to a uint32_t dummy_stats_counter_
    // field.
2196
    if (value == 1) {
2197
      decl(counter_operand);
2198
    } else {
2199
      subl(counter_operand, Immediate(value));
2200 2201 2202 2203
    }
  }
}

2204 2205 2206 2207 2208
void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  Load(rbx, restart_fp);
2209
  testq(rbx, rbx);
2210 2211 2212 2213 2214 2215

  Label dont_drop;
  j(zero, &dont_drop, Label::kNear);
  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);

  bind(&dont_drop);
serya@chromium.org's avatar
serya@chromium.org committed
2216
}
2217

2218
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
2219
                                        Register caller_args_count_reg,
2220
                                        Register scratch0, Register scratch1) {
2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the destination address where we will put the return address
  // after we drop current frame.
  Register new_sp_reg = scratch0;
  if (callee_args_count.is_reg()) {
2234
    subq(caller_args_count_reg, callee_args_count.reg());
2235 2236 2237
    leaq(new_sp_reg,
         Operand(rbp, caller_args_count_reg, times_system_pointer_size,
                 StandardFrameConstants::kCallerPCOffset));
2238
  } else {
2239
    leaq(new_sp_reg,
2240
         Operand(rbp, caller_args_count_reg, times_system_pointer_size,
2241 2242
                 StandardFrameConstants::kCallerPCOffset -
                     callee_args_count.immediate() * kSystemPointerSize));
2243 2244 2245
  }

  if (FLAG_debug_code) {
2246
    cmpq(rsp, new_sp_reg);
2247
    Check(below, AbortReason::kStackAccessBelowStackPointer);
2248 2249 2250 2251 2252 2253
  }

  // Copy return address from caller's frame to current frame's return address
  // to avoid its trashing and let the following loop copy it to the right
  // place.
  Register tmp_reg = scratch1;
2254 2255
  movq(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
  movq(Operand(rsp, 0), tmp_reg);
2256 2257 2258

  // Restore caller's frame pointer now as it could be overwritten by
  // the copying loop.
2259
  movq(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2260 2261 2262 2263

  // +2 here is to copy both receiver and return address.
  Register count_reg = caller_args_count_reg;
  if (callee_args_count.is_reg()) {
2264
    leaq(count_reg, Operand(callee_args_count.reg(), 2));
2265
  } else {
2266
    movq(count_reg, Immediate(callee_args_count.immediate() + 2));
2267 2268 2269 2270 2271 2272 2273 2274
    // TODO(ishell): Unroll copying loop for small immediate values.
  }

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).
  Label loop, entry;
  jmp(&entry, Label::kNear);
  bind(&loop);
2275
  decq(count_reg);
2276 2277
  movq(tmp_reg, Operand(rsp, count_reg, times_system_pointer_size, 0));
  movq(Operand(new_sp_reg, count_reg, times_system_pointer_size, 0), tmp_reg);
2278
  bind(&entry);
2279
  cmpq(count_reg, Immediate(0));
2280 2281 2282
  j(not_equal, &loop, Label::kNear);

  // Leave current frame.
2283
  movq(rsp, new_sp_reg);
2284
}
2285

2286
void MacroAssembler::InvokeFunction(Register function, Register new_target,
2287
                                    const ParameterCount& actual,
2288
                                    InvokeFlag flag) {
2289 2290
  LoadTaggedPointerField(
      rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2291
  movzxwq(rbx,
2292
          FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
2293 2294

  ParameterCount expected(rbx);
2295
  InvokeFunction(function, new_target, expected, actual, flag);
2296 2297
}

2298
void MacroAssembler::InvokeFunction(Register function, Register new_target,
2299 2300
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
2301
                                    InvokeFlag flag) {
2302
  DCHECK(function == rdi);
2303 2304
  LoadTaggedPointerField(rsi,
                         FieldOperand(function, JSFunction::kContextOffset));
2305
  InvokeFunctionCode(rdi, new_target, expected, actual, flag);
2306 2307
}

2308 2309 2310
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
2311
                                        InvokeFlag flag) {
2312
  // You can't call a function without a valid frame.
2313
  DCHECK(flag == JUMP_FUNCTION || has_frame());
2314 2315
  DCHECK(function == rdi);
  DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
2316

2317 2318
  // On function call, call into the debugger if necessary.
  CheckDebugHook(function, new_target, expected, actual);
2319 2320

  // Clear the new.target register if not given.
2321
  if (!new_target.is_valid()) {
2322
    LoadRoot(rdx, RootIndex::kUndefinedValue);
2323 2324
  }

2325
  Label done;
2326
  bool definitely_mismatches = false;
2327 2328
  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
                 Label::kNear);
2329
  if (!definitely_mismatches) {
2330 2331 2332
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
2333
    static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
2334 2335
    LoadTaggedPointerField(rcx,
                           FieldOperand(function, JSFunction::kCodeOffset));
2336
    if (flag == CALL_FUNCTION) {
2337
      CallCodeObject(rcx);
2338
    } else {
2339
      DCHECK(flag == JUMP_FUNCTION);
2340
      JumpCodeObject(rcx);
2341 2342
    }
    bind(&done);
2343 2344 2345
  }
}

2346
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2347
                                    const ParameterCount& actual, Label* done,
2348
                                    bool* definitely_mismatches,
2349
                                    InvokeFlag flag,
2350
                                    Label::Distance near_jump) {
2351
  bool definitely_matches = false;
2352
  *definitely_mismatches = false;
2353 2354
  Label invoke;
  if (expected.is_immediate()) {
2355
    DCHECK(actual.is_immediate());
2356
    Set(rax, actual.immediate());
2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      if (expected.immediate() ==
              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
2368
        *definitely_mismatches = true;
2369 2370 2371 2372 2373 2374 2375 2376
        Set(rbx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
2377
      Set(rax, actual.immediate());
2378
      cmpq(expected.reg(), Immediate(actual.immediate()));
2379
      j(equal, &invoke, Label::kNear);
2380 2381
      DCHECK(expected.reg() == rbx);
    } else if (expected.reg() != actual.reg()) {
2382 2383
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
2384
      cmpq(expected.reg(), actual.reg());
2385
      j(equal, &invoke, Label::kNear);
2386 2387
      DCHECK(actual.reg() == rax);
      DCHECK(expected.reg() == rbx);
2388
    } else {
2389
      definitely_matches = true;
2390
      Move(rax, actual.reg());
2391 2392 2393 2394
    }
  }

  if (!definitely_matches) {
2395
    Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
2396 2397
    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
2398 2399 2400
      if (!*definitely_mismatches) {
        jmp(done, near_jump);
      }
2401 2402 2403 2404 2405 2406 2407
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}

2408 2409 2410
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
2411
  Label skip_hook;
2412 2413
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
2414 2415
  Operand debug_hook_active_operand =
      ExternalReferenceAsOperand(debug_hook_active);
2416
  cmpb(debug_hook_active_operand, Immediate(0));
2417 2418
  j(equal, &skip_hook);

2419 2420 2421 2422
  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
2423
      SmiTag(expected.reg(), expected.reg());
2424 2425 2426
      Push(expected.reg());
    }
    if (actual.is_reg()) {
2427
      SmiTag(actual.reg(), actual.reg());
2428
      Push(actual.reg());
2429
      SmiUntag(actual.reg(), actual.reg());
2430 2431 2432 2433 2434 2435
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun);
    Push(fun);
2436
    Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
2437
    CallRuntime(Runtime::kDebugOnFunctionCall);
2438 2439 2440 2441 2442 2443
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
2444
      SmiUntag(actual.reg(), actual.reg());
2445 2446 2447
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
2448
      SmiUntag(expected.reg(), expected.reg());
2449 2450
    }
  }
2451
  bind(&skip_hook);
2452 2453
}

2454
void TurboAssembler::StubPrologue(StackFrame::Type type) {
2455
  pushq(rbp);  // Caller's frame pointer.
2456
  movq(rbp, rsp);
2457
  Push(Immediate(StackFrame::TypeToMarker(type)));
2458 2459
}

2460 2461
void TurboAssembler::Prologue() {
  pushq(rbp);  // Caller's frame pointer.
2462
  movq(rbp, rsp);
2463 2464
  Push(rsi);  // Callee's context.
  Push(rdi);  // Callee's JS function.
2465 2466
}

2467
void TurboAssembler::EnterFrame(StackFrame::Type type) {
2468
  pushq(rbp);
2469
  movq(rbp, rsp);
2470
  Push(Immediate(StackFrame::TypeToMarker(type)));
2471 2472
}

2473
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
2474
  if (emit_debug_code()) {
2475
    cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
2476
         Immediate(StackFrame::TypeToMarker(type)));
2477
    Check(equal, AbortReason::kStackFrameTypesMustMatch);
2478
  }
2479
  movq(rsp, rbp);
2480
  popq(rbp);
2481 2482
}

2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514
#ifdef V8_OS_WIN
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
  // In windows, we cannot increment the stack size by more than one page
  // (minimum page size is 4KB) without accessing at least one byte on the
  // page. Check this:
  // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
  Label check_offset;
  Label touch_next_page;
  jmp(&check_offset);
  bind(&touch_next_page);
  subq(rsp, Immediate(kStackPageSize));
  // Just to touch the page, before we increment further.
  movb(Operand(rsp, 0), Immediate(0));
  subq(bytes_scratch, Immediate(kStackPageSize));

  bind(&check_offset);
  cmpq(bytes_scratch, Immediate(kStackPageSize));
  j(greater, &touch_next_page);

  subq(rsp, bytes_scratch);
}

void TurboAssembler::AllocateStackSpace(int bytes) {
  while (bytes > kStackPageSize) {
    subq(rsp, Immediate(kStackPageSize));
    movb(Operand(rsp, 0), Immediate(0));
    bytes -= kStackPageSize;
  }
  subq(rsp, Immediate(bytes));
}
#endif

2515 2516 2517 2518
void MacroAssembler::EnterExitFramePrologue(bool save_rax,
                                            StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
2519

2520
  // Set up the frame structure on the stack.
2521
  // All constants are relative to the frame pointer of the exit frame.
2522 2523 2524
  DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
            ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
2525
  DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
2526
  pushq(rbp);
2527
  movq(rbp, rsp);
2528

2529
  // Reserve room for entry stack pointer.
2530
  Push(Immediate(StackFrame::TypeToMarker(frame_type)));
2531
  DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
2532
  Push(Immediate(0));  // Saved entry sp, patched before call.
2533 2534

  // Save the frame pointer and the context in top.
2535
  if (save_rax) {
2536
    movq(r14, rax);  // Backup rax in callee-save register.
2537
  }
2538

2539 2540 2541 2542 2543 2544 2545 2546
  Store(
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
      rbp);
  Store(ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()),
        rsi);
  Store(
      ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate()),
      rbx);
2547
}
2548

2549

2550 2551
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
                                            bool save_doubles) {
2552
#ifdef _WIN64
2553 2554
  const int kShadowSpace = 4;
  arg_stack_space += kShadowSpace;
2555
#endif
2556 2557
  // Optionally save all XMM registers.
  if (save_doubles) {
2558
    int space = XMMRegister::kNumRegisters * kDoubleSize +
2559
                arg_stack_space * kSystemPointerSize;
2560
    AllocateStackSpace(space);
2561
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2562
    const RegisterConfiguration* config = RegisterConfiguration::Default();
2563 2564 2565
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2566
      Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
2567 2568
    }
  } else if (arg_stack_space > 0) {
2569
    AllocateStackSpace(arg_stack_space * kSystemPointerSize);
2570
  }
2571

2572
  // Get the required frame alignment for the OS.
2573
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
2574
  if (kFrameAlignment > 0) {
2575
    DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
2576
    DCHECK(is_int8(kFrameAlignment));
2577
    andq(rsp, Immediate(-kFrameAlignment));
2578 2579 2580
  }

  // Patch the saved entry sp.
2581
  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
2582 2583
}

2584 2585 2586
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
                                    StackFrame::Type frame_type) {
  EnterExitFramePrologue(true, frame_type);
2587

2588
  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
2589
  // so it must be retained across the C-call.
2590
  int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
2591
  leaq(r15, Operand(rbp, r14, times_system_pointer_size, offset));
2592

2593
  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
2594 2595 2596
}


2597
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
2598
  EnterExitFramePrologue(false, StackFrame::EXIT);
2599
  EnterExitFrameEpilogue(arg_stack_space, false);
2600 2601 2602
}


2603
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
2604
  // Registers:
2605
  // r15 : argv
2606
  if (save_doubles) {
2607
    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2608
    const RegisterConfiguration* config = RegisterConfiguration::Default();
2609 2610 2611
    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
      DoubleRegister reg =
          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2612
      Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
2613 2614
    }
  }
2615

2616 2617
  if (pop_arguments) {
    // Get the return address from the stack and restore the frame pointer.
2618 2619
    movq(rcx, Operand(rbp, kFPOnStackSize));
    movq(rbp, Operand(rbp, 0 * kSystemPointerSize));
2620

2621 2622
    // Drop everything up to and including the arguments and the receiver
    // from the caller stack.
2623
    leaq(rsp, Operand(r15, 1 * kSystemPointerSize));
2624 2625 2626 2627 2628 2629

    PushReturnAddressFrom(rcx);
  } else {
    // Otherwise just leave the exit frame.
    leave();
  }
2630

2631
  LeaveExitFrameEpilogue();
2632 2633
}

2634
void MacroAssembler::LeaveApiExitFrame() {
2635
  movq(rsp, rbp);
2636
  popq(rbp);
2637

2638
  LeaveExitFrameEpilogue();
2639 2640
}

2641
void MacroAssembler::LeaveExitFrameEpilogue() {
2642
  // Restore current context from top and clear it in debug mode.
2643 2644
  ExternalReference context_address =
      ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
2645
  Operand context_operand = ExternalReferenceAsOperand(context_address);
2646
  movq(rsi, context_operand);
2647
#ifdef DEBUG
2648
  movq(context_operand, Immediate(Context::kInvalidContext));
2649 2650 2651
#endif

  // Clear the top frame.
2652 2653
  ExternalReference c_entry_fp_address =
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
2654
  Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
2655
  movq(c_entry_fp_operand, Immediate(0));
2656 2657 2658
}


2659 2660 2661 2662 2663
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif
2664

2665 2666

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2667 2668
  LoadTaggedPointerField(dst, NativeContextOperand());
  LoadTaggedPointerField(dst, ContextOperand(dst, index));
2669 2670 2671
}


2672
int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2673 2674 2675 2676 2677 2678
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
2679
  DCHECK_GE(num_arguments, 0);
2680
#ifdef _WIN64
2681
  const int kMinimumStackSlots = kRegisterPassedArguments;
2682 2683
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
2684
#else
2685 2686
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
2687 2688 2689
#endif
}

2690
void TurboAssembler::PrepareCallCFunction(int num_arguments) {
2691
  int frame_alignment = base::OS::ActivationFrameAlignment();
2692 2693
  DCHECK_NE(frame_alignment, 0);
  DCHECK_GE(num_arguments, 0);
2694

2695
  // Make stack end at alignment and allocate space for arguments and old rsp.
2696
  movq(kScratchRegister, rsp);
2697
  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2698 2699
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
2700
  AllocateStackSpace((argument_slots_on_stack + 1) * kSystemPointerSize);
2701
  andq(rsp, Immediate(-frame_alignment));
2702 2703
  movq(Operand(rsp, argument_slots_on_stack * kSystemPointerSize),
       kScratchRegister);
2704 2705
}

2706
void TurboAssembler::CallCFunction(ExternalReference function,
2707
                                   int num_arguments) {
2708
  LoadAddress(rax, function);
2709 2710 2711
  CallCFunction(rax, num_arguments);
}

2712
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
2713
  DCHECK_LE(num_arguments, kMaxCParameters);
2714
  DCHECK(has_frame());
2715
  // Check stack alignment.
2716
  if (emit_debug_code()) {
2717 2718 2719
    CheckStackAlignment();
  }

2720 2721 2722 2723 2724 2725 2726
  // Save the frame pointer and PC so that the stack layout remains iterable,
  // even without an ExitFrame which normally exists between JS and C frames.
  if (isolate() != nullptr) {
    Label get_pc;
    DCHECK(!AreAliased(kScratchRegister, function));
    leaq(kScratchRegister, Operand(&get_pc, 0));
    bind(&get_pc);
2727
    movq(ExternalReferenceAsOperand(
2728 2729
             ExternalReference::fast_c_call_caller_pc_address(isolate())),
         kScratchRegister);
2730
    movq(ExternalReferenceAsOperand(
2731 2732 2733 2734
             ExternalReference::fast_c_call_caller_fp_address(isolate())),
         rbp);
  }

2735
  call(function);
2736 2737 2738

  if (isolate() != nullptr) {
    // We don't unset the PC; the FP is the source of truth.
2739
    movq(ExternalReferenceAsOperand(
2740 2741 2742 2743
             ExternalReference::fast_c_call_caller_fp_address(isolate())),
         Immediate(0));
  }

2744 2745
  DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
  DCHECK_GE(num_arguments, 0);
2746 2747
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
2748
  movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize));
2749 2750
}

2751 2752 2753
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met,
                                   Label::Distance condition_met_distance) {
2754
  DCHECK(cc == zero || cc == not_zero);
2755
  if (scratch == object) {
2756
    andq(scratch, Immediate(~kPageAlignmentMask));
2757
  } else {
2758
    movq(scratch, Immediate(~kPageAlignmentMask));
2759
    andq(scratch, object);
2760 2761 2762 2763 2764 2765 2766 2767 2768
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
2769

2770 2771 2772 2773
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  Label current;
  bind(&current);
  int pc = pc_offset();
2774
  // Load effective address to get the address of the current instruction.
2775
  leaq(dst, Operand(&current, -pc));
2776 2777
}

2778
void TurboAssembler::ResetSpeculationPoisonRegister() {
2779
  // TODO(tebbi): Perhaps, we want to put an lfence here.
2780 2781 2782
  Set(kSpeculationPoisonRegister, -1);
}

2783 2784 2785
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
  NoRootArrayScope no_root_array(this);
  // Save the deopt id in r13 (we don't need the roots array from now on).
2786
  movq(r13, Immediate(deopt_id));
2787 2788 2789
  call(target, RelocInfo::RUNTIME_ENTRY);
}

2790 2791
}  // namespace internal
}  // namespace v8
2792 2793

#endif  // V8_TARGET_ARCH_X64