// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_IA32

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MacroAssembler implementation.

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (isolate() != NULL) {
    // TODO(titzer): should we just use a null handle here instead?
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsx_b(dst, src);
  } else if (r.IsUInteger8()) {
    movzx_b(dst, src);
  } else if (r.IsInteger16()) {
    movsx_w(dst, src);
  } else if (r.IsUInteger16()) {
    movzx_w(dst, src);
  } else {
    mov(dst, src);
  }
}


void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    mov_b(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    mov_w(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    mov(dst, src);
  }
}
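
// Load/Store above pick the narrowest IA-32 move for the representation.
// Roughly equivalent C semantics, as a sketch:
//
//   int32_t v;
//   v = *(const int8_t*)p;    // movsx_b: sign-extend Integer8
//   v = *(const uint8_t*)p;   // movzx_b: zero-extend UInteger8
//   v = *(const int16_t*)p;   // movsx_w: sign-extend Integer16
//   v = *(const uint16_t*)p;  // movzx_w: zero-extend UInteger16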


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
    Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, value);
    return;
  }
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(destination, Immediate(index));
  mov(destination, Operand::StaticArray(destination,
                                        times_pointer_size,
                                        roots_array_start));
}


void MacroAssembler::StoreRoot(Register source,
                               Register scratch,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
      source);
}


void MacroAssembler::CompareRoot(Register with,
                                 Register scratch,
                                 Heap::RootListIndex index) {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  cmp(with, Operand::StaticArray(scratch,
                                times_pointer_size,
                                roots_array_start));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
  cmp(with, value);
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
  cmp(with, value);
}


void MacroAssembler::InNewSpace(
    Register object,
    Register scratch,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == equal || cc == not_equal);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  // Check that we can use a test_b.
  DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
  DCHECK(MemoryChunk::IN_TO_SPACE < 8);
  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
           | (1 << MemoryChunk::IN_TO_SPACE);
  // If non-zero, the page belongs to new-space.
  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
         static_cast<uint8_t>(mask));
  j(cc, condition_met, condition_met_distance);
}
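
// Pages are Page::kPageSize-aligned, so masking an object's address with
// ~Page::kPageAlignmentMask yields its page header, whose flags word says
// which space the page belongs to. As a sketch (the flags accessor is
// illustrative; the code above reads the word at MemoryChunk::kFlagsOffset
// directly):
//
//   MemoryChunk* page = (MemoryChunk*)(addr & ~Page::kPageAlignmentMask);
//   bool in_new_space = page->flags &
//       ((1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE));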


void MacroAssembler::RememberedSetHelper(
    Register object,  // Only used for debug checks.
    Register addr,
    Register scratch,
    SaveFPRegsMode save_fp,
    MacroAssembler::RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(scratch, Operand::StaticVariable(store_buffer));
  // Store pointer to buffer.
  mov(Operand(scratch, 0), addr);
  // Increment buffer top.
  add(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  mov(Operand::StaticVariable(store_buffer), scratch);
  // Check for end of buffer and call the stub on overflow.
  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}


void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister scratch_reg,
                                        Register result_reg) {
  Label done;
  Label conv_failure;
  xorps(scratch_reg, scratch_reg);
  cvtsd2si(result_reg, input_reg);
  test(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  cmp(result_reg, Immediate(0x1));
  j(overflow, &conv_failure, Label::kNear);
  mov(result_reg, Immediate(0));
  setcc(sign, result_reg);
  sub(result_reg, Immediate(1));
  and_(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  bind(&conv_failure);
  Move(result_reg, Immediate(0));
  ucomisd(input_reg, scratch_reg);
  j(below, &done, Label::kNear);
  Move(result_reg, Immediate(255));
  bind(&done);
}


void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  test(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  dec_b(reg);  // 0 if negative, 255 if positive.
  bind(&done);
}
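
// ClampUint8 is branch-free after the range test: the flags still hold the
// result of test(reg, 0xFFFFFF00), so setcc(negative) yields 1 exactly when
// the value was negative. Worked examples for out-of-range inputs:
//   reg = -5  -> sign set   -> setcc: 1 -> dec_b: 0x00 (clamped to 0)
//   reg = 300 -> sign clear -> setcc: 0 -> dec_b: 0xFF (clamped to 255)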


void MacroAssembler::SlowTruncateToI(Register result_reg,
                                     Register input_reg,
                                     int offset) {
  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
  call(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                       XMMRegister input_reg) {
  Label done;
  cvttsd2si(result_reg, Operand(input_reg));
  cmp(result_reg, 0x1);
  j(no_overflow, &done, Label::kNear);

  sub(esp, Immediate(kDoubleSize));
  movsd(MemOperand(esp, 0), input_reg);
  SlowTruncateToI(result_reg, esp, 0);
  add(esp, Immediate(kDoubleSize));
  bind(&done);
}
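
// cvttsd2si produces 0x80000000 (the "integer indefinite" value, INT32_MIN)
// when the input is NaN or out of int32 range. cmp(result_reg, 1) sets the
// overflow flag only for that value, so j(no_overflow) takes the fast path
// for every successful conversion. As a sketch in C:
//
//   int32_t r = (int32_t)d;            // cvttsd2si (truncating)
//   if (r != INT32_MIN) return r;      // fast path
//   return slow_truncate(d);           // DoubleToIStub fallback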


void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               XMMRegister scratch,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision, Label* is_nan,
                               Label* minus_zero, Label::Distance dst) {
  DCHECK(!input_reg.is(scratch));
  cvttsd2si(result_reg, Operand(input_reg));
  Cvtsi2sd(scratch, Operand(result_reg));
  ucomisd(scratch, input_reg);
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    Label done;
    // The integer converted back is equal to the original. We
    // only have to test if we got -0 as an input.
    test(result_reg, Operand(result_reg));
    j(not_zero, &done, Label::kNear);
    movmskpd(result_reg, input_reg);
    // Bit 0 contains the sign of the double in input_reg.
    // If input was positive, we are ok and return 0, otherwise
    // jump to minus_zero.
    and_(result_reg, 1);
    j(not_zero, minus_zero, dst);
    bind(&done);
  }
}
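
// In the FAIL_ON_MINUS_ZERO case the only input that passes the round-trip
// check with a zero result but must still be rejected is -0.0. movmskpd
// copies the sign bits of the two double lanes into the low bits of the
// destination register, so "and result, 1" isolates input_reg's sign bit.
// Equivalent C, as a sketch:
//
//   if (result == 0 && signbit(input)) goto minus_zero;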


void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  Label done, slow_case;

  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatureScope scope(this, SSE3);
    Label convert;
    // Use more powerful conversion when sse3 is available.
    // Load x87 register with heap number.
    fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    // Get exponent alone and check for too-big exponent.
    mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
    and_(result_reg, HeapNumber::kExponentMask);
    const uint32_t kTooBigExponent =
        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
    cmp(Operand(result_reg), Immediate(kTooBigExponent));
    j(greater_equal, &slow_case, Label::kNear);

    // Reserve space for 64 bit answer.
    sub(Operand(esp), Immediate(kDoubleSize));
    // Do conversion, which cannot fail because we checked the exponent.
    fisttp_d(Operand(esp, 0));
    mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
    add(Operand(esp), Immediate(kDoubleSize));
    jmp(&done, Label::kNear);

    // Slow case.
    bind(&slow_case);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore number from fpu stack
      sub(Operand(esp), Immediate(kDoubleSize));
      fstp_d(Operand(esp, 0));
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
    } else {
      fstp(0);
      SlowTruncateToI(result_reg, input_reg);
    }
  } else {
    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    cvttsd2si(result_reg, Operand(xmm0));
    cmp(result_reg, 0x1);
    j(no_overflow, &done, Label::kNear);
    // Check if the input was 0x80000000 (kMinInt).
    // If not, then we got an overflow and we deoptimize.
    ExternalReference min_int = ExternalReference::address_of_min_int();
    ucomisd(xmm0, Operand::StaticVariable(min_int));
    j(not_equal, &slow_case, Label::kNear);
    j(parity_even, &slow_case, Label::kNear);  // NaN.
    jmp(&done, Label::kNear);

    // Slow case.
    bind(&slow_case);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore number from double scratch.
      sub(esp, Immediate(kDoubleSize));
      movsd(MemOperand(esp, 0), xmm0);
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
    } else {
      SlowTruncateToI(result_reg, input_reg);
    }
  }
  bind(&done);
}


void MacroAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
  Label done;
  cmp(src, Immediate(0));
  ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
  Cvtsi2sd(dst, src);
  j(not_sign, &done, Label::kNear);
  addsd(dst, Operand::StaticVariable(uint32_bias));
  bind(&done);
}
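
// cvtsi2sd treats its 32-bit source as signed, so a uint32 with the top bit
// set converts to a negative double and must be re-biased by 2^32 (the value
// assumed to live behind address_of_uint32_bias). As a sketch:
//
//   double d = (double)(int32_t)x;          // Cvtsi2sd
//   if ((int32_t)x < 0) d += 4294967296.0;  // addsd uint32_bias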


void MacroAssembler::RecordWriteArray(
    Register object,
    Register value,
    Register index,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    test(value, Immediate(kSmiTagMask));
    j(zero, &done);
  }

  // Array access: calculate the destination address in the same manner as
  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
  // into an array of words.
  Register dst = index;
  lea(dst, Operand(object, index, times_half_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done, Label::kNear);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    test_b(dst, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWriteForMap(
    Register object,
    Handle<Map> map,
    Register scratch1,
    Register scratch2,
    SaveFPRegsMode save_fp) {
  Label done;

  Register address = scratch1;
  Register value = scratch2;
  if (emit_debug_code()) {
    Label ok;
    lea(address, FieldOperand(object, HeapObject::kMapOffset));
    test_b(address, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (!FLAG_incremental_marking) {
    return;
  }

  // Compute the address.
  lea(address, FieldOperand(object, HeapObject::kMapOffset));

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  DCHECK(!isolate()->heap()->InNewSpace(*map));
  CheckPageFlagForMap(map,
                      MemoryChunk::kPointersToHereAreInterestingMask,
                      zero,
                      &done,
                      Label::kNear);

  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
                       save_fp);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done, Label::kNear);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  zero,
                  &done,
                  Label::kNear);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}
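
// The barrier above, as pseudo-C (a sketch of the emitted checks):
//
//   if (is_smi(value)) return;                          // optional
//   if (!page(value)->pointers_to_here_interesting) return;
//   if (!page(object)->pointers_from_here_interesting) return;
//   RecordWriteStub(object, address, value);            // slow path
//
// Both page-flag tests reuse `value` as scratch, which is one reason the
// register is zapped afterwards under --debug-code.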


void MacroAssembler::DebugBreak() {
  Move(eax, Immediate(0));
  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(isolate(), 1);
  call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}


void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);
  cvtsi2sd(dst, src);
}
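
// The xorps in Cvtsi2sd is not needed for correctness: cvtsi2sd writes only
// the low 64 bits of dst and leaves the upper lane alone, which creates a
// false dependency on dst's previous contents. Zeroing dst first breaks that
// dependency chain, a common SSE idiom.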


bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
  static const int kMaxImmediateBits = 17;
  if (!RelocInfo::IsNone(x.rmode_)) return false;
  return !is_intn(x.x_, kMaxImmediateBits);
}


void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    Move(dst, Immediate(x.x_ ^ jit_cookie()));
    xor_(dst, jit_cookie());
  } else {
    Move(dst, x);
  }
}


void MacroAssembler::SafePush(const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    push(Immediate(x.x_ ^ jit_cookie()));
    xor_(Operand(esp, 0), Immediate(jit_cookie()));
  } else {
    push(x);
  }
}
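
// SafeMove/SafePush keep large user-controlled immediates out of the code
// stream in the clear (a JIT-spraying mitigation): the immediate is emitted
// XOR-ed with the per-isolate jit cookie and un-XOR-ed at run time. Worked
// example, assuming a cookie of 0xA5A5A5A5:
//
//   ; SafeMove(eax, Immediate(0x41414141))
//   mov eax, 0xE4E4E4E4   ; 0x41414141 ^ 0xA5A5A5A5
//   xor eax, 0xA5A5A5A5   ; eax == 0x41414141 again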


void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       static_cast<int8_t>(type));
}


void MacroAssembler::CheckFastElements(Register map,
                                       Label* fail,
                                       Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
}


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(above, fail, distance);
}


void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register key,
    Register scratch1,
    XMMRegister scratch2,
    Label* fail,
    int elements_offset) {
  Label smi_value, done;
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, turn potential sNaN into qNaN.
  Move(scratch2, 1.0);
  mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
  jmp(&done, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  mov(scratch1, maybe_number);
  SmiUntag(scratch1);
  Cvtsi2sd(scratch2, scratch1);
  bind(&done);
  movsd(FieldOperand(elements, key, times_4,
                     FixedDoubleArray::kHeaderSize - elements_offset),
        scratch2);
}


void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}


void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareMap(obj, map);
  j(not_equal, fail);
}


void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  mov(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);
  j(equal, success);

  bind(&fail);
}


Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  test(instance_type, Immediate(kIsNotStringMask));
  return zero;
}


Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register map,
                                           Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
  return below_equal;
}


void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  cmp(scratch,
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  j(above, fail);
}


void MacroAssembler::FCmp() {
  fucomip();
  fstp(0);
}


void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfSmi(object, &ok);
    cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandNotANumber);
    bind(&ok);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(equal, kOperandIsNotASmi);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(below, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    pop(object);
    Check(below_equal, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    cmp(object, isolate()->factory()->undefined_value());
    j(equal, &done_checking);
    cmp(FieldOperand(object, 0),
        Immediate(isolate()->factory()->allocation_site_map()));
    Assert(equal, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmi);
  }
}


void MacroAssembler::StubPrologue() {
  push(ebp);  // Caller's frame pointer.
  mov(ebp, esp);
  push(esi);  // Callee's context.
  push(Immediate(Smi::FromInt(StackFrame::STUB)));
}


void MacroAssembler::Prologue(bool code_pre_aging) {
  PredictableCodeSizeScope predictable_code_size_scope(this,
      kNoCodeAgeSequenceLength);
  if (code_pre_aging) {
    // Pre-age the code.
    call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
        RelocInfo::CODE_AGE_SEQUENCE);
    Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
  } else {
    push(ebp);  // Caller's frame pointer.
    mov(ebp, esp);
    push(esi);  // Callee's context.
    push(edi);  // Callee's JS function.
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on ia32.
  UNREACHABLE();
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(ebp);
  mov(ebp, esp);
  push(esi);
  push(Immediate(Smi::FromInt(type)));
  push(Immediate(CodeObject()));
  if (emit_debug_code()) {
    cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
    Check(not_equal, kCodeObjectNotProperlyPatched);
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
        Immediate(Smi::FromInt(type)));
    Check(equal, kStackFrameTypesMustMatch);
  }
  leave();
}


void MacroAssembler::EnterExitFramePrologue() {
  // Set up the frame structure on the stack.
  DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
  DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
  DCHECK(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
  push(ebp);
  mov(ebp, esp);

  // Reserve room for entry stack pointer and push the code object.
  DCHECK(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
  push(Immediate(0));  // Saved entry sp, patched before call.
  push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  ExternalReference c_function_address(Isolate::kCFunctionAddress, isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
  mov(Operand::StaticVariable(context_address), esi);
  mov(Operand::StaticVariable(c_function_address), ebx);
}


void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
                argc * kPointerSize;
    sub(esp, Immediate(space));
    const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else {
    sub(esp, Immediate(argc * kPointerSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
    and_(esp, -kFrameAlignment);
  }

  // Patch the saved entry sp.
  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}
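
// "and esp, -kFrameAlignment" rounds the stack pointer down to the required
// alignment: for an alignment of 16, -16 is 0xFFFFFFF0, so the low four bits
// are cleared. Since the stack grows downward, rounding down only inserts
// padding and never clobbers live data.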


void MacroAssembler::EnterExitFrame(bool save_doubles) {
  EnterExitFramePrologue();

  // Set up argc and argv in callee-saved registers.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  mov(edi, eax);
  lea(esi, Operand(ebp, eax, times_4, offset));

  // Reserve space for argc, argv and isolate.
  EnterExitFrameEpilogue(3, save_doubles);
}


void MacroAssembler::EnterApiExitFrame(int argc) {
  EnterExitFramePrologue();
  EnterExitFrameEpilogue(argc, false);
}


void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Optionally restore all XMM registers.
  if (save_doubles) {
    const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  // Get the return address from the stack and restore the frame pointer.
  mov(ecx, Operand(ebp, 1 * kPointerSize));
  mov(ebp, Operand(ebp, 0 * kPointerSize));

  // Pop the arguments and the receiver from the caller stack.
  lea(esp, Operand(esi, 1 * kPointerSize));

  // Push the return address to get ready to return.
  push(ecx);

  LeaveExitFrameEpilogue(true);
}


void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  if (restore_context) {
    mov(esi, Operand::StaticVariable(context_address));
  }
#ifdef DEBUG
  mov(Operand::StaticVariable(context_address), Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}


void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  mov(esp, ebp);
  pop(ebp);

  LeaveExitFrameEpilogue(restore_context);
}


void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  // Link the current handler as the next handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  push(Operand::StaticVariable(handler_address));

  // Set this new handler as the current one.
  mov(Operand::StaticVariable(handler_address), esp);
}


void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  pop(Operand::StaticVariable(handler_address));
  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch1));
  DCHECK(!holder_reg.is(scratch2));
  DCHECK(!scratch1.is(scratch2));

  // Load current lexical context from the stack frame.
  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmp(scratch1, Immediate(0));
    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
  }
  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, offset));
  mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to native_context_map.
    cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  // Check if both contexts are the same.
  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens, save holder_reg on the stack so we can use it
  // as a temporary register.
  //
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  mov(scratch2,
      FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    cmp(scratch2, isolate()->factory()->null_value());
    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);

    // Read the first word and compare to native_context_map.
    cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, token_offset));
  cmp(scratch1, FieldOperand(scratch2, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}


// Compute the hash code from the untagged key.  This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
//
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // Xor original key with a seed.
  if (serializer_enabled()) {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
    mov(scratch,
        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
    SmiUntag(scratch);
    xor_(r0, scratch);
  } else {
    int32_t seed = isolate()->heap()->HashSeed();
    xor_(r0, Immediate(seed));
  }

  // hash = ~hash + (hash << 15);
  mov(scratch, r0);
  not_(r0);
  shl(scratch, 15);
  add(r0, scratch);
  // hash = hash ^ (hash >> 12);
  mov(scratch, r0);
  shr(scratch, 12);
  xor_(r0, scratch);
  // hash = hash + (hash << 2);
  lea(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  mov(scratch, r0);
  shr(scratch, 4);
  xor_(r0, scratch);
  // hash = hash * 2057;
  imul(r0, r0, 2057);
  // hash = hash ^ (hash >> 16);
  mov(scratch, r0);
  shr(scratch, 16);
  xor_(r0, scratch);
  and_(r0, 0x3fffffff);
}
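
// The mixing sequence above is the same Jenkins-style integer hash that
// ComputeIntegerHash implements; as a C sketch:
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;
//   hash ^= hash >> 16;
//   return hash & 0x3fffffff;  // keep it a non-negative smi-sized value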



void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register r0,
                                              Register r1,
                                              Register r2,
                                              Register result) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver and is unchanged.
  //
  // key      - holds the smi key on entry and is unchanged.
  //
  // Scratch registers:
  //
  // r0 - holds the untagged key on entry and holds the hash once computed.
  //
  // r1 - used to hold the capacity mask of the dictionary
  //
  // r2 - used for the index into the dictionary.
  //
  // result - holds the result on exit if the load succeeds and we fall through.

  Label done;

  GetNumberHash(r0, r1);

  // Compute capacity mask.
  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
  shr(r1, kSmiTagSize);  // convert smi to int
  dec(r1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use r2 for index calculations and keep the hash intact in r0.
    mov(r2, r0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(r2, r1);

    // Scale the index by multiplying by the entry size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3

    // Check if the key matches.
    cmp(key, FieldOperand(elements,
                          r2,
                          times_pointer_size,
                          SeededNumberDictionary::kElementsStartOffset));
    if (i != (kNumberDictionaryProbes - 1)) {
      j(equal, &done);
    } else {
      j(not_equal, miss);
    }
  }

  bind(&done);
  // Check that the value is a field property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  DCHECK_EQ(DATA, 0);
  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
  j(not_zero, miss);

  // Get the value at the masked, scaled index.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
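
// The unrolled loop probes the dictionary at (hash + GetProbeOffset(i)) & mask,
// with each entry occupying kEntrySize == 3 words (key, value, details).
// One probe, as a sketch (probe_offset stands in for GetProbeOffset):
//
//   uint32_t index = (hash + probe_offset(i)) & capacity_mask;
//   Entry* e = &elements[index * 3];   // lea r2, [r2 + r2*2]
//   if (e->key == key) goto done;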


void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    DCHECK(scratch.is(no_reg));
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    cmp(result, Operand::StaticVariable(allocation_top));
    Check(equal, kUnexpectedAllocationTop);
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available.
  if (scratch.is(no_reg)) {
    mov(result, Operand::StaticVariable(allocation_top));
  } else {
    mov(scratch, Immediate(allocation_top));
    mov(result, Operand(scratch, 0));
  }
}


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    test(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top. Use scratch if available.
  if (scratch.is(no_reg)) {
    mov(Operand::StaticVariable(allocation_top), result_end);
  } else {
    mov(Operand(scratch, 0), result_end);
  }
}

void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        mov(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  Register top_reg = result_end.is_valid() ? result_end : result;
  if (!top_reg.is(result)) {
    mov(top_reg, result);
  }
  add(top_reg, Immediate(object_size));
  j(carry, gc_required);
  cmp(top_reg, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch, flags);

  // Tag result if requested.
  bool tag_result = (flags & TAG_OBJECT) != 0;
  if (top_reg.is(result)) {
    if (tag_result) {
      sub(result, Immediate(object_size - kHeapObjectTag));
    } else {
      sub(result, Immediate(object_size));
    }
  } else if (tag_result) {
    DCHECK(kHeapObjectTag == 1);
    inc(result);
  }
}
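
// Inline allocation is a bump-pointer fast path over the space's top/limit
// words; the allocation fails over to gc_required only on overflow. As
// pseudo-C:
//
//   char* top = *allocation_top;
//   char* new_top = top + object_size;
//   if (new_top < top || new_top > *allocation_limit) goto gc_required;
//   *allocation_top = new_top;
//   result = top + kHeapObjectTag;     // tag as heap object if TAG_OBJECT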


void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              RegisterValueType element_count_type,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
      // Register element_count is not modified by the function.
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  // We assume that element_count*element_size + header_size does not
  // overflow.
  if (element_count_type == REGISTER_VALUE_IS_SMI) {
    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
    DCHECK(element_size >= times_2);
    DCHECK(kSmiTagSize == 1);
    element_size = static_cast<ScaleFactor>(element_size - 1);
  } else {
    DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
  }
  lea(result_end, Operand(element_count, element_size, header_size));
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  if ((flags & TAG_OBJECT) != 0) {
    DCHECK(kHeapObjectTag == 1);
    inc(result);
  }

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);
}


void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  if (!object_size.is(result_end)) {
    mov(result_end, object_size);
  }
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Tag result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    DCHECK(kHeapObjectTag == 1);
    inc(result);
  }

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);
}


void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
  cmp(object, Operand::StaticVariable(new_space_allocation_top));
  Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
  mov(Operand::StaticVariable(new_space_allocation_top), object);
}


void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        MutableMode mode) {
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  Handle<Map> map = mode == MUTABLE
      ? isolate()->factory()->mutable_heap_number_map()
      : isolate()->factory()->heap_number_map();

  // Set the map.
  mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  DCHECK(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate two byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize,
           times_1,
           scratch1,
           REGISTER_VALUE_IS_INT32,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->string_map()));
  mov(scratch1, length);
  SmiTag(scratch1);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}
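
// Editorial sketch (illustrative, not part of the V8 source): the lea/and_
// pair above computes (length * 2 + kObjectAlignmentMask) &
// ~kObjectAlignmentMask without a separate multiply.  With pointer-size
// alignment (kObjectAlignmentMask == 3) and length == 5, lea yields
// 5 + 5 + 3 == 13 and the mask rounds it down to 12, the aligned byte count
// for five two-byte characters.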


void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, length);
  DCHECK(kCharSize == 1);
  add(scratch1, Immediate(kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate one-byte string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           times_1,
           scratch1,
           REGISTER_VALUE_IS_INT32,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->one_byte_string_map()));
  mov(scratch1, length);
  SmiTag(scratch1);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateOneByteString(Register result, int length,
                                           Register scratch1, Register scratch2,
                                           Label* gc_required) {
  DCHECK(length > 0);

  // Allocate one-byte string in new space.
  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
           gc_required, TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->one_byte_string_map()));
  mov(FieldOperand(result, String::kLengthOffset),
      Immediate(Smi::FromInt(length)));
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate the cons string in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_string_map()));
}


void MacroAssembler::AllocateOneByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_one_byte_string_map()));
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate the sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_string_map()));
}


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate the sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_one_byte_string_map()));
}


// Copy memory, byte-by-byte, from source to destination.  Not optimized for
// long or aligned copies.  The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
// Many variants of movsb, loop unrolling, word moves, and indexed operands
// have been tried here already, and this is fastest.
// A simpler loop is faster on small copies, but 30% slower on large ones.
// The cld() instruction must have been emitted, to clear the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register source,
                               Register destination,
                               Register length,
                               Register scratch) {
  Label short_loop, len4, len8, len12, done, short_string;
  DCHECK(source.is(esi));
  DCHECK(destination.is(edi));
  DCHECK(length.is(ecx));
  cmp(length, Immediate(4));
  j(below, &short_string, Label::kNear);

  // Because source is 4-byte aligned in our uses of this function,
  // we keep source aligned for the rep_movs call by copying the odd bytes
  // at the end of the ranges.
  mov(scratch, Operand(source, length, times_1, -4));
  mov(Operand(destination, length, times_1, -4), scratch);

  cmp(length, Immediate(8));
  j(below_equal, &len4, Label::kNear);
  cmp(length, Immediate(12));
  j(below_equal, &len8, Label::kNear);
  cmp(length, Immediate(16));
  j(below_equal, &len12, Label::kNear);

  mov(scratch, ecx);
  shr(ecx, 2);
  rep_movs();
  and_(scratch, Immediate(0x3));
  add(destination, scratch);
  jmp(&done, Label::kNear);

  bind(&len12);
  mov(scratch, Operand(source, 8));
  mov(Operand(destination, 8), scratch);
  bind(&len8);
  mov(scratch, Operand(source, 4));
  mov(Operand(destination, 4), scratch);
  bind(&len4);
  mov(scratch, Operand(source, 0));
  mov(Operand(destination, 0), scratch);
  add(destination, length);
  jmp(&done, Label::kNear);

  bind(&short_string);
  test(length, length);
  j(zero, &done, Label::kNear);

  bind(&short_loop);
  mov_b(scratch, Operand(source, 0));
  mov_b(Operand(destination, 0), scratch);
  inc(source);
  inc(destination);
  dec(length);
  j(not_zero, &short_loop);

  bind(&done);
}
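
// Editorial sketch (illustrative, not part of the V8 source): for
// length == 18 the code above first copies the tail bytes [14..17], then
// rep_movs copies 18 / 4 == 4 words, i.e. bytes [0..15]; the overlap
// [14..15] is simply written twice, which is cheaper than a trailing byte
// loop.  The final and_/add fix up destination by length % 4 == 2 bytes.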


void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry);
  bind(&loop);
  mov(Operand(start_offset, 0), filler);
  add(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmp(start_offset, end_offset);
  j(less, &loop);
}


void MacroAssembler::BooleanBitTest(Register object,
                                    int field_offset,
                                    int bit_index) {
  bit_index += kSmiTagSize + kSmiShiftSize;
  DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
  int byte_index = bit_index / kBitsPerByte;
  int byte_bit_index = bit_index & (kBitsPerByte - 1);
  test_b(FieldOperand(object, field_offset + byte_index),
         static_cast<byte>(1 << byte_bit_index));
}
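
// Editorial sketch (illustrative, not part of the V8 source): on ia32,
// kSmiTagSize == 1 and kSmiShiftSize == 0, so testing logical bit 9 of the
// smi-encoded field becomes bit_index == 10, i.e. byte_index == 1 and
// byte_bit_index == 2: a single test_b of bit 2 in the field's second byte.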


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  test(result, result);
  j(not_zero, &ok);
  test(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op1,
                                      Register op2,
                                      Register scratch,
                                      Label* then_label) {
  Label ok;
  test(result, result);
  j(not_zero, &ok);
  mov(scratch, op1);
  or_(scratch, op2);
  j(sign, then_label);
  bind(&ok);
}
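
// Editorial sketch (illustrative, not part of the V8 source): a zero
// product can only be -0 if some operand was negative, so or_-ing the
// operands and testing the sign bit suffices: for -4 * 0, scratch becomes
// -4 | 0 == 0xFFFFFFFC (sign set, jump taken); for 4 * 0 it stays
// non-negative and execution falls through to ok.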


void MacroAssembler::GetMapConstructor(Register result, Register map,
                                       Register temp) {
  Label done, loop;
  mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
  bind(&loop);
  JumpIfSmi(result, &done);
  CmpObjectType(result, MAP_TYPE, temp);
  j(not_equal, &done);
  mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
  jmp(&loop);
  bind(&done);
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  Label non_instance;
  if (miss_on_bound_function) {
    // Check that the receiver isn't a smi.
    JumpIfSmi(function, miss);

    // Check that the function really is a function.
    CmpObjectType(function, JS_FUNCTION_TYPE, result);
    j(not_equal, miss);

    // If a bound function, go to miss label.
    mov(scratch,
        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
                   SharedFunctionInfo::kBoundFunction);
    j(not_zero, miss);

    // Make sure that the function has an instance prototype.
    movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
    test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
    j(not_zero, &non_instance);
  }

  // Get the prototype or initial map from the function.
  mov(result,
      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, scratch);
  j(not_equal, &done);

  // Get the prototype from the initial map.
  mov(result, FieldOperand(result, Map::kPrototypeOffset));

  if (miss_on_bound_function) {
    jmp(&done);

    // Non-instance prototype: Fetch prototype from constructor field
    // in initial map.
    bind(&non_instance);
    GetMapConstructor(result, result, scratch);
  }

  // All done.
  bind(&done);
}


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  DCHECK(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  if (!index.is(hash)) {
    mov(index, hash);
  }
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Move(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ExternalReference(f, isolate())));
  CEntryStub ces(isolate(), 1, save_doubles);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(ExternalReference ref,
                                           int num_arguments) {
  mov(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ref));

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Move(eax, Immediate(num_arguments));
  JumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
  // Set the entry point and jump to the C entry runtime stub.
  mov(ebx, Immediate(ext));
  CEntryStub ces(isolate(), 1);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    const Operand& code_operand,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    Label::Distance done_near,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label invoke;
  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(eax, actual.immediate());
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(ebx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      cmp(expected.reg(), actual.immediate());
      j(equal, &invoke);
      DCHECK(expected.reg().is(ebx));
      mov(eax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmp(expected.reg(), actual.reg());
      j(equal, &invoke);
      DCHECK(actual.reg().is(eax));
      DCHECK(expected.reg().is(ebx));
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (!code_constant.is_null()) {
      mov(edx, Immediate(code_constant));
      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_operand.is_reg(edx)) {
      mov(edx, code_operand);
    }

    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
      call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        jmp(done, done_near);
      }
    } else {
      jmp(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}


void MacroAssembler::InvokeCode(const Operand& code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag, Label::kNear,
                 call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      jmp(code);
    }
    bind(&done);
  }
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(fun.is(edi));
  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
  SmiUntag(ebx);

  ParameterCount expected(ebx);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(fun.is(edi));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  LoadHeapObject(edi, function);
  InvokeFunction(edi, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinFunction(edi, id);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, expected, flag, call_wrapper);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the JavaScript builtin function from the builtins object.
  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  mov(target, FieldOperand(target,
                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(edi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(edi, id);
  // Load the code entry point from the function into the target register.
  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in esi).
    mov(dst, esi);
  }

  // We should not have found a with context by walking the context chain
  // (i.e., the static scope chain and runtime context chain do not agree).
  // A variable occurring in such a scope should have slot type LOOKUP and
  // not CONTEXT.
  if (emit_debug_code()) {
    cmp(FieldOperand(dst, HeapObject::kMapOffset),
        isolate()->factory()->with_context_map());
    Check(not_equal, kVariableResolvedToWithContext);
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  mov(scratch, Operand(scratch,
                       Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));

  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  mov(map_in_out, FieldOperand(scratch, offset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  mov(function,
      Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  mov(function,
      FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  mov(function, Operand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map.  The global functions all have initial maps.
  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


// Store the value in register src in the safepoint register stack
// slot for register dst.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  mov(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
  mov(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  mov(dst, SafepointRegisterSlot(src));
}


Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the lowest encoding,
  // which means that lowest encodings are furthest away from
  // the stack pointer.
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return kNumSafepointRegisters - reg_code - 1;
}
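
// Editorial sketch (illustrative, not part of the V8 source; assumes
// kNumSafepointRegisters == 8 on ia32): eax (code 0) is pushed first and
// ends up deepest on the stack, so it maps to slot 8 - 0 - 1 == 7, while
// edi (code 7) maps to slot 0, the word closest to esp.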


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference embedding_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    mov(result, Operand::ForCell(cell));
  } else {
    mov(result, object);
  }
}


void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    cmp(reg, Operand::ForCell(cell));
  } else {
    cmp(reg, object);
  }
}


void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    push(Operand::ForCell(cell));
  } else {
    Push(object);
  }
}


void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
                                  Register scratch) {
  mov(scratch, cell);
  cmp(value, FieldOperand(scratch, WeakCell::kValueOffset));
}


void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
  mov(value, cell);
  mov(value, FieldOperand(value, WeakCell::kValueOffset));
}


void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
                                   Label* miss) {
  GetWeakValue(value, cell);
  JumpIfSmi(value, miss);
}


void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    pop(scratch);
    add(esp, Immediate(bytes_dropped));
    push(scratch);
    ret(0);
  }
}


void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    add(esp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    mov(dst, src);
  }
}


void MacroAssembler::Move(Register dst, const Immediate& x) {
  if (x.is_zero()) {
    xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
  } else {
    mov(dst, x);
  }
}


void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
  mov(dst, x);
}


void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    pxor(dst, dst);
  } else {
    unsigned cnt = base::bits::CountPopulation32(src);
    unsigned nlz = base::bits::CountLeadingZeros32(src);
    unsigned ntz = base::bits::CountTrailingZeros32(src);
    if (nlz + cnt + ntz == 32) {
      pcmpeqd(dst, dst);
      if (ntz == 0) {
        psrld(dst, 32 - cnt);
      } else {
        pslld(dst, 32 - cnt);
        if (nlz != 0) psrld(dst, nlz);
      }
    } else {
      push(eax);
      mov(eax, Immediate(src));
      movd(dst, Operand(eax));
      pop(eax);
    }
  }
}
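
// Editorial sketch (illustrative, not part of the V8 source): the branch
// above synthesizes constants whose set bits are contiguous without a
// memory load.  For src == 0x00FFFF00 (cnt == 16, nlz == 8, ntz == 8),
// pcmpeqd gives all ones, pslld by 32 - cnt == 16 leaves 0xFFFF0000, and
// psrld by nlz == 8 produces 0x00FFFF00.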


void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
  if (src == 0) {
    pxor(dst, dst);
  } else {
    uint32_t lower = static_cast<uint32_t>(src);
    uint32_t upper = static_cast<uint32_t>(src >> 32);
    unsigned cnt = base::bits::CountPopulation64(src);
    unsigned nlz = base::bits::CountLeadingZeros64(src);
    unsigned ntz = base::bits::CountTrailingZeros64(src);
    if (nlz + cnt + ntz == 64) {
      pcmpeqd(dst, dst);
      if (ntz == 0) {
        psrlq(dst, 64 - cnt);
      } else {
        psllq(dst, 64 - cnt);
        if (nlz != 0) psrlq(dst, nlz);
      }
    } else if (lower == 0) {
      Move(dst, upper);
      psllq(dst, 32);
    } else if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatureScope scope(this, SSE4_1);
      push(eax);
      Move(eax, Immediate(lower));
      movd(dst, Operand(eax));
      Move(eax, Immediate(upper));
      pinsrd(dst, Operand(eax), 1);
      pop(eax);
    } else {
      push(Immediate(upper));
      push(Immediate(lower));
      movsd(dst, Operand(esp, 0));
      add(esp, Immediate(kDoubleSize));
    }
  }
}


void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
  if (imm8 == 0) {
    movd(dst, src);
    return;
  }
  DCHECK_EQ(1, imm8);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
  pshufd(xmm0, src, 1);
  movd(dst, xmm0);
}


void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
  DCHECK(imm8 == 0 || imm8 == 1);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  movd(xmm0, src);
  if (imm8 == 1) {
    punpckldq(dst, xmm0);
  } else {
    DCHECK_EQ(0, imm8);
    psrlq(dst, 32);
    punpckldq(xmm0, dst);
    movaps(dst, xmm0);
  }
}


void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcnt(dst, src);
    return;
  }
  Label not_zero_src;
  bsr(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Move(dst, Immediate(63));  // 63^31 == 32
  bind(&not_zero_src);
  xor_(dst, Immediate(31));  // for x in [0..31], 31^x == 31-x.
}
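
// Editorial sketch (illustrative, not part of the V8 source): bsr returns
// the index of the highest set bit, so for src == 0x00000010 it yields 4
// and 4 ^ 31 == 27, the leading-zero count.  For src == 0, bsr leaves dst
// undefined, so 63 is loaded instead and 63 ^ 31 == 32, matching lzcnt on
// a zero input.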


void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      inc(operand);
    } else {
      add(operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      dec(operand);
    } else {
      sub(operand, Immediate(value));
    }
  }
}


void MacroAssembler::IncrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    IncrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}


void MacroAssembler::DecrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    DecrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}


void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Factory* factory = isolate()->factory();
    Label ok;
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_double_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_cow_array_map()));
    j(equal, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    Label alignment_as_expected;
    test(esp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // will not return here
  int3();
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  mov(dst, FieldOperand(dst, offset));
}


void MacroAssembler::LoadPowerOf2(XMMRegister dst,
                                  Register scratch,
                                  int power) {
  DCHECK(is_uintn(power + HeapNumber::kExponentBias,
                  HeapNumber::kExponentBits));
  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
  movd(dst, scratch);
  psllq(dst, HeapNumber::kMantissaBits);
}
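
// Editorial sketch (illustrative, not part of the V8 source): a double
// with a zero mantissa encodes 2^(exponent - kExponentBias), so for
// power == 3 the code stores 3 + 1023 == 1026, shifted into the exponent
// bits by psllq, which is exactly the bit pattern of 8.0.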


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
  sub(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label smi_hash_calculated;
  Label load_result_from_cache;
  Label not_smi;
  STATIC_ASSERT(kSmiTag == 0);
  JumpIfNotSmi(object, &not_smi, Label::kNear);
  mov(scratch, object);
  SmiUntag(scratch);
  jmp(&smi_hash_calculated, Label::kNear);
  bind(&not_smi);
  cmp(FieldOperand(object, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  j(not_equal, not_found);
  STATIC_ASSERT(8 == kDoubleSize);
  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  // Object is heap number and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  Register index = scratch;
  Register probe = mask;
  mov(probe,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
  j(parity_even, not_found);  // Bail out if NaN is involved.
  j(not_equal, not_found);  // The cache did not contain this value.
  jmp(&load_result_from_cache, Label::kNear);

  bind(&smi_hash_calculated);
  // Object is smi and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  // Check if the entry is the smi we are looking for.
  cmp(object,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  j(not_equal, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  mov(result,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
    Register instance_type, Register scratch, Label* failure) {
  if (!scratch.is(instance_type)) {
    mov(scratch, instance_type);
  }
  and_(scratch,
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
  j(not_equal, failure);
}


void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
                                                           Register object2,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that both objects are not smis.
  STATIC_ASSERT(kSmiTag == 0);
  mov(scratch1, object1);
  and_(scratch1, object2);
  JumpIfSmi(scratch1, failure);

  // Load instance type for both strings.
  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat one-byte strings.
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  // Interleave bits from both instance types and compare them in one check.
  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
  and_(scratch1, kFlatOneByteStringMask);
  and_(scratch2, kFlatOneByteStringMask);
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
  j(not_equal, failure);
}
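
// Editorial sketch (illustrative, not part of the V8 source): since the
// masked instance-type bits fit below bit 3, the lea above computes
// scratch1 + scratch2 * 8, packing both masked types into one word so a
// single cmp checks both strings.  The smi check works similarly: and_-ing
// the two tagged words leaves tag bit 0 clear if either operand is a smi.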


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
                                                     Label* not_unique_name,
                                                     Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  j(zero, &succeed);
  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
  j(not_equal, not_unique_name, distance);

  bind(&succeed);
}


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object, Label::kNear);
  Abort(kNonObject);
  bind(&is_object);

  push(value);
  mov(value, FieldOperand(string, HeapObject::kMapOffset));
  movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));

  and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmp(value, Immediate(encoding_mask));
  pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to come in untagged. Tag it to compare against the
  // string length without using a temp register; it is restored at the end
  // of this function.
  SmiTag(index);
  Check(no_overflow, kIndexIsTooLarge);

  cmp(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  cmp(index, Immediate(Smi::FromInt(0)));
  Check(greater_equal, kIndexIsNegative);

  // Restore the index.
  SmiUntag(index);
}


void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  if (frame_alignment != 0) {
    // Make stack end at alignment and make room for num_arguments words
    // and the original value of esp.
    mov(scratch, esp);
    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(esp, -frame_alignment);
    mov(Operand(esp, num_arguments * kPointerSize), scratch);
  } else {
    sub(esp, Immediate(num_arguments * kPointerSize));
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  // Trashing eax is ok as it will be the return value.
  mov(eax, Immediate(function));
  CallCFunction(eax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  if (base::OS::ActivationFrameAlignment() != 0) {
    mov(esp, Operand(esp, num_arguments * kPointerSize));
  } else {
    add(esp, Immediate(num_arguments * kPointerSize));
  }
}
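
// Editorial sketch (illustrative, not part of the V8 source; arg0, arg1 and
// some_external_reference are placeholders):
//
//   PrepareCallCFunction(2, eax);
//   mov(Operand(esp, 0 * kPointerSize), arg0);
//   mov(Operand(esp, 1 * kPointerSize), arg1);
//   CallCFunction(some_external_reference, 2);
//
// CallCFunction then unwinds esp either from the saved value stored above
// the arguments or by popping the argument words, mirroring the two
// branches in PrepareCallCFunction.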


#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CpuFeatures::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
           static_cast<uint8_t>(mask));
  } else {
    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
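
// Editorial sketch (illustrative, not part of the V8 source): pages are
// power-of-two aligned, so masking an object's address with
// ~Page::kPageAlignmentMask recovers its MemoryChunk header.  With a 1 MB
// page, for example, an object at 0x08123456 maps to the chunk at
// 0x08100000, whose flags word is then tested in place.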


void MacroAssembler::CheckPageFlagForMap(
    Handle<Map> map,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  Page* page = Page::FromAddress(map->address());
  DCHECK(!serializer_enabled());  // Serializer cannot match page_flags.
  ExternalReference reference(ExternalReference::page_flags(page));
  // The inlined static address check of the page's flags relies
  // on maps never being compacted.
  DCHECK(!isolate()->heap()->mark_compact_collector()->
         IsOnEvacuationCandidate(*map));
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
  } else {
    test(Operand::StaticVariable(reference), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black,
                                 Label::Distance on_black_near) {
  HasColor(object, scratch0, scratch1,
           on_black, on_black_near,
           1, 0);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              Label::Distance has_color_distance,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
  j(zero, &word_boundary, Label::kNear);
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  jmp(&other_color, Label::kNear);

  bind(&word_boundary);
  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);

  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  bind(&other_color);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  and_(bitmap_reg, addr_reg);
  mov(ecx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shr(ecx, shift);
  and_(ecx,
       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));

  add(bitmap_reg, ecx);
  mov(ecx, addr_reg);
  shr(ecx, kPointerSizeLog2);
  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
  mov(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}
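
// Editorial sketch (illustrative, not part of the V8 source): one mark bit
// covers one pointer-sized word, so the object's bit index within its page
// is (addr & Page::kPageAlignmentMask) >> kPointerSizeLog2.  The code above
// splits that index into a cell offset, added to the page-aligned bitmap
// base in bitmap_reg, and a bit-within-cell position, materialized as a
// single set bit in mask_reg via shl_cl.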


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
3016
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
3017 3018 3019
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
3020 3021 3022 3023
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3024 3025 3026 3027 3028 3029 3030 3031

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // Shift left by one (the add below).  May overflow, making the check
    // conservative.
    add(mask_scratch, mask_scratch);
    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-indirect strings.
  Register map = ecx;  // Holds map while checking type.
  Register length = ecx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number
  mov(map, FieldOperand(value, HeapObject::kMapOffset));
  cmp(map, isolate()->factory()->heap_number_map());
  j(not_equal, &not_heap_number, Label::kNear);
  mov(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not an indirect (cons or sliced) string, then
  // it's an object containing no GC pointers.
  Register instance_type = ecx;
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  test_b(instance_type, kExternalStringTag);
  j(zero, &not_external, Label::kNear);
  mov(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either Latin1 or UC16.
  DCHECK(kOneByteStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  add(length, Immediate(0x04));
  // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
  // by 2. If we multiply the string length as smi by this, it still
  // won't overflow a 32-bit value.
  DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
  DCHECK(SeqOneByteString::kMaxSize <=
         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
      length);
  if (emit_debug_code()) {
    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
    Check(less_equal, kLiveBytesCountOverflowChunkSize);
  }

  bind(&done);
}
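
// Worked example of the size computation above, assuming a one-byte
// sequential string of length 5 (smi-encoded as 5 << 1 on ia32):
//
//   encoding bits -> length reg = 4           (char size 1, shifted by 2)
//   imul by smi length:  4 * 10 = 40
//   shr by 2 + kSmiTagSize (= 3): 40 >> 3 = 5 character bytes
//   add SeqString::kHeaderSize + kObjectAlignmentMask, mask with
//   ~kObjectAlignmentMask: rounds up to the allocated object size.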


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  and_(dst, Immediate(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}
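
// Equivalent C++ for the decode above (the STATIC_ASSERT guarantees no
// shift is needed before masking):
//
//   int len = map->bit_field3() & Map::EnumLengthBits::kMask;
//   dst = Smi::FromInt(len);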


void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Label next, start;
  mov(ecx, eax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(0)));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register ecx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
  cmp(ecx, isolate()->factory()->empty_fixed_array());
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
  j(not_equal, call_runtime);

  bind(&no_elements);
  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
  cmp(ecx, isolate()->factory()->null_value());
  j(not_equal, &next);
}
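
// Rough pseudocode for the walk above (field names illustrative, not V8
// API).  eax holds the receiver on entry:
//
//   for (obj = receiver; obj != null; obj = obj->map->prototype) {
//     if (obj == receiver) {
//       if (EnumLength(obj->map) == kInvalidEnumCacheSentinel) goto runtime;
//     } else if (EnumLength(obj->map) != 0) {
//       goto runtime;  // non-empty cache on a prototype
//     }
//     if (obj->elements != empty_fixed_array &&
//         obj->elements != empty_slow_element_dictionary) goto runtime;
//   }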


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  lea(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  cmp(scratch_reg, Immediate(new_space_start));
  j(less, no_memento_found);
  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
  j(greater, no_memento_found);
  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
      Immediate(isolate()->factory()->allocation_memento_map()));
}
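
// Sketch of the range check above.  With memento_end = receiver +
// JSArray::kSize + AllocationMemento::kSize (untagged), the code does:
//
//   if (memento_end < new_space_start) goto no_memento_found;
//   if (memento_end > new_space_allocation_top) goto no_memento_found;
//   compare [memento_end - AllocationMemento::kSize] against the
//   allocation_memento_map; the caller branches on the resulting flags.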


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Start the walk from the object itself.
  mov(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(not_equal, &loop_again);
}
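
// Pseudocode for the loop above (illustrative field accessors):
//
//   Object* cur = object;
//   do {
//     Map* map = cur->map;
//     if (Map::ElementsKindBits::decode(map->bit_field2()) ==
//         DICTIONARY_ELEMENTS) goto found;
//     cur = map->prototype;
//   } while (cur != null);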


void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(eax));
  DCHECK(!dividend.is(edx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  mov(eax, Immediate(mag.multiplier));
  imul(dividend);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) add(edx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
  if (mag.shift > 0) sar(edx, mag.shift);
  mov(eax, dividend);
  shr(eax, 31);
  add(edx, eax);
}
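
// The sequence above is the classic multiply-by-magic-constant signed
// division (Hacker's Delight, ch. 10), leaving the quotient in edx.  Worked
// example for divisor == 7: the standard magic pair is multiplier 0x92492493
// with shift 2; the multiplier is negative as an int32, so the dividend is
// added back to the high half before shifting:
//
//   int32_t M = (int32_t)0x92492493;                 // mag.multiplier
//   int32_t q = (int32_t)(((int64_t)M * n) >> 32);   // imul high half (edx)
//   q += n;                                          // divisor > 0 && neg
//   q >>= 2;                                         // sar(edx, mag.shift)
//   q += (uint32_t)n >> 31;                          // fix up negative n
//   // e.g. n = 14 -> q = 2;  n = -14 -> q = -2.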


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32