// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::Jump(Register target, Condition cond) {
  bx(target, cond);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


int MacroAssembler::CallSize(Register target, Condition cond) {
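  // A call through a register is a single blx instruction.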
  return kInstrSize;
}


void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
  blx(target, cond);
  DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(
    Address target, RelocInfo::Mode rmode, Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return kInstrSize +
         mov_operand.instructions_required(this, mov_instr) * kInstrSize;
}


int MacroAssembler::CallStubSize(
    CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
  return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}


int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
                                                   Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return kInstrSize +
         mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
}


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);

  bool old_predictable_code_size = predictable_code_size();
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(true);
  }

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(target, rmode, cond);
#endif

  // Call sequence on V7 or later may be:
  //  movw  ip, #... @ call address low 16
  //  movt  ip, #... @ call address high 16
  //  blx   ip
  //                      @ return address
  // Or for pre-V7 or values that may be back-patched
  // to avoid ICache flushes:
  //  ldr   ip, [pc, #...] @ call address
  //  blx   ip
  //                      @ return address

  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method will automatically record
  // positions when pc is the target. Since this is not the case here,
  // we have to do it explicitly.
  positions_recorder()->WriteRecordedPositions();

  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  blx(ip, cond);

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(old_predictable_code_size);
  }
}


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
                             Condition cond) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  Label start;
  bind(&start);
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  // 'code' is always generated ARM code, never THUMB code
  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}


void MacroAssembler::Ret(Condition cond) {
  bx(lr, cond);
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}


void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}


void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
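    // No scratch register available: swap in place with the three-EOR trick
    // (reg1 ^= reg2; reg2 ^= reg1; reg1 ^= reg2).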
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}


void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));
  push(ip);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    mov(dst, Operand(value));
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      mov(dst, Operand(cell));
      ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      mov(dst, Operand(value));
    }
  }
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    mov(dst, src, LeaveCC, cond);
  }
}


void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}


void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                         Register srcA, Condition cond) {
  if (CpuFeatures::IsSupported(MLS)) {
    CpuFeatureScope scope(this, MLS);
    mls(dst, src1, src2, srcA, cond);
  } else {
    DCHECK(!srcA.is(ip));
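    // MLS is not available: multiply into ip and subtract the product from
    // srcA instead.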
    mul(ip, src1, src2, LeaveCC, cond);
    sub(dst, srcA, ip, LeaveCC, cond);
  }
}


void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
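  // An AND with immediate zero reduces to moving zero into dst, and an AND
  // with an immediate of the form 2^n - 1 that would not encode in a single
  // instruction can be done as a ubfx of the low n bits on ARMv7.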
  if (!src2.is_reg() &&
      !src2.must_output_reloc_info(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!(src2.instructions_required(this) == 1) &&
             !src2.must_output_reloc_info(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
    ubfx(dst, src1, 0,
        WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}


void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
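    // No ubfx available (or predictable code size required): mask out bits
    // [lsb, lsb + width) and shift them down to bit 0.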
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
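    // No sbfx available: mask the field, shift it up so its top bit reaches
    // bit 31, then arithmetic-shift it back down to sign-extend.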
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  DCHECK(0 <= lsb && lsb < 32);
  DCHECK(0 <= width && width < 32);
  DCHECK(lsb + width < 32);
  DCHECK(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
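    // No bfi available: clear the destination field, mask the low 'width'
    // bits of src, shift them into place and OR them into dst.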
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}


void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  DCHECK(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
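    // No bfc available: clear bits [lsb, lsb + width) with a bic of the
    // field mask.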
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}


void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    DCHECK(!dst.is(pc) && !src.rm().is(pc));
    DCHECK((satpos >= 0) && (satpos <= 31));

    // These asserts are required to ensure compatibility with the ARMv7
    // implementation.
    DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
    DCHECK(src.rs().is(no_reg));

    Label done;
    int satval = (1 << satpos) - 1;

    if (cond != al) {
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    }
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src);
    }
    tst(dst, Operand(~satval));
    b(eq, &done);
    mov(dst, Operand::Zero(), LeaveCC, mi);  // 0 if negative.
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    bind(&done);
  } else {
    usat(dst, satpos, src, cond);
  }
}


void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    ldrsb(dst, src);
  } else if (r.IsUInteger8()) {
    ldrb(dst, src);
  } else if (r.IsInteger16()) {
    ldrsh(dst, src);
  } else if (r.IsUInteger16()) {
    ldrh(dst, src);
  } else {
    ldr(dst, src);
  }
}


void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    strb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    strh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    str(src, dst);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
      !predictable_code_size()) {
    // The CPU supports fast immediate values, and this root will never
    // change. We will load it as a relocatable immediate value.
    Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, Operand(root), LeaveCC, cond);
    return;
  }
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
  b(cond, branch);
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}


// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    cmp(dst, Operand(isolate()->factory()->meta_map()));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    cmp(ip, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    ldr(ip, MemOperand(address));
    cmp(ip, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  ldr(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  str(address, MemOperand(scratch, kPointerSize, PostIndex));
  // Write back new top of buffer.
  str(scratch, MemOperand(ip));
  // Call stub on end of buffer.
  // Check for end of buffer.
  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    b(eq, &done);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(eq);
  }
  push(lr);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(lr);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


void MacroAssembler::PushFixedFrame(Register marker_reg) {
  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                cp.bit() |
                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
                fp.bit() |
                lr.bit());
}


void MacroAssembler::PopFixedFrame(Register marker_reg) {
  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                cp.bit() |
                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
                fp.bit() |
                lr.bit());
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0:
  DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // Number of d-regs not known at snapshot time.
  DCHECK(!serializer_enabled());
  // General purpose registers are pushed last on the stack.
  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  DCHECK(src.rm().is(no_reg));
  DCHECK(!dst1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));

  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
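    // Fall back to two single-word loads. If dst1 aliases the base register,
    // load the second word first so the base is not clobbered too early.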
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
      MemOperand src2(src);
      src2.set_offset(src2.offset() + 4);
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
      } else {
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      }
    } else {  // PostIndex or NegPostIndex.
      DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
      if (dst1.is(src.rn())) {
        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
        ldr(dst1, src, cond);
      } else {
        MemOperand src2(src);
        src2.set_offset(src2.offset() - 4);
        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
        ldr(dst2, src2, cond);
      }
    }
  }
}


void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  DCHECK(dst.rm().is(no_reg));
  DCHECK(!src1.is(lr));  // r14.

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
      dst2.set_offset(dst2.offset() + 4);
      str(src1, dst, cond);
      str(src2, dst2, cond);
    } else {  // PostIndex or NegPostIndex.
      DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
      dst2.set_offset(dst2.offset() - 4);
      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
      str(src2, dst2, cond);
    }
  }
}


void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
  // If needed, restore wanted bits of FPSCR.
  Label fpscr_done;
  vmrs(scratch);
  if (emit_debug_code()) {
    Label rounding_mode_correct;
    tst(scratch, Operand(kVFPRoundingModeMask));
    b(eq, &rounding_mode_correct);
    // Don't call Assert here, since Runtime_Abort could re-enter here.
    stop("Default rounding mode not set");
    bind(&rounding_mode_correct);
  }
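  // Enable the default-NaN mode bit if it is not already set.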
  tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
  b(ne, &fpscr_done);
  orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
  vmsr(scratch);
  bind(&fpscr_done);
}


void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,
                                        const Condition cond) {
  vsub(dst, src, kDoubleRegZero, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const double imm,
                          const Register scratch) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
  if (value_rep == zero) {
    vmov(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero) {
    vneg(dst, kDoubleRegZero);
  } else {
    vmov(dst, imm, scratch);
  }
}


void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}


void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);
  } else {
    vmov(dst, VmovIndexHi, src);
  }
}


void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}


void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
  } else {
    vmov(dst, VmovIndexLo, src);
  }
}


void MacroAssembler::LoadConstantPoolPointerRegister() {
  if (FLAG_enable_ool_constant_pool) {
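    // Load pp with a pc-relative load of the code object's constant pool
    // slot; the offset accounts for the Code header, the current pc_offset()
    // and the ARM pc read-ahead (Instruction::kPCReadOffset).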
    int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
        pc_offset() - Instruction::kPCReadOffset;
    DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
    ldr(pp, MemOperand(pc, constant_pool_offset));
  }
}


void MacroAssembler::StubPrologue() {
  PushFixedFrame();
  Push(Smi::FromInt(StackFrame::STUB));
  // Adjust FP to point to saved FP.
  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  if (FLAG_enable_ool_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_ool_constant_pool_available(true);
  }
}


void MacroAssembler::Prologue(bool code_pre_aging) {
  { PredictableCodeSizeScope predictable_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    // The following three instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
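      // This sequence sets r0 to its own start address and loads pc from the
      // stub address word emitted just below, transferring control to the
      // code age stub.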
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      add(r0, pc, Operand(-8));
      ldr(pc, MemOperand(pc, -4));
      emit_code_stub_address(stub);
    } else {
      PushFixedFrame(r1);
      nop(ip.code());
      // Adjust FP to point to saved FP.
      add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }
  }
  if (FLAG_enable_ool_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_ool_constant_pool_available(true);
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // r0-r3: preserved
  PushFixedFrame();
  if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
    LoadConstantPoolPointerRegister();
  }
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  // Adjust FP to point to saved FP.
  add(fp, sp,
      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}


int MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer
  // (if FLAG_enable_ool_constant_pool).
  int frame_ends;
  if (FLAG_enable_ool_constant_pool) {
    add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
    frame_ends = pc_offset();
    ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
  } else {
    mov(sp, fp);
    frame_ends = pc_offset();
    ldm(ia_w, sp, fp.bit() | lr.bit());
  }
  return frame_ends;
}


void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  Push(lr, fp);
  mov(fp, Operand(sp));  // Set up new frame pointer.
  // Reserve room for saved entry sp and code object.
  sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
  if (emit_debug_code()) {
    mov(ip, Operand::Zero());
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_ool_constant_pool) {
    str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(ip, Operand(CodeObject()));
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    SaveFPRegs(sp, ip);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   DwVfpRegister::kMaxNumRegisters * kDoubleSize,
    // since the sp slot, code slot and constant pool slot (if
    // FLAG_enable_ool_constant_pool) were pushed after the fp.
  }

  // Reserve place for the return address and stack space and align the frame
  // preparing for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(ip, sp, Operand(kPointerSize));
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  SmiTag(scratch1, length);
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM
}


void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count,
                                    bool restore_context) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);

  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = ExitFrameConstants::kFrameSize;
    sub(r3, fp,
        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
    RestoreFPRegs(r3, ip);
  }

  // Clear top frame.
  mov(r3, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    ldr(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  if (FLAG_enable_ool_constant_pool) {
    ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
  }
}


void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}


// On ARM this is just a synonym to make the purpose clear.
void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
  MovFromFloatResult(dst);
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual argument counts match. If not,
  // set up registers per the contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  DCHECK(actual.is_immediate() || actual.reg().is(r0));
  DCHECK(expected.is_immediate() || expected.reg().is(r2));
  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
                 call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  SmiUntag(expected_reg);
  ldr(code_reg,
      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(function.is(r1));

  // Get the function and set up the context.
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  InvokeCode(r3, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(r1, function);
  InvokeFunction(r1, expected, actual, flag, call_wrapper);
}


void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(lt, fail);
  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(gt, fail);
}


void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  DCHECK(kNotStringTag != 0);

  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsNotStringMask));
  b(ne, fail);
}


void MacroAssembler::IsObjectNameType(Register object,
                                      Register scratch,
                                      Label* fail) {
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(LAST_NAME_TYPE));
  b(hi, fail);
}


void MacroAssembler::DebugBreak() {
  mov(r0, Operand::Zero());
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}


void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // For the JSEntry handler, we must preserve r0-r4; r5-r6 are available.
  // We will build up the handler from the bottom by pushing on the stack.
  // Set up the code object (r5) and the state (r6) for pushing.
  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  mov(r5, Operand(CodeObject()));
  mov(r6, Operand(state));

  // Push the frame pointer, context, state, and code object.
  if (kind == StackHandler::JS_ENTRY) {
    mov(cp, Operand(Smi::FromInt(0)));  // Indicates no context.
    mov(ip, Operand::Zero());  // NULL frame pointer.
    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
  } else {
    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
  }

  // Link the current handler as the next handler.
  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(r5, MemOperand(r6));
  push(r5);
  // Set this new handler as the current one.
  str(sp, MemOperand(r6));
}


void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r1);
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  str(r1, MemOperand(ip));
}


void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it.  The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // r0 = exception, r1 = code object, r2 = state.

  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  if (FLAG_enable_ool_constant_pool) {
    ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));  // Constant pool.
  }
  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
  add(pc, r1, Operand::SmiUntag(r2));  // Jump
}


void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in r0.
  if (!value.is(r0)) {
    mov(r0, value);
  }
  // Drop the stack pointer to the top of the top handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));
  // Restore the next handler.
  pop(r2);
  str(r2, MemOperand(r3));

  // Get the code object (r1) and state (r2).  Restore the context and frame
  // pointer.
  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  tst(cp, cp);
  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);

  JumpToHandlerEntry();
}


void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in r0.
  if (!value.is(r0)) {
    mov(r0, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind);
  bind(&fetch_next);
  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  tst(r2, Operand(StackHandler::KindField::kMask));
  b(ne, &fetch_next);

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(r2);
  str(r2, MemOperand(r3));
  // Get the code object (r1) and state (r2).  Clear the context and frame
  // pointer (0 was saved in the handler).
  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());

  JumpToHandlerEntry();
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch));
  DCHECK(!holder_reg.is(ip));
  DCHECK(!scratch.is(ip));

  // Load current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand::Zero());
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the native_context_map.
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull);

    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
    // Restoring ip is not needed; it is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to holder's context.
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ldr(scratch, FieldMemOperand(scratch, token_offset));
  ldr(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}


// Compute the hash code from the untagged key.  This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  eor(t0, t0, Operand(scratch));

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  mvn(scratch, Operand(t0));
  add(t0, scratch, Operand(t0, LSL, 15));
  // hash = hash ^ (hash >> 12);
  eor(t0, t0, Operand(t0, LSR, 12));
  // hash = hash + (hash << 2);
  add(t0, t0, Operand(t0, LSL, 2));
  // hash = hash ^ (hash >> 4);
  eor(t0, t0, Operand(t0, LSR, 4));
  // hash = hash * 2057;
  mov(scratch, Operand(t0, LSL, 11));
  add(t0, t0, Operand(t0, LSL, 3));
  add(t0, t0, scratch);
  // hash = hash ^ (hash >> 16);
  eor(t0, t0, Operand(t0, LSR, 16));
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register t0,
                                              Register t1,
                                              Register t2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'elements'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // t0 - holds the untagged key on entry and holds the hash once computed.
  //
  // t1 - used to hold the capacity mask of the dictionary
  //
  // t2 - used for the index into the dictionary.
  Label done;

  GetNumberHash(t0, t1);

  // Compute the capacity mask.
  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  SmiUntag(t1);
  sub(t1, t1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use t2 for index calculations and keep the hash intact in t0.
    mov(t2, t0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(t2, t2, Operand(t1));

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3

    // Check if the key is identical to the name.
    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
    ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
    cmp(key, Operand(ip));
    if (i != kNumberDictionaryProbes - 1) {
      b(eq, &done);
    } else {
      b(ne, miss);
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // t2: elements + (index * kPointerSize)
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  b(ne, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  ldr(result, FieldMemOperand(t2, kValueOffset));
}


void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register scratch1,
                              Register scratch2,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  DCHECK(!scratch1.is(ip));
  DCHECK(!scratch2.is(ip));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  DCHECK_EQ(0, object_size & kObjectAlignmentMask);

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  // Also, assert that the registers are numbered such that the values
  // are loaded in the correct order.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  intptr_t top   =
      reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);
  DCHECK(result.code() < ip.code());

  // Set up allocation top address register.
  Register topaddr = scratch1;
  mov(topaddr, Operand(allocation_top));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      ldr(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, kUnexpectedAllocationTop);
    }
    // Load allocation limit into ip. Result already contains allocation top.
    ldr(ip, MemOperand(topaddr, limit - top));
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top is
    // safe in new-space because the limit of the heap is aligned there.
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
    Label aligned;
    b(eq, &aligned);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand(ip));
      b(hs, gc_required);
    }
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
    bind(&aligned);
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. We must preserve the ip register at this
  // point, so we cannot just use add().
  DCHECK(object_size > 0);
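  // Add object_size in chunks that each encode as a single ARM immediate
  // (an 8-bit field at an even bit position). Each add sets the flags, and
  // later chunks are only added while the carry is still clear, so an
  // overflow falls through to the gc_required branch below.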
  Register source = result;
  Condition cond = al;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;
    } else {
      int bits = object_size & (0xff << shift);
      object_size -= bits;
      shift += 8;
      Operand bits_operand(bits);
      DCHECK(bits_operand.instructions_required(this) == 1);
      add(scratch2, source, bits_operand, SetCC, cond);
      source = scratch2;
      cond = cc;
    }
  }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);
  str(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}


void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register scratch1,
                              Register scratch2,
                              Label* gc_required,
                              AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  // Assert that the register arguments are different and that none of
  // them are ip. ip is used explicitly in the code generated below.
  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  DCHECK(!object_size.is(ip));
  DCHECK(!result.is(ip));
  DCHECK(!scratch1.is(ip));
  DCHECK(!scratch2.is(ip));

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  // Also, assert that the registers are numbered such that the values
  // are loaded in the correct order.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top =
      reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);
  DCHECK(result.code() < ip.code());

  // Set up allocation top address.
  Register topaddr = scratch1;
  mov(topaddr, Operand(allocation_top));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      ldr(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, kUnexpectedAllocationTop);
    }
    // Load allocation limit into ip. Result already contains allocation top.
    ldr(ip, MemOperand(topaddr, limit - top));
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top is
    // safe in new-space because the limit of the heap is aligned there.
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
    Label aligned;
    b(eq, &aligned);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand(ip));
      b(hs, gc_required);
    }
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
    bind(&aligned);
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
  } else {
    add(scratch2, result, Operand(object_size), SetCC);
  }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);

  // Update allocation top. result temporarily holds the new top.
  if (emit_debug_code()) {
    tst(scratch2, Operand(kObjectAlignmentMask));
    Check(eq, kUnalignedAllocationInNewSpace);
  }
  str(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  mov(scratch, Operand(new_space_allocation_top));
  ldr(scratch, MemOperand(scratch));
  cmp(object, scratch);
  Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
  // Write the address of the object to un-allocate as the current top.
  mov(scratch, Operand(new_space_allocation_top));
  str(object, MemOperand(scratch));
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
  add(scratch1, scratch1,
      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate two-byte string in new space.
  Allocate(scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  DCHECK(kCharSize == 1);
  add(scratch1, length,
      Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate one-byte string in new space.
  Allocate(scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kConsStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           TAG_OBJECT);

  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::CompareObjectType(Register object,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  const Register temp = type_reg.is(no_reg) ? ip : type_reg;

  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, temp, type);
}


void MacroAssembler::CheckObjectTypeRange(Register object,
                                          Register map,
                                          InstanceType min_type,
                                          InstanceType max_type,
                                          Label* false_label) {
  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
  STATIC_ASSERT(LAST_TYPE < 256);
  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
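  // Bias the instance type by min_type so a single unsigned comparison
  // against (max_type - min_type) checks the whole range.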
  ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
  sub(ip, ip, Operand(min_type));
  cmp(ip, Operand(max_type - min_type));
  b(hi, false_label);
}


void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  // Registers map and type_reg can be ip. These two lines assert
  // that ip can be used with the two instructions (the constants
  // will never need ip).
  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
  STATIC_ASSERT(LAST_TYPE < 256);
  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(type_reg, Operand(type));
}


void MacroAssembler::CompareRoot(Register obj,
                                 Heap::RootListIndex index) {
  DCHECK(!obj.is(ip));
  LoadRoot(ip, index);
  cmp(obj, ip);
}


void MacroAssembler::CheckFastElements(Register map,
                                       Register scratch,
                                       Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
  b(hi, fail);
}


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  b(ls, fail);
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
  b(hi, fail);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Register scratch,
                                          Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  b(hi, fail);
}


void MacroAssembler::StoreNumberToDoubleElements(
                                      Register value_reg,
                                      Register key_reg,
                                      Register elements_reg,
                                      Register scratch1,
                                      LowDwVfpRegister double_scratch,
                                      Label* fail,
                                      int elements_offset) {
  Label smi_value, store;

  // Handle smi values specially.
  JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number
  CheckMap(value_reg,
           scratch1,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
  // Force a canonical NaN.
  if (emit_debug_code()) {
    vmrs(ip);
    tst(ip, Operand(kVFPDefaultNaNModeControlBit));
    Assert(ne, kDefaultNaNModeNotSet);
  }
  VFPCanonicalizeNaN(double_scratch);
  b(&store);

  bind(&smi_value);
  SmiToDouble(double_scratch, value_reg);

  bind(&store);
  add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
  vstr(double_scratch,
       FieldMemOperand(scratch1,
                       FixedDoubleArray::kHeaderSize - elements_offset));
}


void MacroAssembler::CompareMap(Register obj,
                                Register scratch,
                                Handle<Map> map,
                                Label* early_success) {
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMap(scratch, map, early_success);
}


void MacroAssembler::CompareMap(Register obj_map,
                                Handle<Map> map,
                                Label* early_success) {
  cmp(obj_map, Operand(map));
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  Label success;
  CompareMap(obj, scratch, map, &success);
  b(ne, fail);
  bind(&success);
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(ip, index);
  cmp(scratch, ip);
  b(ne, fail);
}


void MacroAssembler::DispatchMap(Register obj,
                                 Register scratch,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  mov(ip, Operand(map));
  cmp(scratch, ip);
  Jump(success, RelocInfo::CODE_TARGET, eq);
  bind(&fail);
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  Label non_instance;
  if (miss_on_bound_function) {
    // Check that the receiver isn't a smi.
    JumpIfSmi(function, miss);

    // Check that the function really is a function.  Load map into result reg.
    CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
    b(ne, miss);

    ldr(scratch,
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    ldr(scratch,
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
    tst(scratch,
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
    b(ne, miss);

    // Make sure that the function has an instance prototype.
    ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
    tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
    b(ne, &non_instance);
  }

  // Get the prototype or initial map from the function.
  ldr(result,
      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  cmp(result, ip);
  b(eq, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
  b(ne, &done);

  // Get the prototype from the initial map.
  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));

  if (miss_on_bound_function) {
    jmp(&done);

    // Non-instance prototype: Fetch prototype from constructor field
    // in initial map.
    bind(&non_instance);
    ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
  }

  // All done.
  bind(&done);
}


void MacroAssembler::CallStub(CodeStub* stub,
                              TypeFeedbackId ast_id,
                              Condition cond) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}


void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    int stack_space,
    MemOperand return_value_operand,
    MemOperand* context_restore_operand) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);

  DCHECK(function_address.is(r1) || function_address.is(r2));

  Label profiler_disabled;
  Label end_profiler_check;
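  // If the profiler is active, route the call through the profiling thunk
  // (thunk_ref) so it can be observed; otherwise call the API function
  // directly.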
  mov(r9, Operand(ExternalReference::is_profiling_address(isolate())));
  ldrb(r9, MemOperand(r9, 0));
  cmp(r9, Operand(0));
  b(eq, &profiler_disabled);

  // Additional parameter is the address of the actual callback.
  mov(r3, Operand(thunk_ref));
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  Move(r3, function_address);
  bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  mov(r9, Operand(next_address));
  ldr(r4, MemOperand(r9, kNextOffset));
  ldr(r5, MemOperand(r9, kLimitOffset));
  ldr(r6, MemOperand(r9, kLevelOffset));
  add(r6, r6, Operand(1));
  str(r6, MemOperand(r9, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, r0);
    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate());
  stub.GenerateCall(this, r3);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, r0);
    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // load value from ReturnValue
  ldr(r0, return_value_operand);
  bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  str(r4, MemOperand(r9, kNextOffset));
  if (emit_debug_code()) {
    ldr(r1, MemOperand(r9, kLevelOffset));
    cmp(r1, r6);
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  sub(r6, r6, Operand(1));
  str(r6, MemOperand(r9, kLevelOffset));
  ldr(ip, MemOperand(r9, kLimitOffset));
  cmp(r5, ip);
  b(ne, &delete_allocated_handles);

  // Check if the function scheduled an exception.
  bind(&leave_exit_frame);
  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
  mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
  ldr(r5, MemOperand(ip));
  cmp(r4, r5);
  b(ne, &promote_scheduled_exception);
  bind(&exception_handled);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    ldr(cp, *context_restore_operand);
  }
  // LeaveExitFrame expects unwind space to be in a register.
  mov(r4, Operand(stack_space));
  LeaveExitFrame(false, r4, !restore_context);
  mov(pc, lr);

  bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallExternalReference(
        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
        0);
  }
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  str(r5, MemOperand(r9, kLimitOffset));
  mov(r4, r0);
  PrepareCallCFunction(1, r5);
  mov(r0, Operand(ExternalReference::isolate_address(isolate())));
  CallCFunction(
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
  mov(r0, r4);
  jmp(&leave_exit_frame);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // If the hash field contains an array index pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it does not
  // conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}


void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
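  // With VFP3 the conversion can treat the smi as a fixed-point value with
  // one fraction bit, so the tag is stripped as part of vcvt; otherwise the
  // smi is untagged explicitly via ip first.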
  if (CpuFeatures::IsSupported(VFP3)) {
    vmov(value.low(), smi);
    vcvt_f64_s32(value, 1);
  } else {
    SmiUntag(ip, smi);
    vmov(value.low(), ip);
    vcvt_f64_s32(value, value.low());
  }
}


void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
                                       LowDwVfpRegister double_scratch) {
  DCHECK(!double_input.is(double_scratch));
  vcvt_s32_f64(double_scratch.low(), double_input);
  vcvt_f64_s32(double_scratch, double_scratch.low());
  VFPCompareAndSetFlags(double_input, double_scratch);
}


void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                           DwVfpRegister double_input,
                                           LowDwVfpRegister double_scratch) {
  DCHECK(!double_input.is(double_scratch));
  vcvt_s32_f64(double_scratch.low(), double_input);
  vmov(result, double_scratch.low());
  vcvt_f64_s32(double_scratch, double_scratch.low());
  VFPCompareAndSetFlags(double_input, double_scratch);
}


void MacroAssembler::TryInt32Floor(Register result,
                                   DwVfpRegister double_input,
                                   Register input_high,
                                   LowDwVfpRegister double_scratch,
                                   Label* done,
                                   Label* exact) {
  DCHECK(!result.is(input_high));
  DCHECK(!double_input.is(double_scratch));
  Label negative, exception;

  VmovHigh(input_high, double_input);

  // Test for NaN and infinities.
  Sbfx(result, input_high,
       HeapNumber::kExponentShift, HeapNumber::kExponentBits);
  cmp(result, Operand(-1));
  b(eq, &exception);
  // Test for values that can be exactly represented as a
  // signed 32-bit integer.
  TryDoubleToInt32Exact(result, double_input, double_scratch);
  // If exact, return (result already fetched).
  b(eq, exact);
  cmp(input_high, Operand::Zero());
  b(mi, &negative);

  // Input is in ]+0, +inf[.
  // If result equals 0x7fffffff, the input was out of range or
  // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
  // could fit into an int32; that means we always treat such input as
  // out of range and always go to the exception path.
  // If result < 0x7fffffff, go to done, result fetched.
  cmn(result, Operand(1));
  b(mi, &exception);
  b(done);

  // Input is in ]-inf, -0[.
  // If x is a non-integer negative number,
  // floor(x) <=> round_to_zero(x) - 1.
  bind(&negative);
  sub(result, result, Operand(1), SetCC);
  // If result is still negative, go to done, result fetched.
  // Else, we had an overflow and we fall through exception.
  b(mi, done);
  bind(&exception);
}

void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DwVfpRegister double_input,
                                                Label* done) {
  LowDwVfpRegister double_scratch = kScratchDoubleReg;
  vcvt_s32_f64(double_scratch.low(), double_input);
  vmov(result, double_scratch.low());

  // If result is not saturated (0x7fffffff or 0x80000000), we are done.
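  // Subtracting 1 maps the two saturated values to 0x7ffffffe and 0x7fffffff,
  // while any genuine result maps below 0x7ffffffe in a signed comparison.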
  sub(ip, result, Operand(1));
  cmp(ip, Operand(0x7ffffffe));
  b(lt, done);
}


void MacroAssembler::TruncateDoubleToI(Register result,
                                       DwVfpRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(lr);
  sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  vstr(double_input, MemOperand(sp, 0));

  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);

  add(sp, sp, Operand(kDoubleSize));
  pop(lr);

  bind(&done);
}


void MacroAssembler::TruncateHeapNumberToI(Register result,
                                           Register object) {
  Label done;
  LowDwVfpRegister double_scratch = kScratchDoubleReg;
  DCHECK(!result.is(object));

  vldr(double_scratch,
       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(lr);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,
                     true);
  CallStub(&stub);
  pop(lr);

  bind(&done);
}


void MacroAssembler::TruncateNumberToI(Register object,
                                       Register result,
                                       Register heap_number_map,
                                       Register scratch1,
                                       Label* not_number) {
  Label done;
  DCHECK(!result.is(object));

  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
  TruncateHeapNumberToI(result, object);

  bind(&done);
}


void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
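    // ubfx extracts the field starting just above the smi tag bit, so the
    // value is untagged and masked in a single instruction.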
    ubfx(dst, src, kSmiTagSize, num_least_bits);
  } else {
    SmiUntag(dst, src);
    and_(dst, dst, Operand((1 << num_least_bits) - 1));
  }
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  and_(dst, src, Operand((1 << num_least_bits) - 1));
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All parameters are on the stack.  r0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ext));

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  JumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
  // Thumb mode builtin.
  DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
  mov(r1, Operand(builtin));
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  GetBuiltinEntry(r2, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(r2));
    Call(r2);
    call_wrapper.AfterCall();
  } else {
    DCHECK(flag == JUMP_FUNCTION);
    Jump(r2);
  }
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  ldr(target,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  ldr(target, FieldMemOperand(target,
                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(r1));
  GetBuiltinFunction(r1, id);
  // Load the code entry point from the builtins object.
  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    ldr(scratch1, MemOperand(scratch2));
    add(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    ldr(scratch1, MemOperand(scratch2));
    sub(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
  if (emit_debug_code())
    Check(cond, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    DCHECK(!elements.is(ip));
    Label ok;
    push(elements);
    ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
    pop(elements);
  }
}


2830
void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2831
  Label L;
2832
  b(cond, &L);
2833
  Abort(reason);
2834 2835 2836 2837 2838
  // will not return here
  bind(&L);
}


2839
void MacroAssembler::Abort(BailoutReason reason) {
  Label abort_start;
  bind(&abort_start);
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    stop(msg);
    return;
  }
#endif

  mov(r0, Operand(Smi::FromInt(reason)));
  push(r0);

  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // will not return here
  if (is_const_pool_blocked()) {
    // If the calling code cares about the exact number of
    // instructions generated, we insert padding here to keep the size
    // of the Abort macro constant.
    static const int kExpectedAbortInstructions = 7;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    DCHECK(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    mov(dst, cp);
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  ldr(scratch,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  ldr(scratch,
      MemOperand(scratch,
                 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  ldr(ip, FieldMemOperand(scratch, offset));
  cmp(map_in_out, ip);
  b(ne, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  ldr(map_in_out, FieldMemOperand(scratch, offset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  ldr(function,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  ldr(function, MemOperand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
2944
    Label ok, fail;
2945
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2946 2947
    b(&ok);
    bind(&fail);
2948
    Abort(kGlobalFunctionsMustHaveInitialMap);
2949 2950 2951 2952 2953
    bind(&ok);
  }
}


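// The two helpers below use the standard bit trick: x is a power of two
// exactly when x != 0 and (x & (x - 1)) == 0.  The sub with SetCC makes the
// mi branch catch reg == 0 (0 - 1 is negative), and the tst catches values
// with more than one bit set.  For example, reg = 8 gives scratch = 7 and
// 8 & 7 == 0, so execution falls through; reg = 6 gives scratch = 5 and
// 6 & 5 == 4, so the "not a power of two" branch is taken.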
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, not_power_of_two_or_zero);
  tst(scratch, reg);
  b(ne, not_power_of_two_or_zero);
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
    Register reg,
    Register scratch,
    Label* zero_and_neg,
    Label* not_power_of_two) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, zero_and_neg);
  tst(scratch, reg);
  b(ne, not_power_of_two);
}


void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), eq);
  b(ne, on_not_both_smi);
}


void MacroAssembler::UntagAndJumpIfSmi(
    Register dst, Register src, Label* smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  SmiUntag(dst, src, SetCC);
  b(cc, smi_case);  // Shifter carry is not set for a smi.
}


void MacroAssembler::UntagAndJumpIfNotSmi(
    Register dst, Register src, Label* non_smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  SmiUntag(dst, src, SetCC);
  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), ne);
  b(eq, on_either_smi);
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmi);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(eq, kOperandIsNotSmi);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmiAndNotAString);
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmiAndNotAName);
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, LAST_NAME_TYPE);
    pop(object);
    Check(le, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, Heap::kUndefinedValueRootIndex);
    b(eq, &done_checking);
    ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, kHeapNumberMapRegisterClobbered);
  }
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  cmp(scratch, heap_number_map);
  b(ne, on_not_heap_number);
}


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
  sub(mask, mask, Operand(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  add(scratch1,
      object,
      Operand(HeapNumber::kValueOffset - kHeapObjectTag));
  ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
  eor(scratch1, scratch1, Operand(scratch2));
  and_(scratch1, scratch1, Operand(mask));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  add(scratch1,
      number_string_cache,
      Operand(scratch1, LSL, kPointerSizeLog2 + 1));

  Register probe = mask;
  ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  sub(scratch2, object, Operand(kHeapObjectTag));
  vldr(d0, scratch2, HeapNumber::kValueOffset);
  sub(probe, probe, Operand(kHeapObjectTag));
  vldr(d1, probe, HeapNumber::kValueOffset);
  VFPCompareAndSetFlags(d0, d1);
  b(ne, not_found);  // The cache did not contain this value.
  b(&load_result_from_cache);

  bind(&is_smi);
  Register scratch = scratch1;
  and_(scratch, mask, Operand(object, ASR, 1));
  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  add(scratch,
      number_string_cache,
      Operand(scratch, LSL, kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  cmp(object, probe);
  b(ne, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(),
                   1,
                   scratch1,
                   scratch2);
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential one-byte strings.
  // Assume that they are non-smis.
  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}

void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
                                                           Register second,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that neither is a smi.
  and_(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
                                               scratch2, failure);
}


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                     Label* not_unique_name) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  b(eq, &succeed);
  cmp(reg, Operand(SYMBOL_TYPE));
  b(ne, not_unique_name);

  bind(&succeed);
}


// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required,
                                        TaggingMode tagging_mode,
                                        MutableMode mode) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;
  AssertIsRoot(heap_number_map, map_index);

  // Store heap number map in the allocated object.
  if (tagging_mode == TAG_RESULT) {
    str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
  } else {
    str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
  }
}


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 DwVfpRegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register heap_number_map,
                                                 Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  sub(scratch1, result, Operand(kHeapObjectTag));
  vstr(value, scratch1, HeapNumber::kValueOffset);
}


// Copies a fixed number of fields of heap objects from src to dst.
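// The copy is done in DwVfpRegister-sized (two pointer word) chunks through
// double_scratch; an odd trailing field is copied through the low
// single-precision half of the same register.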
void MacroAssembler::CopyFields(Register dst,
                                Register src,
                                LowDwVfpRegister double_scratch,
                                int field_count) {
  int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
  for (int i = 0; i < double_count; i++) {
    vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
    vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
  }

  STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
  STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);

  int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
  if (remain != 0) {
    vldr(double_scratch.low(),
         FieldMemOperand(src, (field_count - 1) * kPointerSize));
    vstr(double_scratch.low(),
         FieldMemOperand(dst, (field_count - 1) * kPointerSize));
  }
}


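// Copies length bytes from src to dst: bytes are copied one at a time until
// src is word aligned, then in word-sized chunks, and finally byte by byte
// for any remaining tail.  On targets without unaligned access support each
// word is written to dst one byte at a time.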
void MacroAssembler::CopyBytes(Register src,
                               Register dst,
                               Register length,
                               Register scratch) {
  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word size chunks.
  cmp(length, Operand(kPointerSize));
  b(le, &byte_loop);

  bind(&align_loop_1);
  tst(src, Operand(kPointerSize - 1));
  b(eq, &word_loop);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  sub(length, length, Operand(1), SetCC);
  b(&align_loop_1);
  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    tst(src, Operand(kPointerSize - 1));
    Assert(eq, kExpectingAlignmentForCopyBytes);
  }
  cmp(length, Operand(kPointerSize));
  b(lt, &byte_loop);
  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
    str(scratch, MemOperand(dst, kPointerSize, PostIndex));
  } else {
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
  }
  sub(length, length, Operand(kPointerSize));
  b(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  cmp(length, Operand::Zero());
  b(eq, &done);
  bind(&byte_loop_1);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  sub(length, length, Operand(1), SetCC);
  b(ne, &byte_loop_1);
  bind(&done);
}


void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  b(&entry);
  bind(&loop);
  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
  bind(&entry);
  cmp(start_offset, end_offset);
  b(lt, &loop);
}


void MacroAssembler::CheckFor32DRegs(Register scratch) {
  mov(scratch, Operand(ExternalReference::cpu_features()));
  ldr(scratch, MemOperand(scratch));
  tst(scratch, Operand(1u << VFP32DREGS));
}


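// SaveFPRegs and RestoreFPRegs rely on the flags left by CheckFor32DRegs:
// when d16-d31 are available (ne) they are saved and restored as well;
// otherwise (eq) the corresponding stack space is still reserved so that the
// frame layout is identical on both kinds of hardware.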
void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
  CheckFor32DRegs(scratch);
  vstm(db_w, location, d16, d31, ne);
  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
  vstm(db_w, location, d0, d15);
}


void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
  CheckFor32DRegs(scratch);
  vldm(ia_w, location, d0, d15);
  vldm(ia_w, location, d16, d31, ne);
  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  and_(scratch1, first, Operand(kFlatOneByteStringMask));
  and_(scratch2, second, Operand(kFlatOneByteStringMask));
  cmp(scratch1, Operand(kFlatOneByteStringTag));
  // Ignore second test if first test failed.
  cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
  b(ne, failure);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
                                                              Register scratch,
                                                              Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  and_(scratch, type, Operand(kFlatOneByteStringMask));
  cmp(scratch, Operand(kFlatOneByteStringTag));
  b(ne, failure);
}


static const int kRegisterPassedArguments = 4;


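// Returns the number of argument words that must be passed on the stack for
// a C call.  For example, with the hard-float ABI five integer arguments and
// no doubles need one stack word (r0-r3 carry the first four), while with the
// soft-float ABI two integer arguments plus two doubles occupy six register
// slots and therefore need two stack words.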
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    // In the hard floating point calling convention, we can use
    // all double registers to pass doubles.
    if (num_double_arguments > DoubleRegister::NumRegisters()) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::NumRegisters());
    }
  } else {
    // In the soft floating point calling convention, every double
    // argument is passed using two registers.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  SmiTst(string);
  Check(ne, kNonObject);

  ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
  ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));

  and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
  cmp(ip, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType);

  // The index is assumed to be untagged coming in, tag it to compare with the
  // string length without using a temp register; it is restored at the end of
  // this function.
  Label index_tag_ok, index_tag_bad;
  TrySmiTag(index, index, &index_tag_bad);
  b(&index_tag_ok);
  bind(&index_tag_bad);
  Abort(kIndexIsTooLarge);
  bind(&index_tag_ok);

  ldr(ip, FieldMemOperand(string, String::kLengthOffset));
  cmp(index, ip);
  Check(lt, kIndexIsTooLarge);

  cmp(index, Operand(Smi::FromInt(0)));
  Check(ge, kIndexIsNegative);

  SmiUntag(index, index);
}


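// Reserves stack space for the arguments that do not fit in registers and,
// if the platform requires more than single-word alignment, aligns sp and
// stashes the original sp in the highest reserved slot so that
// CallCFunctionHelper can restore it after the call.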
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
  DCHECK(src.is(d0));
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src);
  }
}


// On ARM this is just a synonym to make the purpose clear.
void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
  MovToFloatParameter(src);
}


void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
                                          DwVfpRegister src2) {
  DCHECK(src1.is(d0));
  DCHECK(src2.is(d1));
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src1);
    vmov(r2, r3, src2);
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  mov(ip, Operand(function));
  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if V8_HOST_ARCH_ARM
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  Call(function);
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


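// Given the address of a ldr instruction that loads from the constant pool
// (or, with out-of-line constant pools, a movw/movt/ldr sequence), leaves the
// address of the constant pool entry it loads from in the result register.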
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result,
                                               Register scratch) {
  Label small_constant_pool_load, load_result;
  ldr(result, MemOperand(ldr_location));

  if (FLAG_enable_ool_constant_pool) {
    // Check if this is an extended constant pool load.
    and_(scratch, result, Operand(GetConsantPoolLoadMask()));
    teq(scratch, Operand(GetConsantPoolLoadPattern()));
    b(eq, &small_constant_pool_load);
    if (emit_debug_code()) {
      // Check that the instruction sequence is:
      //   movw reg, #offset_low
      //   movt reg, #offset_high
      //   ldr reg, [pp, reg]
      Instr patterns[] = {GetMovWPattern(), GetMovTPattern(),
                          GetLdrPpRegOffsetPattern()};
      for (int i = 0; i < 3; i++) {
        ldr(result, MemOperand(ldr_location, i * kInstrSize));
        and_(result, result, Operand(patterns[i]));
        cmp(result, Operand(patterns[i]));
        Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
      }
      // Result was clobbered. Restore it.
      ldr(result, MemOperand(ldr_location));
    }

    // Get the offset into the constant pool.  First extract movw immediate into
    // result.
    and_(scratch, result, Operand(0xfff));
    mov(ip, Operand(result, LSR, 4));
    and_(ip, ip, Operand(0xf000));
    orr(result, scratch, Operand(ip));
    // Then extract movt immediate and or into result.
    ldr(scratch, MemOperand(ldr_location, kInstrSize));
    and_(ip, scratch, Operand(0xf0000));
    orr(result, result, Operand(ip, LSL, 12));
    and_(scratch, scratch, Operand(0xfff));
    orr(result, result, Operand(scratch, LSL, 16));

    b(&load_result);
  }

  bind(&small_constant_pool_load);
  if (emit_debug_code()) {
    // Check that the instruction is a ldr reg, [<pc or pp> + offset] .
    and_(result, result, Operand(GetConsantPoolLoadPattern()));
    cmp(result, Operand(GetConsantPoolLoadPattern()));
    Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
    // Result was clobbered. Restore it.
    ldr(result, MemOperand(ldr_location));
  }

  // Get the offset into the constant pool.
  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
  and_(result, result, Operand(kLdrOffsetMask));

  bind(&load_result);
  // Get the address of the constant.
  if (FLAG_enable_ool_constant_pool) {
    add(result, pp, Operand(result));
  } else {
    add(result, ldr_location, Operand(result));
    add(result, result, Operand(Instruction::kPCReadOffset));
  }
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  Bfc(scratch, object, 0, kPageSizeBits);
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);
}


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    mov(scratch, Operand(map));
    ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
    tst(scratch, Operand(Map::Deprecated::kMask));
    b(ne, if_deprecated);
  }
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(ip, Operand(mask_scratch));
  b(first_bit == 1 ? eq : ne, &other_color);
  // Shift left 1 by adding.
  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
  b(eq, &word_boundary);
  tst(ip, Operand(mask_scratch));
  b(second_bit == 1 ? ne : eq, has_color);
  jmp(&other_color);

  bind(&word_boundary);
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  tst(ip, Operand(1));
  b(second_bit == 1 ? ne : eq, has_color);
  bind(&other_color);
}


// Detect some, but not all, common pointer-free objects.  This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  Label is_data_object;
  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  b(eq, &is_data_object);
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  b(ne, not_data_object);
  bind(&is_data_object);
}


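// Computes the location of the mark bit for the object at addr_reg:
// bitmap_reg is left pointing at the bitmap cell (relative to the page, so
// loads add MemoryChunk::kHeaderSize) and mask_reg holds the bit mask that
// selects the object's first mark bit within that cell.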
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
  mov(ip, Operand(1));
  mov(mask_reg, Operand(ip, LSL, mask_reg));
}


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(mask_scratch, load_scratch);
  b(ne, &done);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    // LSL may overflow, making the check conservative.
    tst(load_scratch, Operand(mask_scratch, LSL, 1));
    b(eq, &ok);
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;  // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after testing type.
  Label is_data_object;

  // Check for heap-number
  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
  b(eq, &is_data_object);

  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  b(ne, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  tst(instance_type, Operand(kExternalStringTag));
  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
  b(ne, &is_data_object);

  // Sequential string, either Latin1 or UC16.
  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
  tst(instance_type, Operand(kStringEncodingMask));
  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  orr(ip, ip, Operand(mask_scratch));
  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  add(ip, ip, Operand(length));
  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  Usat(output_reg, 8, Operand(input_reg));
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DwVfpRegister input_reg,
                                        LowDwVfpRegister double_scratch) {
  Label done;

  // Handle inputs >= 255 (including +infinity).
  Vmov(double_scratch, 255.0, result_reg);
  mov(result_reg, Operand(255));
  VFPCompareAndSetFlags(input_reg, double_scratch);
  b(ge, &done);

  // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
  // rounding mode will provide the correct result.
  vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
  vmov(result_reg, double_scratch.low());

  bind(&done);
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  and_(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


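// Checks whether a for-in enumeration may use the enum cache.  Starting from
// the object in r0, the prototype chain is walked: the receiver's map must
// have a valid enum cache length, every other map's enum cache must be empty,
// and every object's elements must be the empty fixed array (or the empty
// slow element dictionary).  Falls through on success, jumps to call_runtime
// otherwise.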
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register  empty_fixed_array_value = r6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(r2, r0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));

  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
  b(eq, call_runtime);

  jmp(&start);

  bind(&next);
  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(0)));
  b(ne, call_runtime);

  bind(&start);

  // Check that there are no elements. Register r2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
  cmp(r2, empty_fixed_array_value);
  b(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
  b(ne, call_runtime);

  bind(&no_elements);
  ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
  cmp(r2, null_value);
  b(ne, &next);
}


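// An AllocationMemento, if present, sits directly behind the JSArray in new
// space.  This computes the address just past such a memento, checks that it
// lies in new space below the current allocation top, then compares the
// candidate memento's map word against the allocation memento map; the caller
// branches on the resulting flags (eq means a memento was found).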
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  add(scratch_reg, receiver_reg,
      Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  cmp(scratch_reg, Operand(new_space_start));
  b(lt, no_memento_found);
  mov(ip, Operand(new_space_allocation_top));
  ldr(ip, MemOperand(ip));
  cmp(scratch_reg, ip);
  b(gt, no_memento_found);
  ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  cmp(scratch_reg,
      Operand(isolate()->factory()->allocation_memento_map()));
}


Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // scratch contained elements pointer.
  mov(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
  b(eq, found);
  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  cmp(current, Operand(factory->null_value()));
  b(ne, &loop_again);
}


#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif


CodePatcher::CodePatcher(byte* address,
                         int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    CpuFeatures::FlushICache(address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}


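// Truncating signed division by a constant, using the usual magic-number
// multiplication: smmul/smmla produce the high 32 bits of the 64-bit product
// dividend * multiplier, which is then arithmetically shifted and corrected
// with the sign bit of the dividend so the result rounds toward zero.  For
// example, for divisor == 3 the multiplier is 0x55555556 and no shift is
// needed.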
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(ip));
  DCHECK(!result.is(ip));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
  mov(ip, Operand(mag.multiplier));
  bool neg = (mag.multiplier & (1U << 31)) != 0;
  if (divisor > 0 && neg) {
    smmla(result, dividend, ip, dividend);
  } else {
    smmul(result, dividend, ip);
    if (divisor < 0 && !neg && mag.multiplier > 0) {
      sub(result, result, Operand(dividend));
    }
  }
  if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
  add(result, result, Operand(dividend, LSR, 31));
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM