// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_ARM

#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-arm.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
      fast_exp_arm_machine_code, x, 0);
}
#endif


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    DwVfpRegister input = d0;
    DwVfpRegister result = d1;
    DwVfpRegister double_scratch1 = d2;
    DwVfpRegister double_scratch2 = d3;
    Register temp1 = r4;
    Register temp2 = r5;
    Register temp3 = r6;

    if (masm.use_eabi_hardfloat()) {
      // Input value is in d0 anyway, nothing to do.
    } else {
      __ vmov(input, r0, r1);
    }
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (masm.use_eabi_hardfloat()) {
      __ vmov(d0, result);
    } else {
      __ vmov(r0, r1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}

#if defined(V8_HOST_ARCH_ARM)
OS::MemCopyUint8Function CreateMemCopyUint8Function(
      OS::MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
    return stub;
  }
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

  if (CpuFeatures::IsSupported(NEON)) {
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
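    // Prefetch the source and dispatch on the copy length: the main loop
    // below moves 64 bytes per iteration, with progressively smaller NEON
    // tail cases handling the remainder.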
    __ pld(MemOperand(src, 0));

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

  __ bind(&less_4);
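  // Only bits 0 and 1 of chars are still significant here. Shifting chars
  // left by 31 with SetCC moves bit 1 into the carry flag and leaves Z clear
  // exactly when bit 0 is set, so the conditional halfword and byte copies
  // below cover the remaining 0-3 bytes.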
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
#endif
}


// Convert 8 to 16. The number of characters to copy must be at least 8.
OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
      OS::MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
    return stub;
  }
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    Register temp = r3;
    Label loop;

    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

    __ bind(&loop);
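    // vld1 fills d0 with 8 source bytes, vmovl zero-extends them into eight
    // 16-bit lanes of q0, and vst1 writes the resulting 16 bytes. temp marks
    // where the 8-characters-at-a-time part of the destination ends.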
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

    __ bind(&loop);
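    // uxtb16 extracts bytes 0/2 and 1/3 of the loaded word; pkhbt/pkhtb then
    // reassemble them into two words of zero-extended 16-bit characters.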
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, Operand(temp1, ROR, 0));
    __ uxtb16(temp4, Operand(temp1, ROR, 8));
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, Operand(temp1, ROR, 8));
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif

UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // r5: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
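  // lr = r5 * 8: the smi tag contributes a factor of two and LSL #2 a factor
  // of four, giving the byte size of the double element area.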
  __ mov(lr, Operand(r5, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
  // r6: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
  // Update receiver's map.
  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));

  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(r3, r6, Operand(kHeapObjectTag));
  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ RecordWriteField(r2,
                      JSObject::kElementsOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
  __ add(r6, r9, Operand(r5, LSL, 2));
  __ mov(r4, Operand(kHoleNanLower32));
  __ mov(r5, Operand(kHoleNanUpper32));
  // r3: begin of source FixedArray element fields, not tagged
  // r4: kHoleNanLower32
  // r5: kHoleNanUpper32
  // r6: end of destination FixedDoubleArray, not tagged
  // r9: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(r3, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, r9, 0);
  __ add(r9, r9, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));

  __ bind(&entry);
  __ cmp(r9, r6);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(r3, r2, r1, r0);
  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // r4: source FixedDoubleArray
  // r5: number of elements (smi-tagged)

  // Allocate new FixedArray.
  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
  __ add(r0, r0, Operand(r5, LSL, 1));
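  // r0 = header size plus one pointer per element: the smi-tagged length
  // shifted left once equals length * kPointerSize.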
  __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
  // r6: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
  __ add(r6, r6, Operand(kHeapObjectTag));
  __ add(r5, r3, Operand(r5, LSL, 1));
  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in r4 to fully take advantage of post-indexing.
  // r3: begin of destination FixedArray element fields, not tagged
  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
  // r5: end of destination FixedArray, not tagged
  // r6: destination FixedArray
  // r9: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(r3, r2, r1, r0);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  __ ldr(r1, MemOperand(r4, 8, PostIndex));
  // r1: current element's upper 32 bits
  // r4: address of next element's upper 32 bits
  __ cmp(r1, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
  // r2: new heap number
  __ ldr(r0, MemOperand(r4, 12, NegOffset));
  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
  __ mov(r0, r3);
  __ str(r2, MemOperand(r3, 4, PostIndex));
  __ RecordWrite(r6,
                 r0,
                 r2,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
  __ str(r0, MemOperand(r3, 4, PostIndex));

  __ bind(&entry);
  __ cmp(r3, r5);
  __ b(lt, &loop);

  __ Pop(r3, r2, r1, r0);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ RecordWriteField(r2,
                      JSObject::kElementsOffset,
                      r6,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &ascii);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DwVfpRegister input,
                                   DwVfpRegister result,
                                   DwVfpRegister double_scratch1,
                                   DwVfpRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch1));
  ASSERT(!input.is(double_scratch2));
  ASSERT(!result.is(double_scratch1));
  ASSERT(!result.is(double_scratch2));
  ASSERT(!double_scratch1.is(double_scratch2));
  ASSERT(!temp1.is(temp2));
  ASSERT(!temp1.is(temp3));
  ASSERT(!temp2.is(temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label zero, infinity, done;
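  // Inputs at or below ExpConstant(0) take the early exit that returns zero,
  // and inputs at or above ExpConstant(1) return ExpConstant(2). Everything
  // else goes through a polynomial approximation whose result is finally
  // scaled by a double reassembled from a biased exponent in temp1 and an
  // entry of math_exp_log_table.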

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ vldr(double_scratch1, ExpConstant(0, temp3));
  __ VFPCompareAndSetFlags(double_scratch1, input);
  __ b(ge, &zero);

  __ vldr(double_scratch2, ExpConstant(1, temp3));
  __ VFPCompareAndSetFlags(input, double_scratch2);
  __ b(ge, &infinity);

  __ vldr(double_scratch1, ExpConstant(3, temp3));
  __ vldr(result, ExpConstant(4, temp3));
  __ vmul(double_scratch1, double_scratch1, input);
  __ vadd(double_scratch1, double_scratch1, result);
  __ VmovLow(temp2, double_scratch1);
  __ vsub(double_scratch1, double_scratch1, result);
  __ vldr(result, ExpConstant(6, temp3));
  __ vldr(double_scratch2, ExpConstant(5, temp3));
  __ vmul(double_scratch1, double_scratch1, double_scratch2);
  __ vsub(double_scratch1, double_scratch1, input);
  __ vsub(result, result, double_scratch1);
  __ vmul(double_scratch2, double_scratch1, double_scratch1);
  __ vmul(result, result, double_scratch2);
  __ vldr(double_scratch2, ExpConstant(7, temp3));
  __ vmul(result, result, double_scratch2);
  __ vsub(result, result, double_scratch1);
  // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
  ASSERT(*reinterpret_cast<double*>
         (ExternalReference::math_exp_constants(8).address()) == 1);
  __ vmov(double_scratch2, 1);
  __ vadd(result, result, double_scratch2);
  __ mov(temp1, Operand(temp2, LSR, 11));
  __ Ubfx(temp2, temp2, 0, 11);
  __ add(temp1, temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ add(temp3, temp3, Operand(temp2, LSL, 3));
  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
  // ldm stores the word from the lower address into the lower-numbered
  // register.
  if (temp2.code() < temp3.code()) {
    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp2, temp1);
  } else {
    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp3, temp1);
  }
  __ vmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ vmov(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ vldr(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

static byte* GetNoCodeAgeSequence(uint32_t* length) {
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found in FUNCTIONS
  static bool initialized = false;
  static uint32_t sequence[kNoCodeAgeSequenceLength];
  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
  if (!initialized) {
    // Since patcher is a large object, allocate it dynamically when needed,
    // to avoid overloading the stack in stress conditions.
    SmartPointer<CodePatcher>
        patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
    PredictableCodeSizeScope scope(patcher->masm(), *length);
    patcher->masm()->PushFixedFrame(r1);
    patcher->masm()->nop(ip.code());
    patcher->masm()->add(
        fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    initialized = true;
  }
  return byte_sequence;
}


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = !memcmp(sequence, young_sequence, young_length);
  ASSERT(result ||
         Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
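    // The patched sequence is "add r0, pc, #-8; ldr pc, [pc, #-4]" followed
    // by the stub address: r0 ends up pointing at the start of the sequence
    // and execution continues in the code-age stub.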
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM