// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm/codegen-arm.h"

#if V8_TARGET_ARCH_ARM

#include <memory>

#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


#define __ masm.

#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

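  // The stub below copies "chars" bytes from src to dest. With NEON, the main
  // loop moves 64 bytes per iteration (two vld1/vst1 pairs of four
  // d-registers) with pld prefetches ahead of src, then drains the remainder
  // with progressively smaller vector copies; the final 8-byte copy may
  // overlap the one before it. Without NEON, a word-at-a-time loop is used.
  // Tails smaller than 4 bytes are handled at less_4 in either case.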
  if (CpuFeatures::IsSupported(NEON)) {
    CpuFeatureScope scope(&masm, NEON);
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
    __ pld(MemOperand(src, 0));

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // LSL #31 moves bit 0 of chars into the sign bit and shifts bit 1 out into
  // the carry flag: bit0 => Z clear (ne), bit1 => C set (cs).
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}


// Convert 8-bit to 16-bit characters. The number of characters to copy must
// be at least 8.
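// Illustrative only -- the generated stub behaves like this C loop (the
// function name here is ours, not V8's):
//
//   void CopyUint8ToUint16(uint16_t* dest, const uint8_t* src, size_t chars) {
//     for (size_t i = 0; i < chars; ++i) dest[i] = src[i];  // zero-extend
//   }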
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    Isolate* isolate, MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    CpuFeatureScope scope(&masm, NEON);
    Register temp = r3;
    Label loop;

    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

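    // Each iteration loads 8 source bytes, widens them to 8 halfwords with
    // vmovl (zero extension), and stores the resulting 16 bytes. temp marks
    // the end of the whole-8-byte portion of the destination.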
    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

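    // Without NEON, widen 4 bytes per iteration: uxtb16 zero-extends bytes
    // 0 and 2 (and, with the ROR #8 form, bytes 1 and 3) into halfword lanes,
    // and pkhbt/pkhtb pack them back into the two result words.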
    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, temp1);
    __ uxtb16(temp4, temp1, 8);
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, temp1, 8);
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, temp1);
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif
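
// Generates a free-standing sqrt stub: the incoming double is read with
// MovFromFloatParameter, vsqrt is applied, and the result is returned with
// MovToFloatResult (these macros abstract over the float calling convention
// in use).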
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

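// A map-only elements transition: the backing store is left untouched, so the
// generated code just performs the optional allocation-memento check and then
// writes the new map into the receiver with a write barrier.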
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


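// Generates code that transitions a JSArray backing store from a FixedArray
// of smis to a freshly allocated FixedDoubleArray: each smi is untagged and
// converted to a double, and holes are written as the hole NaN bit pattern
// (kHoleNanUpper32:kHoleNanLower32). Empty arrays take the map-only path.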
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(length, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  __ sub(array, array, Operand(kHeapObjectTag));
  // array: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(scratch1, array, Operand(kHeapObjectTag));
  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_end, scratch2, Operand(length, LSL, 2));

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;

  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


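// Generates the reverse transition: each double in the FixedDoubleArray is
// boxed into a newly allocated HeapNumber in a new FixedArray, and hole NaNs
// become the-hole value. The new array is pre-filled with holes so that a GC
// triggered by HeapNumber allocation never sees uninitialized slots.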
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(target_map, receiver, key, value);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));

  __ sub(array, array, Operand(kHeapObjectTag));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ b(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
  __ bind(&initialization_loop_entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &initialization_loop);

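  // The hole-filling loop above advanced dst_elements to dst_end; rewind it to
  // the first element slot and restore the heap-object tag on array.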
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(array, array, Operand(kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offset addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ cmp(upper_bits, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));

  __ bind(&entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &loop);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &one_byte);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the ARM simulator's ICache is set up.
  std::unique_ptr<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
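  // PatchPlatformCodeAge() below overwrites this young sequence (a standard
  // frame push plus a nop) with a jump to a code age stub.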
  patcher->masm()->PushStandardFrame(r1);
  patcher->masm()->nop(ip.code());
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
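    // Patched sequence: "add r0, pc, #-8" leaves the sequence's start address
    // in r0 (pc reads as the current instruction + 8), and "ldr pc, [pc, #-4]"
    // loads the stub address emitted just after the ldr and jumps to it.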
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM