// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ia32/codegen-ia32.h"

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  {
    XMMRegister input = xmm1;
    XMMRegister result = xmm2;
    __ movsd(input, Operand(esp, 1 * kPointerSize));
    __ push(eax);
    __ push(ebx);

    MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);

    __ pop(ebx);
    __ pop(eax);
    __ movsd(Operand(esp, 1 * kPointerSize), result);
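    // Return the result in st(0): the ia32 calling convention returns
    // doubles on the x87 floating-point stack, hence the store above and
    // the fld_d below.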
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.
  {
    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
    // Load result into floating point register as return value.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
  Register src = esi;
  Register dst = edi;
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
  __ j(equal, &move_last_63);
  __ bind(&loop);
  // Main loop. Copy in 64 byte chunks.
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
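  // movdq is assumed to emit movdqa for an aligned source and movdqu
  // otherwise; dst is always 16-byte aligned here, so the plain movdqa
  // stores below are safe.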
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
  __ dec(loop_count);
  __ j(not_zero, &loop);
  // At most 63 bytes left to copy.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
  // At most 31 bytes left to copy.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
  __ j(zero, move_last_15);
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
}


void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.


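// Converts a bound label into an absolute address within |buffer|. This works
// because the buffer below is fixed and never relocated, so buffer + pos() is
// the final address of the labeled code; it is used to build the jump tables
// in CreateMemMoveFunction.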
class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};


MemMoveFunction CreateMemMoveFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return NULL;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // The 32-bit cdecl calling convention passes all arguments on the stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  // When copying up to this many bytes, use special "small" handlers.
  const size_t kSmallCopySize = 8;
  // When copying up to this many bytes, use special "medium" handlers.
  const size_t kMediumCopySize = 63;
  // When the non-overlapping region of src and dst is less than this,
  // use a more careful implementation (slightly slower).
  const size_t kMinMoveDistance = 16;
  // Note that these values are dictated by the implementation below; do not
  // just change them and hope things will work!
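
  // Dispatch overview: copies of at most kSmallCopySize bytes use the small
  // jump table, copies of at most kMediumCopySize bytes use the medium jump
  // table, and everything larger runs the 64-byte-chunk main loop, forward
  // or backward depending on how dst and src overlap.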

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  __ prefetch(Operand(src, 0), 1);
  __ cmp(count, kSmallCopySize);
  __ j(below_equal, &small_size);
  __ cmp(count, kMediumCopySize);
  __ j(below_equal, &medium_size);
  __ cmp(dst, src);
  __ j(above, &backward);

  {
    // |dst| is a lower address than |src|. Copy front-to-back.
    Label unaligned_source, move_last_15, skip_last_move;
    __ mov(eax, src);
    __ sub(eax, dst);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &forward_much_overlap);
    // Copy first 16 bytes.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    // Determine distance to alignment: 16 - (dst & 0xF).
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ neg(edx);
    __ add(edx, Immediate(16));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at end of string.
    __ bind(&move_last_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
    __ jmp(&move_last_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, last_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ mov_b(eax, Operand(src, 0));
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ dec(count);
    __ bind(&forward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                        FORWARD, MOVE_UNALIGNED);
    __ bind(&last_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }

  {
    // |dst| is a higher address than |src|. Copy backwards.
    Label unaligned_source, move_first_15, skip_last_move;
    __ bind(&backward);
    // |dst| and |src| always point to the end of what's left to copy.
    __ add(dst, count);
    __ add(src, count);
    __ mov(eax, dst);
    __ sub(eax, src);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &backward_much_overlap);
    // Copy last 16 bytes.
    __ movdqu(xmm0, Operand(src, -0x10));
    __ movdqu(Operand(dst, -0x10), xmm0);
    // Find distance to alignment: dst & 0xF
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ sub(dst, edx);
    __ sub(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
    __ bind(&move_first_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ sub(src, count);
    __ sub(dst, count);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
    __ jmp(&move_first_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, first_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ dec(src);
    __ dec(dst);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    __ dec(count);
    __ bind(&backward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                        BACKWARD, MOVE_UNALIGNED);
    __ bind(&first_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    // Small/medium handlers expect dst/src to point to the beginning.
    __ sub(dst, count);
    __ sub(src, count);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }
  {
    // Special handlers for 9 <= copy_size < 64. No assumptions about
    // alignment or move distance, so all reads must be unaligned and
    // must happen before any writes.
    Label medium_handlers, f9_16, f17_32, f33_48, f49_63;

    __ bind(&f9_16);
    __ movsd(xmm0, Operand(src, 0));
    __ movsd(xmm1, Operand(src, count, times_1, -8));
    __ movsd(Operand(dst, 0), xmm0);
    __ movsd(Operand(dst, count, times_1, -8), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f17_32);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f33_48);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f49_63);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, 0x20));
    __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, 0x20), xmm2);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&medium_handlers);
    __ dd(conv.address(&f9_16));
    __ dd(conv.address(&f17_32));
    __ dd(conv.address(&f33_48));
    __ dd(conv.address(&f49_63));

    __ bind(&medium_size);  // Entry point into this block.
    __ mov(eax, count);
    __ dec(eax);
    __ shr(eax, 4);
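    // eax = (count - 1) >> 4 maps 9..16 -> 0, 17..32 -> 1, 33..48 -> 2 and
    // 49..63 -> 3: the indices of f9_16..f49_63 in medium_handlers.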
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(eax, 3);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
    __ jmp(eax);
  }
  {
    // Specialized copiers for copy_size <= 8 bytes.
    Label small_handlers, f0, f1, f2, f3, f4, f5_8;
    __ bind(&f0);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f1);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f2);
    __ mov_w(eax, Operand(src, 0));
    __ mov_w(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f3);
    __ mov_w(eax, Operand(src, 0));
    __ mov_b(edx, Operand(src, 2));
    __ mov_w(Operand(dst, 0), eax);
    __ mov_b(Operand(dst, 2), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f4);
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f5_8);
    __ mov(eax, Operand(src, 0));
    __ mov(edx, Operand(src, count, times_1, -4));
    __ mov(Operand(dst, 0), eax);
    __ mov(Operand(dst, count, times_1, -4), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&small_handlers);
    __ dd(conv.address(&f0));
    __ dd(conv.address(&f1));
    __ dd(conv.address(&f2));
    __ dd(conv.address(&f3));
    __ dd(conv.address(&f4));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
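    // Table slots 5 through 8 all point at f5_8, which handles any length in
    // [5, 8] with two possibly overlapping 4-byte moves.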

    __ bind(&small_size);  // Entry point into this block.
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(count, 8);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
    __ jmp(eax);
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}


#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch = edi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags =
      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
  __ movsd(the_hole_nan,
           Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  // Restore registers before jumping into runtime.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements
  // esi: source FixedArray
  __ bind(&loop);
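  // edi holds a smi index (value * 2), so times_2 addresses the 4-byte slots
  // of the source FixedArray and times_4 the 8-byte slots of the destination
  // FixedDoubleArray.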
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  __ Cvtsi2sd(xmm0, ebx);
  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           the_hole_nan);

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(eax);

  // Restore esi.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());
  __ bind(&initialization_loop_entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &initialization_loop);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
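  // The hole is a NaN with a fixed bit pattern, so comparing only the upper
  // 32 bits of the double against kHoleNanUpper32 identifies it.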
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ movsd(xmm0,
           FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax,
                      edx,
                      esi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&success);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(result, kShortExternalStringMask);
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, kStringEncodingMask);
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


static Operand ExpConstant(int index) {
  return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  DCHECK(!input.is(double_scratch));
  DCHECK(!input.is(result));
  DCHECK(!result.is(double_scratch));
  DCHECK(!temp1.is(temp2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.
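
  // Algorithm sketch (inferred from the constant and table uses below):
  // exp(x) is evaluated as 2^k * T[j] * p(r), where k is packed into the
  // exponent bits via temp1, T is the 2048-entry math_exp_log_table indexed
  // by temp2 & 0x7ff, and p(r) is a small polynomial correction computed in
  // result/double_scratch.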

  Label done;

  __ movsd(double_scratch, ExpConstant(0));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, ExpConstant(1));
  __ movsd(result, ExpConstant(2));
  __ j(above_equal, &done);
  __ movsd(double_scratch, ExpConstant(3));
  __ movsd(result, ExpConstant(4));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movd(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, ExpConstant(6));
  __ mulsd(double_scratch, ExpConstant(5));
  __ subsd(double_scratch, input);
  __ subsd(result, double_scratch);
  __ movsd(input, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ mov(temp1, temp2);
  __ mulsd(result, ExpConstant(7));
  __ subsd(result, double_scratch);
  __ add(temp1, Immediate(0x1ff800));
  __ addsd(result, ExpConstant(8));
  __ and_(temp2, Immediate(0x7ff));
  __ shr(temp1, 11);
  __ shl(temp1, 20);
  __ movd(input, temp1);
  __ pshufd(input, input, static_cast<uint8_t>(0xe1));  // Order: 11 10 00 01
  __ movsd(double_scratch, Operand::StaticArray(
      temp2, times_8, ExternalReference::math_exp_log_table()));
  __ orps(input, double_scratch);
  __ mulsd(result, input);
  __ bind(&done);
}

#undef __


CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
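  // The young sequence is the standard function prologue; PatchPlatformCodeAge
  // below replaces it with a call to a code-age stub once the code gets old.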
  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32