// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-mips.h"

namespace v8 {
namespace internal {


UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  switch (type) {
    case TranscendentalCache::SIN: return &sin;
    case TranscendentalCache::COS: return &cos;
    case TranscendentalCache::TAN: return &tan;
    case TranscendentalCache::LOG: return &log;
    default: UNIMPLEMENTED();
  }
  return NULL;
}
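// The returned pointer is a plain UnaryMathFunction, so callers can invoke it
// directly. A minimal usage sketch:
//   UnaryMathFunction f = CreateTranscendentalFunction(TranscendentalCache::LOG);
//   double y = f(2.0);  // equivalent to log(2.0)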


#define __ masm.


#if defined(USE_SIMULATOR)
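// Under the simulator, generated MIPS code cannot be called directly from
// C++, so the buffer address is stashed here and calls are routed through
// Simulator::CallFP instead.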
byte* fast_exp_mips_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFP(
      fast_exp_mips_machine_code, x, 0);
}
#endif


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    DoubleRegister input = f12;
    DoubleRegister result = f0;
    DoubleRegister double_scratch1 = f4;
    DoubleRegister double_scratch2 = f6;
    Register temp1 = t0;
    Register temp2 = t1;
    Register temp3 = t2;

    if (!IsMipsSoftFloatABI) {
      // Input value is in f12 anyway, nothing to do.
    } else {
      __ Move(input, a0, a1);
    }
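    // Note on calling conventions: hard-float o32 passes the first double
    // argument in f12 and returns the result in f0, while soft-float uses the
    // a0/a1 and v0/v1 register pairs, hence the Move above and the one
    // before the Ret below.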
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (!IsMipsSoftFloatABI) {
      // Result is already in f0, nothing to do.
    } else {
      __ Move(v0, v1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_mips_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}
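// Note that the code buffer allocated above is never released: the generated
// stub (or, under the simulator, the stashed buffer address) is kept for the
// lifetime of the process.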


#undef __


UnaryMathFunction CreateSqrtFunction() {
  return &sqrt;
}

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_site_info_found) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
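  // This transition does not touch the backing store: both element kinds use
  // the same representation, so writing the new map (with a write barrier) is
  // all that is needed.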
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_site_info_found != NULL);
    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq,
                                           allocation_site_info_found);
  }

  // Set transitioned map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;

  Register scratch = t6;

  if (mode == TRACK_ALLOCATION_SITE) {
    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ push(ra);
  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
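  // t1 holds the smi-tagged length (value << 1), so one more left shift by 2
  // yields length * 8, i.e. kDoubleSize bytes per element, to which the array
  // header size is added.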
  __ sll(scratch, t1, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ Allocate(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedDoubleArray, not tagged as heap object

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Addu(a3, t2, Operand(kHeapObjectTag));
  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);


  // Prepare for conversion loop.
  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
  __ sll(t2, t1, 2);
  __ Addu(t2, t2, t3);
  __ li(t0, Operand(kHoleNanLower32));
  __ li(t1, Operand(kHoleNanUpper32));
  // t0: kHoleNanLower32
  // t1: kHoleNanUpper32
  // t2: end of destination FixedDoubleArray, not tagged
  // t3: begin of FixedDoubleArray element fields, not tagged
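  // The hole is stored in a FixedDoubleArray as a NaN with exactly this
  // upper/lower word pattern, which keeps it distinguishable from ordinary
  // (canonicalized) NaN values.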

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ra);
  __ Branch(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ lw(t5, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  // t5: current element
  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);

  // Normal smi, convert to double and store.
  __ mtc1(t5, f0);
  __ cvt_d_w(f0, f0);
  __ sdc1(f0, MemOperand(t3));
  __ Addu(t3, t3, kDoubleSize);

  __ Branch(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(t5);
    __ Or(t5, t5, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, "object found in smi-only array", at, Operand(t5));
  }
  __ sw(t0, MemOperand(t3));  // mantissa
  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
  __ Addu(t3, t3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, t3, Operand(t2));

  __ pop(ra);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedArray.
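  // t1 holds the smi-tagged length (value << 1), so one more left shift
  // yields length * 4, i.e. one pointer-size slot per element; FixedArray and
  // FixedDoubleArray share the same header size.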
  __ sll(a0, t1, 1);
  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
  __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
  __ Addu(t2, t2, Operand(kHeapObjectTag));
  __ sll(t1, t1, 1);
  __ Addu(t1, a3, t1);
  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
  // Using offset addresses.
  // a3: begin of destination FixedArray element fields, not tagged
  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
  // t1: end of destination FixedArray, not tagged
  // t2: destination FixedArray
  // t3: the-hole pointer
  // t5: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  __ lw(a1, MemOperand(t0));
  __ Addu(t0, t0, kDoubleSize);
  // a1: current element's upper 32 bit
  // t0: address of next element's upper 32 bit
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
  // a2: new heap number
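  // t0 already points at the next element's upper word, so the current
  // element's lower (mantissa) word is at t0 - 12.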
  __ lw(a0, MemOperand(t0, -12));
  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
  __ mov(a0, a3);
  __ sw(a2, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  __ RecordWrite(t2,
                 a0,
                 a2,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ sw(t3, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);

  __ bind(&entry);
  __ Branch(&loop, lt, a3, Operand(t1));

  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      t2,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ And(at, result, Operand(kSlicedNotConsMask));
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices.
  Label indirect_string_loaded;
  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ sra(at, result, kSmiTagSize);
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Addu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, "external string expected, but not found",
        at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&ascii, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ sll(at, index, 1);
  __ Addu(at, string, at);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ Addu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}
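// On exit, 'result' holds the character code at 'index'. The generator bails
// out to 'call_runtime' for unflattened cons strings and for short external
// strings, which have to be handled in C++.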


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch1));
  ASSERT(!input.is(double_scratch2));
  ASSERT(!result.is(double_scratch1));
  ASSERT(!result.is(double_scratch2));
  ASSERT(!double_scratch1.is(double_scratch2));
  ASSERT(!temp1.is(temp2));
  ASSERT(!temp1.is(temp3));
  ASSERT(!temp2.is(temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
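  // Rough sketch of the table-driven scheme (the exact values live in
  // ExternalReference::math_exp_constants and math_exp_log_table):
  //  - constants 0-2 implement the range checks: inputs at or below the lower
  //    bound produce 0.0, inputs at or above the upper bound produce a cached
  //    overflow value;
  //  - the input is scaled via constants 3/4 so that the low 11 bits of the
  //    scaled value index the 2^11-entry log table, while the remaining high
  //    bits, biased by 0x3ff, become the exponent field of the result;
  //  - a polynomial correction (constants 5-8) restores full double precision
  //    before the final multiply.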

  Label done;

  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ ldc1(double_scratch1, ExpConstant(0, temp3));
  __ Move(result, kDoubleRegZero);
  __ BranchF(&done, NULL, ge, double_scratch1, input);
  __ ldc1(double_scratch2, ExpConstant(1, temp3));
  __ ldc1(result, ExpConstant(2, temp3));
  __ BranchF(&done, NULL, ge, input, double_scratch2);
  __ ldc1(double_scratch1, ExpConstant(3, temp3));
  __ ldc1(result, ExpConstant(4, temp3));
  __ mul_d(double_scratch1, double_scratch1, input);
  __ add_d(double_scratch1, double_scratch1, result);
  __ Move(temp2, temp1, double_scratch1);
  __ sub_d(double_scratch1, double_scratch1, result);
  __ ldc1(result, ExpConstant(6, temp3));
  __ ldc1(double_scratch2, ExpConstant(5, temp3));
  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
  __ sub_d(double_scratch1, double_scratch1, input);
  __ sub_d(result, result, double_scratch1);
  __ mul_d(input, double_scratch1, double_scratch1);
  __ mul_d(result, result, input);
  __ srl(temp1, temp2, 11);
  __ ldc1(double_scratch2, ExpConstant(7, temp3));
  __ mul_d(result, result, double_scratch2);
  __ sub_d(result, result, double_scratch1);
  __ ldc1(double_scratch2, ExpConstant(8, temp3));
  __ add_d(result, result, double_scratch2);
  __ li(at, 0x7ff);
  __ And(temp2, temp2, at);
  __ Addu(temp1, temp1, Operand(0x3ff));
  __ sll(temp1, temp1, 20);

  // Must not call ExpConstant() after overwriting temp3!
  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ sll(at, temp2, 3);
  __ addu(at, at, temp3);
  __ lw(at, MemOperand(at));
  __ Addu(temp3, temp3, Operand(kPointerSize));
  __ sll(temp2, temp2, 3);
  __ addu(temp2, temp2, temp3);
  __ lw(temp2, MemOperand(temp2));
  __ Or(temp1, temp1, temp2);
  __ Move(input, at, temp1);
  __ mul_d(result, result, input);
  __ bind(&done);
}


// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;

static byte* GetNoCodeAgeSequence(uint32_t* length) {
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found in
  // FUNCTION-kind code objects.
  static bool initialized = false;
  static uint32_t sequence[kNoCodeAgeSequenceLength];
  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
  if (!initialized) {
    CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
    patcher.masm()->Push(ra, fp, cp, a1);
    patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex);
    patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
    initialized = true;
  }
  return byte_sequence;
}
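// IsYoungSequence and PatchPlatformCodeAge below compare against and copy
// from this reference sequence bytewise.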


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = !memcmp(sequence, young_sequence, young_length);
  ASSERT(result ||
         Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    // Mark this code sequence for FindPlatformCodeAgeSequence()
    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
    // Save the function's original return address
    // (it will be clobbered by Call(t9))
    patcher.masm()->mov(at, ra);
    // Load the stub address to t9 and call it
    patcher.masm()->li(t9,
        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
    patcher.masm()->Call(t9);
    // Record the stub address in the empty space for GetCodeAgeAndParity()
    patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS