// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS

#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/frames-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

#include "src/mips/code-stubs-mips.h"  // Cannot be the first include.

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ sll(t9, a0, kPointerSizeLog2);
  __ Addu(t9, sp, t9);
  __ sw(a1, MemOperand(t9, 0));
  __ Push(a1);
  __ Push(a2);
  __ Addu(a0, a0, Operand(3));
  __ TailCallRuntime(Runtime::kNewArray);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);

  if (!skip_fastpath()) {
    // Load double input.
    __ Ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(
        scratch, scratch,
        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
           | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low,
      MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
      MemOperand(input_reg, double_offset + Register::kExponentOffset));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}
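// For readers following the slow path above: a minimal C-level sketch of the
// same manual truncation, assuming the FPU fast path already handled doubles
// that fit in int32 (so the unbiased exponent here is at least 31) and that
// Infinity/NaN were short-circuited to 0. Names are illustrative only, not
// part of the stub.
//
//   static int32_t TruncateToInt32(uint32_t hi, uint32_t lo) {
//     int32_t exponent = (int32_t)((hi >> 20) & 0x7FF);  // biased exponent
//     if (exponent - (1023 + 52 + 31) > 0) return 0;     // |x| >= 2^84: low 32 bits are 0
//     int32_t shift = exponent - 1023 - 20;              // shift for the high mantissa word
//     uint32_t high = (shift < 32) ? ((hi | (1u << 20)) << shift) : 0;
//     uint32_t low = (shift <= 32) ? (lo >> (32 - shift)) : (lo << (shift - 32));
//     uint32_t bits = high | low;
//     return (hi & 0x80000000u) ? (int32_t)(0u - bits) : (int32_t)bits;
//   }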


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  __ GetObjectType(a0, t4, t4);
  if (cc == less || cc == greater) {
    // Call runtime on identical JSObjects.
    __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
  } else {
    __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
        __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(t2));
        DCHECK(is_int16(GREATER) && is_int16(LESS));
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else  {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(t3, t2, Operand(exp_mask_reg));
    // If all bits not set (ne cond), then not a NaN, objects are equal.
    __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, t3, Operand(t2));
    // For equal we already have the right value in v0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}
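// The NaN test above works purely on the bit pattern of the heap number: a
// double is NaN iff all eleven exponent bits are set and the 52-bit mantissa
// is non-zero (an all-zero mantissa is an Infinity). A rough C sketch of the
// check, for illustration only (names are hypothetical):
//
//   static bool IsNaNBits(uint32_t hi, uint32_t lo) {
//     const uint32_t kExpMask = 0x7FF00000u;          // bits 20..30 of the high word
//     if ((hi & kExpMask) != kExpMask) return false;  // exponent not all ones
//     return (((hi << 12) | lo) != 0);                // shift out sign + exponent bits
//   }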


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ sra(at, rhs, kSmiTagSize);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If rhs was not a number and lhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ sra(at, lhs, kSmiTagSize);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
    // If either operand is a JS object or an oddball value, then they are
    // not equal since their pointers are different.
    // There is no test for undetectability in strict equality.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    Label first_non_object;
    // Get the type of the first operand into a2 and compare it with
    // FIRST_JS_RECEIVER_TYPE.
    __ GetObjectType(lhs, a2, a2);
    __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));

    // Return non-zero.
    Label return_not_equal;
    __ bind(&return_not_equal);
    __ Ret(USE_DELAY_SLOT);
    __ li(v0, Operand(1));

    __ bind(&first_non_object);
    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

    __ GetObjectType(rhs, a3, a3);
    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));

    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

    // Now that we have the types we might as well check for
    // internalized-internalized.
    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    __ Or(a2, a2, Operand(a3));
    __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
    __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of rhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&object_test);
  __ lw(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lw(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ And(at, t0, Operand(1 << Map::kIsUndetectable));
  __ Branch(&undetectable, ne, at, Operand(zero_reg));
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, ne, at, Operand(zero_reg));

  __ GetInstanceType(a2, a2);
  __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&undetectable);
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, eq, at, Operand(zero_reg));

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ GetInstanceType(a2, a2);
  __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));

  __ bind(&return_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(EQUAL));  // In delay slot.
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ sra(a1, a1, 1);
  __ sra(a0, a0, 1);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a1, a0);
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  __ And(t2, lhs, Operand(rhs));
  __ JumpIfNotSmi(t2, &not_smis, t0);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.
  Label nan;
  __ li(t0, Operand(LESS));
  __ li(t1, Operand(GREATER));
  __ li(t2, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
  if (!IsMipsArchVariant(kMips32r6)) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, t0);
    // Use previous check to store conditionally to v0 opposite condition
    // (GREATER). If rhs is equal to lhs, this will be corrected in next
    // check.
    __ Movf(v0, t1);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, t2);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, t0);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, t2);  // Return EQUAL as result.

    __ mov(v0, t1);  // Return GREATER as result.
    __ bind(&skip);
  }

  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }


  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
                                                    t1);
  }
  // Never falls through to here.

  __ bind(&slow);
  if (cc == eq) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(cp);
      __ Call(strict() ? BUILTIN_CODE(isolate(), StrictEqual)
                       : BUILTIN_CODE(isolate(), Equal),
              RelocInfo::CODE_TARGET);
      __ Pop(cp);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
    __ Ret(USE_DELAY_SLOT);
    __ subu(v0, v0, a0);  // In delay slot.
  } else {
    // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
    // a1 (rhs) second.
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t5;
  const Register scratch2 = t3;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ Ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent, bail_out;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Subu(scratch, zero_reg, scratch);
  // Check when Subu overflows and we get negative result
  // (happens only when input is MIN_INT).
  __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
  __ bind(&positive_exponent);
  __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ sra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with exponent value before bailing out.
  __ bind(&bail_out);
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(ra);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(ra);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}
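// The integer-exponent loop above is plain binary exponentiation
// (square-and-multiply). A C sketch of the same idea, assuming the two cases
// the stub bails out on (exponent == INT_MIN, and a result that underflows to
// +0 after the final 1/result) are excluded; names are illustrative only:
//
//   static double PowIntSketch(double base, int exponent) {
//     double result = 1.0;
//     double b = base;
//     int e = (exponent < 0) ? -exponent : exponent;
//     while (e != 0) {
//       if (e & 1) result *= b;  // multiply in the current exponent bit
//       e >>= 1;
//       if (e != 0) b *= b;      // square the base for the next bit
//     }
//     return (exponent < 0) ? 1.0 / result : result;
//   }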

bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
  CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
  save_doubles.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // a0: number of arguments including receiver
  // a1: pointer to builtin function
  // fp: frame pointer    (restored after C call)
  // sp: stack pointer    (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // a2: pointer to the first argument

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mov(s1, a2);
  } else {
    // Compute the argv pointer in a callee-saved register.
    __ Lsa(s1, sp, a0, kPointerSizeLog2);
    __ Subu(s1, s1, kPointerSize);
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
                                           ? StackFrame::BUILTIN_EXIT
                                           : StackFrame::EXIT);

  // s0: number of arguments  including receiver (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(s0, a0);
  __ mov(s2, a1);

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.

  __ AssertStackIsAligned();

  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  int result_stack_size;
  if (result_size() <= 2) {
    // a0 = argc, a1 = argv, a2 = isolate
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a1, s1);
    result_stack_size = 0;
  } else {
    DCHECK_EQ(3, result_size());
    // Allocate additional space for the result.
    result_stack_size =
        ((result_size() * kPointerSize) + frame_alignment_mask) &
        ~frame_alignment_mask;
    __ Subu(sp, sp, Operand(result_stack_size));

    // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
    __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a2, s1);
    __ mov(a1, a0);
    __ mov(a0, sp);
  }

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    int kNumInstructionsToJump = 4;
    Label find_ra;
    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    if (kArchVariant >= kMips32r6) {
      __ addiupc(ra, kNumInstructionsToJump + 1);
    } else {
      // This branch-and-link sequence is needed to find the current PC on mips
      // before r6, saved to the ra register.
      __ bal(&find_ra);  // bal exposes branch delay slot.
      __ Addu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
    }
    __ bind(&find_ra);

    // This spot was reserved in EnterExitFrame.
    __ sw(ra, MemOperand(sp, result_stack_size));
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    __ mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    __ jalr(t9);
    // Set up sp in the delay slot.
    __ addiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    DCHECK_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
  }
  if (result_size() > 2) {
    DCHECK_EQ(3, result_size());
    // Read result values stored on stack.
    __ lw(a0, MemOperand(v0, 2 * kPointerSize));
    __ lw(v1, MemOperand(v0, 1 * kPointerSize));
    __ lw(v0, MemOperand(v0, 0 * kPointerSize));
  }
  // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ LoadRoot(t0, Heap::kExceptionRootIndex);
  __ Branch(&exception_returned, eq, t0, Operand(v0));

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        IsolateAddressId::kPendingExceptionAddress, isolate());
    __ li(a2, Operand(pending_exception_address));
    __ lw(a2, MemOperand(a2));
    __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ Branch(&okay, eq, t0, Operand(a2));
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // s0: still holds argc (callee-saved).
    argc = s0;
  }
  __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      IsolateAddressId::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      IsolateAddressId::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      IsolateAddressId::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      IsolateAddressId::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set v0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, a0);
    __ mov(a0, zero_reg);
    __ mov(a1, zero_reg);
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ li(cp, Operand(pending_handler_context_address));
  __ lw(cp, MemOperand(cp));
  __ li(sp, Operand(pending_handler_sp_address));
  __ lw(sp, MemOperand(sp));
  __ li(fp, Operand(pending_handler_fp_address));
  __ lw(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label zero;
  __ Branch(&zero, eq, cp, Operand(zero_reg));
  __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&zero);

  // Compute the handler entry address and jump to it.
  __ li(a1, Operand(pending_handler_code_address));
  __ lw(a1, MemOperand(a1));
  __ li(a2, Operand(pending_handler_offset_address));
  __ lw(a2, MemOperand(a2));
  __ Addu(t9, a1, a2);
  __ Jump(t9, Code::kHeaderSize - kHeapObjectTag);
}
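// Note on the result_size() == 3 path above: under the MIPS O32 calling
// convention a C function returning a struct wider than two words receives a
// hidden pointer to caller-allocated storage as its first argument. That is
// why the stub shifts argc/argv/isolate into a1/a2/a3, passes the aligned
// stack buffer in a0, and afterwards reads the three words back through the
// pointer that comes back in v0. Roughly, the C side would look like this
// (hypothetical names, for illustration only):
//
//   typedef struct { void* a; void* b; void* c; } ThreeWordResult;
//   ThreeWordResult Builtin(int argc, void** argv, void* isolate);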


void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Isolate* isolate = masm->isolate();

  // Registers:
  // a0: entry address
  // a1: function
  // a2: receiver
  // a3: argc
  //
  // Stack:
  // 4 args slots
  // args

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved | ra.bit());

  // Save callee-saved FPU registers.
  __ MultiPushFPU(kCalleeSavedFPU);
  // Set up the reserved register for 0.0.
  __ Move(kDoubleRegZero, 0.0);


  // Load argv in s0 register.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;

  __ InitializeRootRegister();
  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));

  // We build an EntryFrame.
  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  StackFrame::Type marker = type();
  __ li(t2, Operand(StackFrame::TypeToMarker(marker)));
  __ li(t1, Operand(StackFrame::TypeToMarker(marker)));
  __ li(t0, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
                                      isolate)));
  __ lw(t0, MemOperand(t0));
  __ Push(t3, t2, t1, t0);
  // Set up frame pointer for the frame to be pushed.
  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // caller fp          |
  // function slot      | entry frame
  // context slot       |
  // bad fp (0xff...f)  |
  // callee saved registers + ra
  // 4 args slots
  // args

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate);
  __ li(t1, Operand(ExternalReference(js_entry_sp)));
  __ lw(t2, MemOperand(t1));
  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
  __ sw(fp, MemOperand(t1));
  __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont);
  __ nop();   // Branch delay slot nop.
  __ bind(&non_outermost_js);
  __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);
  __ push(t0);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ li(t0, Operand(ExternalReference(
                IsolateAddressId::kPendingExceptionAddress, isolate)));
  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
  __ LoadRoot(v0, Heap::kExceptionRootIndex);
  __ b(&exit);  // b exposes branch delay slot.
  __ nop();   // Branch delay slot nop.

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bal(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // handler frame
  // entry frame
  // callee saved registers + ra
  // 4 args slots
  // args

  if (type() == StackFrame::CONSTRUCT_ENTRY) {
    __ Call(BUILTIN_CODE(isolate, JSConstructEntryTrampoline),
            RelocInfo::CODE_TARGET);
  } else {
    __ Call(BUILTIN_CODE(isolate, JSEntryTrampoline), RelocInfo::CODE_TARGET);
  }

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // v0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(t1);
  __ Branch(&non_outermost_js_2, ne, t1,
            Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ li(t1, Operand(ExternalReference(js_entry_sp)));
  __ sw(zero_reg, MemOperand(t1));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(t1);
  __ li(t0, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
                                      isolate)));
  __ sw(t1, MemOperand(t0));

  // Reset the stack to the callee saved registers.
  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Restore callee-saved fpu registers.
  __ MultiPopFPU(kCalleeSavedFPU);

  // Restore callee saved registers from the stack.
  __ MultiPop(kCalleeSaved | ra.bit());
  // Return.
  __ Jump(ra);
}


static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
  // a0 : number of arguments to the construct function
  // a2 : feedback vector
  // a3 : slot in feedback vector (Smi)
  // a1 : the function to call
  FrameScope scope(masm, StackFrame::INTERNAL);
  const RegList kSavedRegs = 1 << 4 |  // a0
                             1 << 5 |  // a1
                             1 << 6 |  // a2
                             1 << 7 |  // a3
                             1 << cp.code();

  // Number-of-arguments register must be smi-tagged to call out.
  __ SmiTag(a0);
  __ MultiPush(kSavedRegs);

  __ CallStub(stub);

  __ MultiPop(kSavedRegs);
  __ SmiUntag(a0);
}


static void GenerateRecordCallTarget(MacroAssembler* masm) {
  // Cache the called function in a feedback vector slot.  Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
  // a0 : number of arguments to the construct function
  // a1 : the function to call
  // a2 : feedback vector
  // a3 : slot in feedback vector (Smi)
  Label initialize, done, miss, megamorphic, not_array_function;

  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
            masm->isolate()->heap()->megamorphic_symbol());
  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
            masm->isolate()->heap()->uninitialized_symbol());

  // Load the cache state into t2.
  __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ lw(t2, FieldMemOperand(t2, FeedbackVector::kFeedbackSlotsOffset));

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  // We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
  // this position in a symbol (see static asserts in feedback-vector.h).
  Label check_allocation_site;
  Register feedback_map = t1;
  Register weak_value = t4;
  __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
  __ Branch(&done, eq, a1, Operand(weak_value));
  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
  __ Branch(&done, eq, t2, Operand(at));
  __ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
  __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));

  // If the weak cell is cleared, we have a new chance to become monomorphic.
  __ JumpIfSmi(weak_value, &initialize);
  __ jmp(&megamorphic);

  __ bind(&check_allocation_site);
  // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function, and we didn't find the megamorph
  // sentinel, then we have in the slot either some other function or an
  // AllocationSite.
  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
  __ Branch(&miss, ne, feedback_map, Operand(at));

  // Make sure the function is the Array() function
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
  __ Branch(&megamorphic, ne, a1, Operand(t2));
  __ jmp(&done);

  __ bind(&miss);

  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
  // megamorphic.
  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
  __ Branch(&initialize, eq, t2, Operand(at));
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ bind(&megamorphic);
  __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
  __ sw(at, FieldMemOperand(t2, FeedbackVector::kFeedbackSlotsOffset));
  __ jmp(&done);

  // An uninitialized cache is patched with the function.
  __ bind(&initialize);
  // Make sure the function is the Array() function.
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
  __ Branch(&not_array_function, ne, a1, Operand(t2));

  // The target function is the Array constructor,
  // Create an AllocationSite if we don't already have it, store it in the
  // slot.
  CreateAllocationSiteStub create_stub(masm->isolate());
  CallStubInRecordCallTarget(masm, &create_stub);
  __ Branch(&done);

  __ bind(&not_array_function);
  CreateWeakCellStub weak_cell_stub(masm->isolate());
  CallStubInRecordCallTarget(masm, &weak_cell_stub);

  __ bind(&done);

  // Increment the call count for all function calls.
  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ lw(t0, FieldMemOperand(
                at, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
  __ sw(t0, FieldMemOperand(
                at, FeedbackVector::kFeedbackSlotsOffset + kPointerSize));
}
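// In summary, the feedback slot managed above behaves like a small state
// machine. A pseudo-code sketch of the transitions, for illustration only
// (slot = the feedback vector entry, fn = the function in a1):
//
//   if (slot is a WeakCell holding fn)                    -> done (monomorphic hit)
//   else if (slot == megamorphic_symbol)                  -> done
//   else if (slot is a cleared WeakCell)                  -> initialize (below)
//   else if (slot is an AllocationSite && fn is Array)    -> done
//   else if (slot != uninitialized_symbol)                -> slot = megamorphic_symbol
//   else initialize: slot = fn is the Array function ? new AllocationSite
//                                                     : new WeakCell(fn)
//   finally: the call count (a Smi stored in the following slot) += 1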


void CallConstructStub::Generate(MacroAssembler* masm) {
  // a0 : number of arguments
  // a1 : the function to call
  // a2 : feedback vector
  // a3 : slot in feedback vector (Smi, for RecordCallTarget)

  Label non_function;
  // Check that the function is not a smi.
  __ JumpIfSmi(a1, &non_function);
  // Check that the function is a JSFunction.
  __ GetObjectType(a1, t1, t1);
  __ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));

  GenerateRecordCallTarget(masm);

  __ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
  Label feedback_register_initialized;
  // Put the AllocationSite from the feedback vector into a2, or undefined.
  __ lw(a2, FieldMemOperand(t1, FeedbackVector::kFeedbackSlotsOffset));
  __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
  __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  __ bind(&feedback_register_initialized);

  __ AssertUndefinedOrAllocationSite(a2, t1);

  // Pass function as new target.
  __ mov(a3, a1);

  // Tail call to the function-specific construct stub (still in the caller
  // context at this point).
  __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
  __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);

  __ bind(&non_function);
  __ mov(a3, a1);
  __ Jump(BUILTIN_CODE(isolate(), Construct), RelocInfo::CODE_TARGET);
}

// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  DCHECK(!t0.is(index_));
  DCHECK(!t0.is(result_));
  DCHECK(!t0.is(object_));
  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
    // If the receiver is a smi trigger the non-string case.
    __ JumpIfSmi(object_, receiver_not_string_);

    // Fetch the instance type of the receiver into result register.
    __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
    __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
    // If the receiver is not a string trigger the non-string case.
    __ And(t0, result_, Operand(kIsNotStringMask));
    __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
  }

  // If the index is non-smi trigger the non-smi case.
  __ JumpIfNotSmi(index_, &index_not_smi_);

  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
  __ Branch(index_out_of_range_, ls, t0, Operand(index_));

  __ sra(index_, index_, kSmiTagSize);

  StringCharLoadGenerator::Generate(masm,
                                    object_,
                                    index_,
                                    result_,
                                    &call_runtime_);

  __ sll(result_, result_, kSmiTagSize);
  __ bind(&exit_);
}


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm, EmbedMode embed_mode,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              result_,
              Heap::kHeapNumberMapRootIndex,
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  // Consumed by runtime conversion function:
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ Push(LoadWithVectorDescriptor::VectorRegister(),
            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
  } else {
    __ Push(object_, index_);
  }
  __ CallRuntime(Runtime::kNumberToSmi);

  // Save the conversion result before the pop instructions below
  // have a chance to overwrite it.
  __ Move(index_, v0);
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ Pop(LoadWithVectorDescriptor::VectorRegister(),
           LoadWithVectorDescriptor::SlotRegister(), object_);
  } else {
    __ pop(object_);
  }
  // Reload the instance type.
  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ Branch(&got_smi_index_);

  // Call the runtime. We get here when the receiver is a string and the
  // index is a number, but the code for getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ sll(index_, index_, kSmiTagSize);
  __ Push(object_, index_);
  __ CallRuntime(Runtime::kStringCharCodeAtRT);

  __ Move(result_, v0);

  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}

void StringHelper::GenerateFlatOneByteStringEquals(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
  __ bind(&strings_not_equal);
  DCHECK(is_int16(NOT_EQUAL));
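  // A 16-bit immediate keeps the li below to a single instruction, so it can
  // sit in the Ret's delay slot.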
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
  DCHECK(is_int16(EQUAL));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));

  // Compare characters.
  __ bind(&compare_chars);

  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
                                  v0, &strings_not_equal);

  // Characters are equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
}


void StringHelper::GenerateCompareFlatOneByteStrings(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3, Register scratch4) {
  Label result_not_equal, compare_lengths;
  // Find minimum length and length difference.
  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ Subu(scratch3, scratch1, Operand(scratch2));
  Register length_delta = scratch3;
  __ slt(scratch4, scratch2, scratch1);
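  // If the right string is shorter, use its length as the minimum.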
  __ Movn(scratch1, scratch2, scratch4);
  Register min_length = scratch1;
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));

  // Compare loop.
  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
                                  scratch4, v0, &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // Use length_delta as result if it's zero.
  __ mov(scratch2, length_delta);
  __ mov(scratch4, zero_reg);
  __ mov(v0, zero_reg);
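  // Fall through with scratch2 = length_delta and scratch4 = 0, so the code
  // below can treat the length difference like a character difference.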

  __ bind(&result_not_equal);
  // Conditionally update the result based on either length_delta or
  // the last comparison performed in the loop above.
  Label ret;
  __ Branch(&ret, eq, scratch2, Operand(scratch4));
  __ li(v0, Operand(Smi::FromInt(GREATER)));
  __ Branch(&ret, gt, scratch2, Operand(scratch4));
  __ li(v0, Operand(Smi::FromInt(LESS)));
  __ bind(&ret);
  __ Ret();
}


void StringHelper::GenerateOneByteCharsCompareLoop(
    MacroAssembler* masm, Register left, Register right, Register length,
    Register scratch1, Register scratch2, Register scratch3,
    Label* chars_not_equal) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ Addu(scratch1, length,
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ Addu(left, left, Operand(scratch1));
  __ Addu(right, right, Operand(scratch1));
  __ Subu(length, zero_reg, length);
  Register index = length;  // index = -length;


  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ Addu(scratch3, left, index);
  __ lbu(scratch1, MemOperand(scratch3));
  __ Addu(scratch3, right, index);
  __ lbu(scratch2, MemOperand(scratch3));
  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
  __ Addu(index, index, 1);
  __ Branch(&loop, ne, index, Operand(zero_reg));
}


void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
  DCHECK_EQ(CompareICState::BOOLEAN, state());
  Label miss;

  __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
  __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
  if (!Token::IsEqualityOp(op())) {
    __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
    __ AssertSmi(a1);
    __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
    __ AssertSmi(a0);
  }
  __ Ret(USE_DELAY_SLOT);
  __ Subu(v0, a1, a0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateSmis(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::SMI);
  Label miss;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ Ret(USE_DELAY_SLOT);
    __ Subu(v0, a0, a1);
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(a1);
    __ SmiUntag(a0);
    __ Ret(USE_DELAY_SLOT);
    __ Subu(v0, a1, a0);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::NUMBER);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;

  if (left() == CompareICState::SMI) {
    __ JumpIfNotSmi(a1, &miss);
  }
  if (right() == CompareICState::SMI) {
    __ JumpIfNotSmi(a0, &miss);
  }

  // Inline the double comparison and fall back to the general compare
  // stub if NaN is involved.
  // Load left and right operand.
  Label done, left, left_smi, right_smi;
  __ JumpIfSmi(a0, &right_smi);
  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
              DONT_DO_SMI_CHECK);
  __ Subu(a2, a0, Operand(kHeapObjectTag));
  __ Ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
  __ Branch(&left);
  __ bind(&right_smi);
  __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
  FPURegister single_scratch = f6;
  __ mtc1(a2, single_scratch);
  __ cvt_d_w(f2, single_scratch);

  __ bind(&left);
  __ JumpIfSmi(a1, &left_smi);
  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
              DONT_DO_SMI_CHECK);
  __ Subu(a2, a1, Operand(kHeapObjectTag));
  __ Ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
  __ Branch(&done);
  __ bind(&left_smi);
  __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
  single_scratch = f8;
  __ mtc1(a2, single_scratch);
  __ cvt_d_w(f0, single_scratch);

  __ bind(&done);

  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
  Label fpu_eq, fpu_lt;
  // Test if equal, and also handle the unordered/NaN case.
  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);

  // Test if less (unordered case is already handled).
  __ BranchF(&fpu_lt, NULL, lt, f0, f2);

  // Otherwise it's greater, so just fall through, and return.
  DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(GREATER));

  __ bind(&fpu_eq);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(EQUAL));

  __ bind(&fpu_lt);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(LESS));

  __ bind(&unordered);
  __ bind(&generic_stub);
  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                     CompareICState::GENERIC, CompareICState::GENERIC);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&miss, ne, a0, Operand(at));
    __ JumpIfSmi(a1, &unordered);
    __ GetObjectType(a1, a2, a2);
    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&unordered, eq, a1, Operand(at));
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are internalized strings.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ Or(tmp1, tmp1, Operand(tmp2));
  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ Branch(&miss, ne, at, Operand(zero_reg));

  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(a0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(v0, right);
  // Internalized strings are compared by identity.
  __ Ret(ne, left, Operand(right));
  DCHECK(is_int16(EQUAL));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::UNIQUE_NAME);
  DCHECK(GetCondition() == eq);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));

  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);

  // Use a0 as result
  __ mov(v0, a0);

  // Unique names are compared by identity.
  Label done;
  __ Branch(&done, ne, left, Operand(right));
  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(a0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::STRING);
  Label miss;

  bool equality = Token::IsEqualityOp(op());

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;
  Register tmp3 = t0;
  Register tmp4 = t1;
  Register tmp5 = t2;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ Or(tmp3, tmp1, tmp2);
  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
  __ Branch(&miss, ne, tmp5, Operand(zero_reg));

  // Fast check for identical strings.
  Label left_ne_right;
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&left_ne_right, ne, left, Operand(right));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, zero_reg);  // In the delay slot.
  __ bind(&left_ne_right);

  // Handle not identical strings.

  // Check that both strings are internalized strings. If they are, we're done
  // because we already know they are not identical. We know they are both
  // strings.
  if (equality) {
    DCHECK(GetCondition() == eq);
    STATIC_ASSERT(kInternalizedTag == 0);
    __ Or(tmp3, tmp1, Operand(tmp2));
    __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
    Label is_symbol;
    __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
    // Make sure a0 is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    DCHECK(right.is(a0));
    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, a0);  // In the delay slot.
    __ bind(&is_symbol);
  }

  // Check that both strings are sequential one-byte.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
                                                    &runtime);

  // Compare flat one-byte strings. Returns when done.
  if (equality) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
                                                  tmp3);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
                                                    tmp2, tmp3, tmp4);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  if (equality) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(left, right);
      __ CallRuntime(Runtime::kStringEqual);
    }
    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
    __ Ret(USE_DELAY_SLOT);
    __ Subu(v0, v0, a0);  // In delay slot.
  } else {
    __ Push(left, right);
    __ TailCallRuntime(Runtime::kStringCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
  DCHECK_EQ(CompareICState::RECEIVER, state());
  Label miss;
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &miss);

  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  __ GetObjectType(a0, a2, a2);
  __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
  __ GetObjectType(a1, a2, a2);
  __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));

  DCHECK_EQ(eq, GetCondition());
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a0, a1);

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
  Label miss;
  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
  __ And(a2, a1, a0);
  __ JumpIfSmi(a2, &miss);
  __ GetWeakValue(t0, cell);
  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ Branch(&miss, ne, a2, Operand(t0));
  __ Branch(&miss, ne, a3, Operand(t0));

  if (Token::IsEqualityOp(op())) {
    __ Ret(USE_DELAY_SLOT);
    __ subu(v0, a0, a1);
  } else {
    if (op() == Token::LT || op() == Token::LTE) {
      __ li(a2, Operand(Smi::FromInt(GREATER)));
    } else {
      __ li(a2, Operand(Smi::FromInt(LESS)));
    }
    __ Push(a1, a0, a2);
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(a1, a0);
    __ Push(ra, a1, a0);
    __ li(t0, Operand(Smi::FromInt(op())));
    __ addiu(sp, sp, -kPointerSize);
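    // The extra slot reserved above receives the op() Smi from the delay slot
    // of the runtime call, forming the third argument to kCompareIC_Miss.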
    __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
                   USE_DELAY_SLOT);
    __ sw(t0, MemOperand(sp));  // In the delay slot.
    // Compute the entry point of the rewritten stub.
    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
    // Restore registers.
    __ Pop(a1, a0, ra);
  }
  __ Jump(a2);
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // Make room for arguments to fit the C calling convention. Most of the callers
  // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
  // so they handle stack restoring and we don't have to do that here.
  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
  // kCArgsSlotsSize stack space after the call.
  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
  // Place the return address on the stack, making the call
  // GC safe. The RegExp backend also relies on this.
  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
  __ Call(t9);  // Call the C++ function.
  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));

  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    // In case of an error the return address may point to a memory area
    // filled with kZapValue by the GC.
    // Dereference the address and check for this.
    __ lw(t0, MemOperand(t9));
    __ Assert(ne, kReceivedInvalidReturnAddress, t0,
        Operand(reinterpret_cast<uint32_t>(kZapValue)));
  }
  __ Jump(t9);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
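  // Route the call through the DirectCEntry stub's own code object, which is
  // generated early and never moves; the real return address is reloaded from
  // the stack after the C call (see Generate above), keeping it GC safe.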
  intptr_t loc =
      reinterpret_cast<intptr_t>(GetCode().location());
  __ Move(t9, target);
  __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
  __ Call(at);
}


void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register receiver,
                                                      Register properties,
                                                      Handle<Name> name,
                                                      Register scratch0) {
  DCHECK(name->IsUniqueName());
  // If the names in the slots probed for the hash value (probes 1 to
  // kProbes - 1) are not equal to the name, and the kProbes-th slot is unused
  // (its name is the undefined value), the hash table is guaranteed not to
  // contain the property. This holds even if some slots hold deleted
  // properties (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
    __ Subu(index, index, Operand(1));
    __ And(index, index, Operand(
        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
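    // index *= 3 (Lsa computes index + (index << 1)).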
    __ Lsa(index, index, index, 1);

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    STATIC_ASSERT(kSmiTagSize == 1);
    Register tmp = properties;
    __ Lsa(tmp, properties, index, 1);
    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    DCHECK(!tmp.is(entity_name));
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ Branch(done, eq, entity_name, Operand(tmp));

    // Load the hole ready for use below:
    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);

    // Stop if found the property.
    __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));

    Label good;
    __ Branch(&good, eq, entity_name, Operand(tmp));

    // Check if the entry name is not a unique name.
    __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
    __ lbu(entity_name,
           FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
    __ bind(&good);

    // Restore the properties.
    __ lw(properties,
          FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
       a2.bit() | a1.bit() | a0.bit() | v0.bit());

  __ MultiPush(spill_mask);
  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
  __ li(a1, Operand(Handle<Name>(name)));
  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  __ mov(at, v0);
  __ MultiPop(spill_mask);

  __ Branch(done, eq, at, Operand(zero_reg));
  __ Branch(miss, ne, at, Operand(zero_reg));
}

void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  dictionary: NameDictionary to probe.
  //  key: the name to look up.
  //  index: will hold the index of the entry if the lookup is successful;
  //         might alias with result.
  // Returns:
  //  result is zero if the lookup failed, non-zero otherwise.

  Register result = v0;
  Register dictionary = a0;
  Register key = a1;
  Register index = a2;
  Register mask = a3;
  Register hash = t0;
  Register undefined = t1;
  Register entry_key = t2;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ sra(mask, mask, kSmiTagSize);
  __ Subu(mask, mask, Operand(1));

  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right-shifted by the srl below and then masked by the And instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Addu(index, hash, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ mov(index, hash);
    }
    __ srl(index, index, Name::kHashShift);
    __ And(index, mask, index);

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    // index *= 3.
    __ Lsa(index, index, index, 1);

    STATIC_ASSERT(kSmiTagSize == 1);
    __ Lsa(index, dictionary, index, 2);
    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));

    // Stop if found the property.
    __ Branch(&in_dictionary, eq, entry_key, Operand(key));

    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ lbu(entry_key,
             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode() == POSITIVE_LOOKUP) {
    __ Ret(USE_DELAY_SLOT);
    __ mov(result, zero_reg);
  }

  __ bind(&in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ li(result, 1);

  __ bind(&not_in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ mov(result, zero_reg);
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
// the value has just been written into the object; now this stub makes sure
// we keep the GC informed.  The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two branch+nop instructions are generated with labels so as to
  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
  // incremental heap marking.
  // See RecordWriteStub::Patch for details.
  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
  __ nop();
  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
  __ nop();

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.

  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
                        &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.is(regs_.object()));
  DCHECK(!address.is(a0));
  __ Move(address, regs_.address());
  __ Move(a0, regs_.object());
  __ Move(a1, address);
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label need_incremental;
  Label need_incremental_pop_scratch;

#ifndef V8_CONCURRENT_MARKING
  Label on_black;
  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);
#endif

  // Get the value from the slot.
  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
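    // The slot only needs recording if the value is on an evacuation
    // candidate page and the object's page does not skip slot recording;
    // in that case the incremental marker must be informed.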
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ JumpIfWhite(regs_.scratch0(),  // The value.
                 regs_.scratch1(),  // Scratch.
                 regs_.object(),    // Scratch.
                 regs_.address(),   // Scratch.
                 &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           value(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}

void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
                                                     Zone* zone) {
  if (tasm->isolate()->function_entry_hook() != NULL) {
    tasm->push(ra);
    tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
    tasm->pop(ra);
  }
}

void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    __ push(ra);
    __ CallStub(&stub);
    __ pop(ra);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push ra" instruction, followed by a call.
  // Note: on MIPS a "push" is two instructions.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs =
     kJSCallerSaved |  // Caller saved registers.
     s5.bit();         // Saved stack pointer.

  // We also save ra, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;

  // Save all caller-save registers as this may be called from anywhere.
  __ MultiPush(kSavedRegs | ra.bit());

  // Compute the function's address for the first argument.
  __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
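    // Remember the original sp in s5 (included in kSavedRegs above) so it can
    // be restored after the call.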
    __ mov(s5, sp);
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    __ And(sp, sp, Operand(-frame_alignment));
  }
  __ Subu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS)
  int32_t entry_hook =
      reinterpret_cast<int32_t>(isolate()->function_entry_hook());
  __ li(t9, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ li(t9, Operand(ExternalReference(&dispatcher,
                                      ExternalReference::BUILTIN_CALL,
                                      isolate())));
#endif
  // Call the C function through t9 to conform to the ABI for PIC.
  __ Call(t9);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, s5);
  } else {
    __ Addu(sp, sp, kCArgsSlotsSize);
  }

  // Also pop ra to get Ret(0).
  __ MultiPop(kSavedRegs | ra.bit());
  __ Ret();
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // a0 - number of arguments
  // a1 - constructor?
  // sp[0] - last argument
    STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
    STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(PACKED_ELEMENTS == 2);
    STATIC_ASSERT(HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);
  } else if (mode == DONT_OVERRIDE) {
    // Is the low bit set? If so, we are holey and that is good.
    Label normal_sequence;
    __ And(at, a3, Operand(1));
    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));

    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
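    // Packed and holey kinds alternate (see the STATIC_ASSERTs above), so
    // kind + 1 is the holey variant of the current packed kind.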
    __ Addu(a3, a3, Operand(1));

    if (FLAG_debug_code) {
      __ lw(t1, FieldMemOperand(a2, 0));
      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
    }

    // Save the resulting elements kind in type info. We can't just store a3
    // in the AllocationSite::transition_info field because the elements kind
    // is restricted to a portion of the field; the upper bits need to be left
    // alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ lw(t0, FieldMemOperand(
                  a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
    __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
    __ sw(t0, FieldMemOperand(
                  a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));

    __ bind(&normal_sequence);
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index =
      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::ShouldTrack(kind)) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}

void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayNArgumentsConstructorStub stub(isolate);
  stub.GetCode();
  ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  Label not_zero_case, not_one_case;
  __ And(at, a0, a0);
  __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

  __ bind(&not_zero_case);
  __ Branch(&not_one_case, gt, a0, Operand(1));
  CreateArrayDispatchOneArgument(masm, mode);

  __ bind(&not_one_case);
  ArrayNArgumentsConstructorStub stub(masm->isolate());
  __ TailCallStub(&stub);
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
  //  -- a1 : constructor
  //  -- a2 : AllocationSite or undefined
  //  -- a3 : Original constructor
  //  -- sp[0] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // The following test will catch both a NULL pointer and a Smi.
    __ SmiTst(t0, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(t0, t0, t1);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        t1, Operand(MAP_TYPE));

    // We should either have undefined in a2 or a valid AllocationSite
    __ AssertUndefinedOrAllocationSite(a2, t0);
  }

  // Enter the context of the Array function.
  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

  Label subclassing;
  __ Branch(&subclassing, ne, a1, Operand(a3));

  Label no_info;
  // Get the elements kind and dispatch on it.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&no_info, eq, a2, Operand(at));

  __ lw(a3, FieldMemOperand(
                a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
  __ SmiUntag(a3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  // Subclassing.
  __ bind(&subclassing);
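  // Overwrite the receiver slot on the stack with the constructor, push the
  // new target and the AllocationSite, and bump argc by 3 accordingly before
  // handing off to Runtime::kNewArray.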
  __ Lsa(at, sp, a0, kPointerSizeLog2);
  __ sw(a1, MemOperand(at));
  __ li(at, Operand(3));
  __ addu(a0, a0, at);
  __ Push(a3, a2);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo, a0, Operand(1));

  ArrayNArgumentsConstructorStub stubN(isolate());
  __ TailCallStub(&stubN, hi, a0, Operand(1));

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array;
    // look at the first argument to decide.
    __ lw(at, MemOperand(sp, 0));

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc
  //  -- a1 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // The following test will catch both a NULL pointer and a Smi.
    __ SmiTst(a3, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a3, a3, t0);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        t0, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(a3);

  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, a3,
              Operand(HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
  GenerateCase(masm, HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, PACKED_ELEMENTS);
}

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


// Calls an API function.  Allocates HandleScope, extracts returned value
// from handle and propagates exceptions.  Restores context.  stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(
    MacroAssembler* masm, Register function_address,
    ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
    MemOperand return_value_operand, MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address.is(a1) || function_address.is(a2));

  Label profiler_disabled;
  Label end_profiler_check;
  __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ lb(t9, MemOperand(t9, 0));
  __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));

  // Additional parameter is the address of the actual callback.
  __ li(t9, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ mov(t9, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
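  // s0 holds the previous next-handle pointer, s1 the previous limit, s2 the
  // incremented level and s3 the base address; being callee-saved, they
  // survive the C call made below.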
  __ li(s3, Operand(next_address));
  __ lw(s0, MemOperand(s3, kNextOffset));
  __ lw(s1, MemOperand(s3, kLimitOffset));
  __ lw(s2, MemOperand(s3, kLevelOffset));
  __ Addu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, t9);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ lw(v0, return_value_operand);
  __ bind(&return_value_loaded);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ sw(s0, MemOperand(s3, kNextOffset));
  if (__ emit_debug_code()) {
    __ lw(a1, MemOperand(s3, kLevelOffset));
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
  }
  __ Subu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));
  __ lw(at, MemOperand(s3, kLimitOffset));
  __ Branch(&delete_allocated_handles, ne, s1, Operand(at));

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ lw(cp, *context_restore_operand);
  }
  if (stack_space_offset != kInvalidStackOffset) {
    // ExitFrame contains four MIPS argument slots after DirectCEntryStub call
    // so this must be accounted for.
    __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
  } else {
    __ li(s0, Operand(stack_space));
  }
  __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
                    stack_space_offset != kInvalidStackOffset);

  // Check if the function scheduled an exception.
  __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ lw(t1, MemOperand(at));
  __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));

  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ sw(s1, MemOperand(s3, kLimitOffset));
  __ mov(s0, v0);
  __ mov(a0, v0);
  __ PrepareCallCFunction(1, s1);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(v0, s0);
  __ jmp(&leave_exit_frame);
}

void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0                  : callee
  //  -- t0                  : call_data
  //  -- a2                  : holder
  //  -- a1                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1)* 4]   : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------

  Register callee = a0;
  Register call_data = t0;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
  STATIC_ASSERT(FCA::kArgsLength == 8);

  // new target
  __ PushRoot(Heap::kUndefinedValueRootIndex);

  // Save context, callee and call data.
  __ Push(context, callee, call_data);
  if (!is_lazy()) {
    // Load context from callee.
    __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
  }

  Register scratch = call_data;
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);

  // Prepare arguments.
  __ mov(scratch, sp);
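  // scratch now points at the implicit_args array built above, with the
  // holder (FCA::kHolderIndex == 0) at offset 0.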

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  const int kApiStackSpace = 3;
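  // Three words: FunctionCallbackInfo::implicit_args_, values_ and length_,
  // filled in below.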

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // The FunctionCallbackInfo is located just above the return address slot.
  __ Addu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
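  // (for argc() > 0 this is the address of the first JS argument, one word
  // below the receiver).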
  __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
  __ sw(at, MemOperand(a0, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ li(at, Operand(argc()));
  __ sw(at, MemOperand(a0, 2 * kPointerSize));

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Store callbacks return the first JS argument (the value being stored).
  int return_value_offset = 0;
  if (is_store()) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  int32_t stack_space_offset = 3 * kPointerSize;
  stack_space = argc() + FCA::kArgsLength + 1;
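  // i.e. the JS arguments, the implicit args array and the receiver are all
  // dropped when leaving the exit frame.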
  // TODO(adamk): Why are we clobbering this immediately?
  stack_space_offset = kInvalidStackOffset;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_offset, return_value_operand,
                           &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
  // name below the exit frame to make GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = t0;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = a2;

  // Here and below +1 is for name() pushed after the args_ array.
  typedef PropertyCallbackArguments PCA;
  __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
  __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
  __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
                                    kPointerSize));
  __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
  __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
  __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
  // should_throw_on_error -> false
  DCHECK(Smi::kZero == nullptr);
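  // Smi zero has an all-zero bit pattern, so storing zero_reg stores Smi(0),
  // i.e. false.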
  __ sw(zero_reg,
        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ sw(scratch, MemOperand(sp, 0 * kPointerSize));

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
  __ mov(a0, sp);                              // a0 = Handle<Name>
  __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_

  const int kApiStackSpace = 1;
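  // One word for PropertyCallbackInfo::args_, stored below.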
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ sw(a1, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

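  // Load the native getter's entry point: kJsGetterOffset holds a Foreign
  // whose foreign address is the C++ callback.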
  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ lw(api_function_address,
        FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, kInvalidStackOffset,
                           return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS